#!/usr/bin/env python
#
# From Eric Kabuchanga, kabuchanga@rcmrd.org
# RCMRD Nairobi, Kenya
# Minor tweaks for MacOSX Pat Cappelaere - Vightel Corporation
#
# Here is the link where you can get the original hdfs and the resulting tif files
# http://41.206.34.124/frostmaps/

import time
import datetime
import glob, os, fnmatch
#import arcpy
#import smtplib
#from email.MIMEMultipart import MIMEMultipart
#from email.MIMEBase import MIMEBase
#from email.MIMEText import MIMEText
#from email.Utils import COMMASPACE, formatdate
#from email import Encoders
#import shutil
import config

one_day = datetime.timedelta(days=1)
#_today = datetime.date.today() - one_day
# PGC Debug
_today = datetime.date(2014, 10, 2)
_month = _today.month
_day = _today.day
_year = str(_today.year)
_yrDay = str(_today.timetuple()[7])

if len(_yrDay) == 1:
    _yrDay = "00" + _yrDay
elif len(_yrDay) == 2:
    _yrDay = "0" + _yrDay
else:
    _yrDay = _yrDay

BASE_DIR = config.FROST_DIR

outPtDir = os.path.join(BASE_DIR, _year, _yrDay, 'output')
if not os.path.exists(outPtDir):
    os.makedirs(outPtDir)

srcPath = os.path.join(BASE_DIR, _year)
if not os.path.exists(srcPath):
    os.makedirs(srcPath)

resources = os.path.join(BASE_DIR, 'resources')
templateMXD = os.path.join(resources, 'Frost2.mxd')        #"H:\\Frost\\_resources\\Frost2.mxd"
targetMXD = os.path.join(resources, 'Frost3.mxd')          #"H:\\Frost\\_resources\\Frost3.mxd"
symbologyLayerFile = os.path.join(resources, 'LST2.lyr')   #"H:\\Frost\\_resources\\LST2.lyr"

frostMapTitle = "Estimated Frost Occurrences on " + str(_today + one_day)
#ouputMapFileName = "H:\\Frost\\_workingDir\\maps\\Frost_" + str(_today + one_day)
ouputMapFileName = os.path.join(BASE_DIR, _year, _yrDay, "Frost_" + str(_today + one_day))

print (_today)

#......................................................................................................................................................................
def send_mail(send_from, send_to, subject, text, files=[], server="192.168.0.243"):
    assert type(send_to) == list
    assert type(files) == list

    msg = MIMEMultipart()
    msg['From'] = send_from
    msg['To'] = COMMASPACE.join(send_to)
    msg['Date'] = formatdate(localtime=True)
    msg['Subject'] = subject

    msg.attach(MIMEText(text))

    for f in files:
        part = MIMEBase('application', "octet-stream")
        part.set_payload(open(f, "rb").read())
        Encoders.encode_base64(part)
        part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
        msg.attach(part)

    smtp = smtplib.SMTP(server)
    smtp.set_debuglevel(1)
    smtp.ehlo()
    smtp.starttls()
    #smtp.ehlo()
    smtp.login('servir', 'servir2013')
    smtp.sendmail(send_from, send_to, msg.as_string())
    smtp.close()

#..............................................................................................................................
def _getFrostFiles(tifPath):
    frostFiles = []
    try:
        dirList = os.listdir(tifPath)
        for fname in dirList:
            if fnmatch.fnmatch(fname, '*.tif'):
                #Process: Build Pyramids And Statistics for the TIF file
                arcpy.BuildPyramidsandStatistics_management(srcPath + _yrDay + "\\output\\" + fname, "INCLUDE_SUBDIRECTORIES", "BUILD_PYRAMIDS", "CALCULATE_STATISTICS", "NONE")
                #Process: Get Raster Properties and determine the maximum cell value
                #maxCellValue = arcpy.GetRasterProperties_management(srcPath + "\\" + fname, "MAXIMUM")
                rst = arcpy.Raster(srcPath + _yrDay + "\\output\\" + fname)
                maxCellValue = rst.maximum
                if str(maxCellValue) == "0.0":
                    print str(maxCellValue) + "T"
                else:
                    print str(maxCellValue) + "F"
                    frostFiles.append(fname)
    except IOError as e:
        print "I/O error({0}): {1}".format(e.errno, e.strerror)
    return frostFiles

#print _getFrostFiles(srcPath)[0]
#.....................................................................................................................................................................
def _mapping(tmp_mxdPath, symbologyLayer, target_mxdPath, MapTitle, outPutFileName):
    try:
        mxd = arcpy.mapping.MapDocument(tmp_mxdPath)  #("D:\\Modis_LST\\Frost\\Frost2.mxd")
        df = arcpy.mapping.ListDataFrames(mxd, "Layers")[0]

        #Add frost layers to the map document
        print "Adding frost layers"
        for tifFile in _getFrostFiles(srcPath + _yrDay + "\\output\\"):
            print tifFile
            result = arcpy.MakeRasterLayer_management(srcPath + _yrDay + "\\output\\" + tifFile, tifFile + ".lyr")
            print result.getOutput(0)
            addLayer = result.getOutput(0)
            #addLayer = arcpy.mapping.Layer(srcPath + "\\" + tifFile)
            arcpy.mapping.AddLayer(df, addLayer, "BOTTOM")

        #Apply Frost symbology to the layers
        print "Applying symbology"
        lryIndx = 0
        for lyr in arcpy.mapping.ListLayers(mxd, "", df):
            if lryIndx > 1:
                arcpy.ApplySymbologyFromLayer_management(lyr, symbologyLayer)
            lryIndx = lryIndx + 1

        #Add new Map title
        print "Titling map"
        for elm in arcpy.mapping.ListLayoutElements(mxd, "TEXT_ELEMENT"):
            if elm.name == "map":
                elm.text = MapTitle
                print elm.text
            if elm.name == "day":
                elm.text = "Map Reference no :- " + _yrDay
                print elm.text

        mxd.saveACopy(target_mxdPath)  #("D:\\Modis_LST\\Frost\\Frost3.mxd")
        del mxd

        #Export to pdf and JPG
        print "Exporting maps"
        mappingMxd = arcpy.mapping.MapDocument(target_mxdPath)
        arcpy.mapping.ExportToPDF(mappingMxd, outPutFileName + ".pdf")
        arcpy.mapping.ExportToJPEG(mappingMxd, outPutFileName + ".jpg")
        #Email the maps
    except IOError as e:
        print "I/O error({0}): {1}".format(e.errno, e.strerror)

#.......................................................................................................................................................................
def _getLSTFile(_time):
    global _yrDay, _year
    lstfname = 'MYD11_L2.A'
    try:
        if len(_yrDay) == 2:
            _yrDay = "0" + _yrDay
        print _yrDay
        lstfname = os.path.join(_yrDay, "lst", lstfname + _year + _yrDay + "." + _time + ".005.NRT.hdf")
        print lstfname
    except IOError as e:
        print e
    return lstfname

#.......................................................................................................................................................................
def _getGeolocationFile(_time):
    global _yrDay, _year
    lstfname = 'MYD03.A'
    try:
        if len(_yrDay) == 2:
            _yrDay = "0" + _yrDay
        print _yrDay
        lstfname = os.path.join(_yrDay, "geo", lstfname + _year + _yrDay + "." + _time + ".005.NRT.hdf")
        print lstfname
    except IOError as e:
        print e
    return lstfname

#.......................................................................................................................................................................
def _getOutputFile(_time):
    global _yrDay, _year
    lstfname = 'Frost_'
    try:
        if len(_yrDay) == 2:
            _yrDay = "0" + _yrDay
        print _yrDay
        lstfname = os.path.join(_yrDay, "output", lstfname + _year + _yrDay + "." + _time + ".tif")
        print lstfname
    except IOError as e:
        print e
    return lstfname

#----------------------------------------------------------------------------------------------------------------------------------------------------------------------
def _mrtSwath2Gird(inPutLST, OutPuTIF, inPutGeoloc):
    try:
        #cmd1='swath2grid -if=D:\\Modis_LST\\2014\\027\\lst\\MYD11_L2.A2013027.0030.005.NRT.hdf -of=D:\\Modis_LST\\2014\\027\\output\\output1.tif -gf=D:\\Modis_LST\\2014\\027\\geo\\MYD03.A2013027.0030.005.NRT.hdf -off=GEOTIFF_FMT -sds=LST -kk=NN -oproj=GEO -oprm="0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0" -oul="33.0 5.5" -olr="42.0 -5.5" -osst=LAT_LONG -osp=8'
        #cmd='swath2grid -if='+ inPutLST + ' -of='+OutPuTIF+' -gf='+inPutGeoloc+' -off=GEOTIFF_FMT -sds=LST -kk=NN -oproj=GEO -oprm="0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0" -oul="33.0 5.5" -olr="42.0 -5.5" -osst=LAT_LONG -osp=8'
        cmd = 'swath2grid -if=' + inPutLST + ' -of=' + OutPuTIF + ' -gf=' + inPutGeoloc + ' -off=GEOTIFF_FMT -sds=LST -kk=NN -oproj=GEO -oprm="0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0" -oul="14.5 15.5" -olr="51.5 -13.5" -osst=LAT_LONG -osp=8'
        os.system(cmd)
    except IOError as e:
        print "I/O error({0}): {1}".format(e.errno, e.strerror)

#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def _theMain(theTime):
    try:
        lstDir = srcPath
        _lstFname = _getLSTFile(theTime)
        _geoLocFname = _getGeolocationFile(theTime)
        _outPuttif = _getOutputFile(theTime)

        inLst = os.path.join(lstDir, _lstFname)        #'D:\\Modis_LST\\2013\\027\\lst\\MYD11_L2.A2013027.0030.005.NRT.hdf'
        outTif = os.path.join(lstDir, _outPuttif)      #'D:\\Modis_LST\\2013\\027\\output\\output1.tif'
        inGeoloc = os.path.join(lstDir, _geoLocFname)  #'D:\\Modis_LST\\2013\\027\\geo\\MYD03.A2013027.0030.005.NRT.hdf'

        if (not os.path.isfile(inLst)) or (not os.path.isfile(inGeoloc)):
            print("Error: %s file not found" % inLst)
            print("Or Error: %s file not found" % inGeoloc)
        else:
            _mrtSwath2Gird(inLst, outTif, inGeoloc)
    except IOError as e:
        print "I/O error({0}): {1}".format(e.errno, e.strerror)

#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
_hr = 0
while _hr < 24:
    _min = 0
    hrStr = str(_hr)
    if len(str(_hr)) == 1:
        hrStr = "0" + str(_hr)
    while _min < 60:
        if len(str(_min)) == 1:
            minStr = "0" + str(_min)
        else:
            minStr = str(_min)
        _thhr = hrStr + minStr
        _theMain(_thhr)
        #print _thhr
        _min = _min + 5
    _hr = _hr + 1

#_mapping(templateMXD, symbologyLayerFile, targetMXD, frostMapTitle, ouputMapFileName)

#Send frost products to users
#filesToAttch = [ouputMapFileName + ".pdf", ouputMapFileName + ".jpg"]
#recp = ["jgitau@rcmrd.org", "kabuchanga@rcmrd.org", "ashutosh.limaye@nasa.gov"]
#recp = ["ayubshaka@ymail.com", "mungai_j@yahoo.com", "sakwa@meteo.go.ke", "ashulimaye@yahoo.com", "absaes@live.com", "kabuchanga@rcmrd.org", "ceresemarie@gmail.com"]
#recp2 = ["Kabuchanga@yahoo.com", "james.kiguru@aon.co.ke", "Leonard.Musao@aon.co.ke", "John.Gangla@aon.co.ke"]
#send_mail(send_from, send_to, subject, text, files=[], server="192.168.0.243"):
#send_mail("servir@rcmrd.org", recp, "Frost Map for " + str(_today + one_day), "Please find the attached Frost map for " + str(_today + one_day) + ". You can also find the same map on http://41.206.34.124/frostmaps/ This email was automatically sent by the Frost Monitoring System.", filesToAttch, "192.168.0.243:25")
#send_mail("servir@rcmrd.org", recp2, "Frost Map for " + str(_today + one_day), "Please find the attached Frost map for " + str(_today + one_day) + ". You can also find the same map on http://41.206.34.124/frostmaps/ This email was automatically sent by the Frost Monitoring System.", filesToAttch, "192.168.0.243:25")
from masterapp.tests import * import re from mock import Mock from pylons import config #from masterapp import model class TestPlayerController(TestModel): def test_index(self): """ Testing /player """ response = self.app.get( url_for(controller='player'), headers = self.win_ff2_headers) # Test response... assert response.c.profile != None assert 'player | harmonize.fm' in response response = self.app.get( url_for(controller='player'), headers = self.win_ie6_headers) response = response.follow() assert 'Internet Explorer 6' in response.body def test_new_user(self): """ Testing /player without a user """ # Test creating a user model.Session.delete(self.user) model.Session.commit() response = self.app.get( url_for(controller='player'), headers=self.win_ff2_headers) assert 'player | harmonize.fm' in response def test_get_song_url(self): """ Testing /player/songurl/<songid> """ self.user.update_profile = Mock() # Test 404 for a non-existent song response = self.app.get(url_for( controller='player', action = 'songurl', id = 7 ), status=404) # Test for a song I do own ns = generate_fake_song(model.Session.query(model.User).first()) response = self.app.get(url_for( controller='player', action = 'songurl', id = ns.id )) # Test for a song i own and set now playing ns = generate_fake_song(model.Session.query(model.User).one()) ns2 = generate_fake_song(model.Session.query(model.User).one()) response = self.app.get(url_for( controller = 'player', action = 'songurl', id = ns.id ), params = { 'pid': ns2.id}) model.Session.add(self.user) #rebind user model.Session.add(ns2) assert self.user.nowplaying.id == ns2.id, \ 'Did not set nowplaying correctly' assert self.user.update_profile.called, \ 'Did not update facebook profile' assert re.search(ns.sha, response.body),\ 'Did not return the sha in the URL' # Test for a song none of my friends own anewuser = generate_fake_user() anewsong = generate_fake_song(anewuser) response = self.app.get(url_for( controller='player', action = 'songurl', id = anewsong.id ), params={'friend': anewuser.id}, status=401) def test_album_details(self): """ Testing /player/album_details """ # Test an illegit request response = self.app.get(url_for( controller = 'player', action = 'album_details', ), params={'album': 1}, status=404) # Fake some data mysong = generate_fake_song(self.user) friend = generate_fake_user(config['pyfacebook.fbfriendid']) friendsong = generate_fake_song(friend) # Test details for one of my albums response = self.app.get(url_for( controller = 'player', action = 'album_details', ), params={'album': mysong.albumid}) assert mysong.title in response, \ "Did not return the details on my own album" # Test details for one of my friends' albums response = self.app.get(url_for( controller = 'player', action = 'album_details', ), params={'album': friendsong.albumid, 'friend': friend.id}) assert friendsong.title in response, \ "Did not return the details on my friend's album" def test_username(self): """ Testing /player/username """ response = self.app.get(url_for( controller = 'player', action = 'username', )) assert response.body == self.user.name, \ "Did not return correct name" def test_feedback(self): """ Testing /player/feedback """ # Illegit request response = self.app.get(url_for( controller = 'player', action = 'feedback' )) assert response.body =='0',\ "feedback w/out params request went unnoticed" response = self.app.get(url_for( controller = 'player', action = 'feedback' ), params={'email': '', 'feedback': ''}) assert response.body == '0', "Empty feedback 
went unnoticed" response = self.app.get(url_for( controller = 'player', action = 'feedback' ), params={'email': '', 'feedback': 'Something to say'}) assert response.body == '1', "Legit feedback didn't get legit response" # Since most of the actual mail sending stuff happens in another thread, # this is a bit difficult to test. At least we know we're not getting # 500 errors. response = self.app.get(url_for( controller = 'player', action = 'feedback' ), params={'email': 'justin@harmonize.fm', 'feedback': 'Something to say'}) assert response.body == '1', "Legit email didn't get legit response" def test_blog(self): """ Testing /player/blog """ response = self.app.get(url_for( controller = 'player', action = 'blog' )) assert 'News' in response.body, "Blog did not return" def test_home(self): """ Testing /player/home """ response = self.app.get(url_for( controller = 'player', action = 'home' ), headers = self.win_ff2_headers) assert 'Harmonizer Setup.exe' in response.body,\ "Windows link to harmonizer not on home page" response = self.app.get(url_for( controller = 'player', action = 'home' ), headers = self.mac_safari3_headers) assert 'Harmonizer.dmg' in response.body,\ "Mac link to harmonizer not on home page" response = self.app.get(url_for( controller = 'player', action = 'home' ), headers = self.linux_ff3_headers) assert '/harmonizer-not-supported' in response.body,\ "Linux not supported link not on home page" def test_set_volume(self): """ Testing /player/set_volume """ response = self.app.get(url_for( controller = 'player', action = 'set_volume', id = 43 )) assert self.user.lastvolume == 43, 'Volume was not set'
#!/usr/bin/env python # Copyright 2016 DIANA-HEP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import math import numbers import struct from histogrammar.defs import Container, Factory, identity, JsonFormatException, ContainerException from histogrammar.util import n_dim, datatype, serializable, inheritdoc, maybeAdd, floatToJson, hasKeys, numeq, \ basestring from histogrammar.primitives.count import Count # Select class Select(Factory, Container): """Filter or weight data according to a given selection. This primitive is a basic building block, intended to be used in conjunction with anything that needs a user-defined cut. In particular, a standard histogram often has a custom selection, and this can be built by nesting Select -> Bin -> Count. Select also resembles :doc:`Fraction <histogrammar.primitives.fraction.Fraction>`, but without the ``denominator``. The efficiency of a cut in a Select aggregator named ``x`` is simply ``x.cut.entries / x.entries`` (because all aggregators have an ``entries`` member). """ @staticmethod def ed(entries, cut): """Create a Select that is only capable of being added. Parameters: entries (float): the number of entries. cut (:doc:`Container <histogrammar.defs.Container>`): the filled sub-aggregator. """ if not isinstance(entries, numbers.Real) and entries not in ("nan", "inf", "-inf"): raise TypeError("entries ({0}) must be a number".format(entries)) if not isinstance(cut, Container): raise TypeError("cut ({0}) must be a Container".format(cut)) if entries < 0.0: raise ValueError("entries ({0}) cannot be negative".format(entries)) out = Select(None, cut) out.entries = float(entries) return out.specialize() @staticmethod def ing(quantity, cut=Count()): """Synonym for ``__init__``.""" return Select(quantity, cut) def __getattr__(self, attr): """Pass on searches for custom methods to the ``value``, so that Limit becomes effectively invisible.""" if attr.startswith("__") and attr.endswith("__"): return getattr(Select, attr) elif attr not in self.__dict__ and hasattr(self.__dict__["cut"], attr): return getattr(self.__dict__["cut"], attr) else: return self.__dict__[attr] def __init__(self, quantity=identity, cut=Count()): """Create a Select that is capable of being filled and added. Parameters: quantity (function returning bool or float): computes the quantity of interest from the data and interprets it as a selection (multiplicative factor on weight). cut (:doc:`Container <histogrammar.defs.Container>`): will only be filled with data that pass the cut, and which are weighted by the cut. Other Parameters: entries (float): the number of entries, initially 0.0. 
""" if not isinstance(cut, Container): raise TypeError("cut ({0}) must be a Container".format(cut)) self.entries = 0.0 self.quantity = serializable(identity(quantity) if isinstance(quantity, str) else quantity) self.cut = cut super(Select, self).__init__() self.specialize() def fractionPassing(self): """Fraction of weights that pass the quantity.""" return self.cut.entries / self.entries @inheritdoc(Container) def zero(self): return Select(self.quantity, self.cut.zero()) @inheritdoc(Container) def __add__(self, other): if isinstance(other, Select): out = Select(self.quantity, self.cut + other.cut) out.entries = self.entries + other.entries return out.specialize() else: raise ContainerException("cannot add {0} and {1}".format(self.name, other.name)) @inheritdoc(Container) def __iadd__(self, other): if isinstance(other, Select): self.entries += other.entries self.cut += other.cut return self else: raise ContainerException("cannot add {0} and {1}".format(self.name, other.name)) @inheritdoc(Container) def __mul__(self, factor): if math.isnan(factor) or factor <= 0.0: return self.zero() else: out = self.zero() out.entries = factor * self.entries out.cut = self.cut * factor return out.specialize() @inheritdoc(Container) def __rmul__(self, factor): return self.__mul__(factor) @inheritdoc(Container) def fill(self, datum, weight=1.0): self._checkForCrossReferences() if weight > 0.0: w = self.quantity(datum) if not isinstance(w, numbers.Real): raise TypeError("function return value ({0}) must be boolean or number".format(w)) w *= weight if w > 0.0: self.cut.fill(datum, w) # no possibility of exception from here on out (for rollback) self.entries += weight def _cppGenerateCode(self, parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes, derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode, fillPrefix, fillIndent, weightVars, weightVarStack, tmpVarTypes): return self._c99GenerateCode(parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes, derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode, fillPrefix, fillIndent, weightVars, weightVarStack, tmpVarTypes) def _c99GenerateCode(self, parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes, derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode, fillPrefix, fillIndent, weightVars, weightVarStack, tmpVarTypes): initCode.append(" " * initIndent + self._c99ExpandPrefix(*initPrefix) + ".entries = 0.0;") normexpr = self._c99QuantityExpr( parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes, derivedFieldExprs, None) fillCode.append(" " * fillIndent + self._c99ExpandPrefix(*fillPrefix) + ".entries += " + weightVarStack[-1] + ";") fillCode.append(" " * fillIndent + """if (!std::isnan({0}) && {0} > 0.0) {{""".format(normexpr)) weightVars.append("weight_" + str(len(weightVars))) weightVarStack = weightVarStack + (weightVars[-1],) fillCode.append(" " * (fillIndent + 2) + """{0} = {1} * {2};""".format(weightVarStack[-1], weightVarStack[-2], normexpr)) self.cut._c99GenerateCode(parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes, derivedFieldExprs, storageStructs, initCode, initPrefix + (("var", "cut"), ), initIndent, fillCode, fillPrefix + (("var", "cut"), ), fillIndent + 2, weightVars, weightVarStack, tmpVarTypes) fillCode.append(" " * fillIndent + "}") storageStructs[self._c99StructName()] = """ typedef struct {{ double entries; {1} cut; }} {0}; """.format(self._c99StructName(), self.cut._c99StorageType()) 
def _clingUpdate(self, filler, *extractorPrefix): obj = self._clingExpandPrefix(filler, *extractorPrefix) self.entries += obj.entries self.cut._clingUpdate(obj, ("var", "cut")) def _c99StructName(self): return "Se" + self.cut._c99StructName() def _cudaGenerateCode(self, parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes, derivedFieldExprs, storageStructs, initCode, initPrefix, initIndent, fillCode, fillPrefix, fillIndent, combineCode, totalPrefix, itemPrefix, combineIndent, jsonCode, jsonPrefix, jsonIndent, weightVars, weightVarStack, tmpVarTypes, suppressName): normexpr = self._cudaQuantityExpr( parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes, derivedFieldExprs, None) initCode.append(" " * initIndent + self._c99ExpandPrefix(*initPrefix) + ".entries = 0.0f;") fillCode.append(" " * fillIndent + "atomicAdd(&" + self._c99ExpandPrefix(*fillPrefix) + ".entries, " + weightVarStack[-1] + ");") combineCode.append( " " * combineIndent + "atomicAdd(&" + self._c99ExpandPrefix( * totalPrefix) + ".entries, " + self._c99ExpandPrefix( * itemPrefix) + ".entries);") jsonCode.append(" " * jsonIndent + "fprintf(out, \"{\\\"entries\\\": \");") jsonCode.append(" " * jsonIndent + "floatToJson(out, " + self._c99ExpandPrefix(*jsonPrefix) + ".entries);") weightVars.append("weight_" + str(len(weightVars))) weightVarStack = weightVarStack + (weightVars[-1],) fillCode.append(" " * fillIndent + "{newweight} = (isnan({q}) || {q} <= 0.0) ? 0.0 : ({oldweight} * {q});".format( newweight=weightVarStack[-1], oldweight=weightVarStack[-2], q=normexpr)) jsonCode.append(" " * jsonIndent + "fprintf(out, \", \\\"sub:type\\\": \\\"" + self.cut.name + "\\\"\");") jsonCode.append(" " * jsonIndent + "fprintf(out, \", \\\"data\\\": \");") self.cut._cudaGenerateCode(parser, generator, inputFieldNames, inputFieldTypes, derivedFieldTypes, derivedFieldExprs, storageStructs, initCode, initPrefix + (("var", "cut"), ), initIndent, fillCode, fillPrefix + (("var", "cut"), ), fillIndent, combineCode, totalPrefix + (("var", "cut"), ), itemPrefix + (("var", "cut"), ), combineIndent, jsonCode, jsonPrefix + (("var", "cut"), ), jsonIndent, weightVars, weightVarStack, tmpVarTypes, False) if suppressName or self.quantity.name is None: jsonCode.append(" " * jsonIndent + "fprintf(out, \"}\");") else: jsonCode.append(" " * jsonIndent + "fprintf(out, \", \\\"name\\\": " + json.dumps(json.dumps(self.quantity.name))[1:-1] + "}\");") storageStructs[self._c99StructName()] = """ typedef struct {{ float entries; {1} cut; }} {0}; """.format(self._c99StructName(), self.cut._cudaStorageType()) def _cudaUnpackAndFill(self, data, bigendian, alignment): format = "<f" entries, = struct.unpack(format, data[:struct.calcsize(format)]) self.entries += entries data = data[struct.calcsize(format):] data = self.cut._cudaUnpackAndFill(data, bigendian, alignment) return data def _numpy(self, data, weights, shape): w = self.quantity(data) self._checkNPQuantity(w, shape) self._checkNPWeights(weights, shape) weights = self._makeNPWeights(weights, shape) import numpy w = w * weights w[numpy.isnan(w)] = 0.0 w[w < 0.0] = 0.0 self.cut._numpy(data, w, shape) # no possibility of exception from here on out (for rollback) self.entries += float(weights.sum()) def _sparksql(self, jvm, converter): return converter.Select(self.quantity.asSparkSQL(), self.cut._sparksql(jvm, converter)) @property def children(self): """List of sub-aggregators, to make it possible to walk the tree.""" return [self.cut] @inheritdoc(Container) def toJsonFragment(self, 
suppressName): return maybeAdd({"entries": floatToJson(self.entries), "sub:type": self.cut.name, "data": self.cut.toJsonFragment(False)}, name=(None if suppressName else self.quantity.name)) @staticmethod @inheritdoc(Factory) def fromJsonFragment(json, nameFromParent): if isinstance(json, dict) and hasKeys(json.keys(), ["entries", "sub:type", "data"], ["name"]): if json["entries"] in ("nan", "inf", "-inf") or isinstance(json["entries"], numbers.Real): entries = float(json["entries"]) else: raise JsonFormatException(json, "Select.entries") if isinstance(json.get("name", None), basestring): name = json["name"] elif json.get("name", None) is None: name = None else: raise JsonFormatException(json["name"], "Select.name") if isinstance(json["sub:type"], basestring): factory = Factory.registered[json["sub:type"]] else: raise JsonFormatException(json, "Select.type") cut = factory.fromJsonFragment(json["data"], None) out = Select.ed(entries, cut) out.quantity.name = nameFromParent if name is None else name return out.specialize() else: raise JsonFormatException(json, "Select") def __repr__(self): return "<Select cut={0}>".format(self.cut.name) def __eq__(self, other): return isinstance(other, Select) and numeq(self.entries, other.entries) and self.cut == other.cut def __ne__(self, other): return not self == other def __hash__(self): return hash((self.entries, self.cut)) # extra properties: number of dimensions and datatypes of sub-hists Select.n_dim = n_dim Select.datatype = datatype # register extra methods Factory.register(Select)
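# --- Usage sketch (not part of the original module): the Select -> Bin -> Count nesting
# --- described in the class docstring, i.e. a histogram filled only by data passing a cut.
# --- It assumes the Bin and Count primitives and the top-level import path of the
# --- histogrammar package; the field names in the example data are made up.
from histogrammar import Select, Bin, Count

cut_hist = Select(lambda d: d["pt"] > 20.0,                         # selection / weight factor
                  Bin(10, 0.0, 100.0, lambda d: d["mass"], Count()))

for datum in [{"pt": 35.0, "mass": 91.0}, {"pt": 5.0, "mass": 60.0}]:
    cut_hist.fill(datum)

# Efficiency of the cut, as noted in the docstring: cut.entries / entries
efficiency = cut_hist.fractionPassing()   # == cut_hist.cut.entries / cut_hist.entries == 0.5 here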
import pytest from django.utils import timezone from api.base.settings.defaults import API_BASE from framework.auth.core import Auth from osf.models import RegistrationSchema, RegistrationProvider from osf_tests.factories import ( ProjectFactory, RegistrationFactory, RegistrationProviderFactory, AuthUserFactory, CollectionFactory, OSFGroupFactory, DraftRegistrationFactory, ) from osf.utils import permissions from website.project.metadata.utils import create_jsonschema_from_metaschema from website import settings OPEN_ENDED_SCHEMA_VERSION = 3 SCHEMA_VERSION = 2 @pytest.mark.django_db class DraftRegistrationTestCase: @pytest.fixture() def user(self): return AuthUserFactory() @pytest.fixture() def user_write_contrib(self): return AuthUserFactory() @pytest.fixture() def user_read_contrib(self): return AuthUserFactory() @pytest.fixture() def user_non_contrib(self): return AuthUserFactory() @pytest.fixture() def group_mem(self): return AuthUserFactory() @pytest.fixture() def group(self, group_mem): return OSFGroupFactory(creator=group_mem) @pytest.fixture() def project_public(self, user, user_write_contrib, user_read_contrib, group, group_mem): project_public = ProjectFactory(is_public=True, creator=user) project_public.add_contributor( user_write_contrib, permissions=permissions.WRITE) project_public.add_contributor( user_read_contrib, permissions=permissions.READ) project_public.save() project_public.add_osf_group(group, permissions.ADMIN) project_public.add_tag('hello', Auth(user), save=True) return project_public @pytest.fixture() def metadata(self): def metadata(draft): test_metadata = {} json_schema = create_jsonschema_from_metaschema( draft.registration_schema.schema) for key, value in json_schema['properties'].items(): response = 'Test response' items = value['properties']['value'].get('items') enum = value['properties']['value'].get('enum') if items: # multiselect response = [items['enum'][0]] elif enum: # singleselect response = enum[0] elif value['properties']['value'].get('properties'): response = {'question': {'value': 'Test Response'}} test_metadata[key] = {'value': response} return test_metadata return metadata @pytest.mark.django_db class TestDraftRegistrationList(DraftRegistrationTestCase): @pytest.fixture() def schema(self): return RegistrationSchema.objects.get( name='Open-Ended Registration', schema_version=OPEN_ENDED_SCHEMA_VERSION) @pytest.fixture() def draft_registration(self, user, project_public, schema): return DraftRegistrationFactory( initiator=user, registration_schema=schema, branched_from=project_public ) @pytest.fixture() def url_draft_registrations(self, project_public): # Specifies version to test functionality when using DraftRegistrationLegacySerializer return '/{}nodes/{}/draft_registrations/?{}'.format( API_BASE, project_public._id, 'version=2.19') def test_admin_can_view_draft_list( self, app, user, draft_registration, project_public, schema, url_draft_registrations): res = app.get(url_draft_registrations, auth=user.auth) assert res.status_code == 200 data = res.json['data'] assert len(data) == 1 assert schema._id in data[0]['relationships']['registration_schema']['links']['related']['href'] assert data[0]['id'] == draft_registration._id assert data[0]['attributes']['registration_metadata'] == {} def test_osf_group_with_admin_permissions_can_view( self, app, user, draft_registration, project_public, schema, url_draft_registrations): group_mem = AuthUserFactory() group = OSFGroupFactory(creator=group_mem) project_public.add_osf_group(group, 
permissions.ADMIN) res = app.get(url_draft_registrations, auth=group_mem.auth, expect_errors=True) assert res.status_code == 200 data = res.json['data'] assert len(data) == 1 assert schema._id in data[0]['relationships']['registration_schema']['links']['related']['href'] def test_cannot_view_draft_list( self, app, user_write_contrib, project_public, user_read_contrib, user_non_contrib, url_draft_registrations, group, group_mem): # test_read_only_contributor_cannot_view_draft_list res = app.get( url_draft_registrations, auth=user_read_contrib.auth, expect_errors=True) assert res.status_code == 403 # test_read_write_contributor_cannot_view_draft_list res = app.get( url_draft_registrations, auth=user_write_contrib.auth, expect_errors=True) assert res.status_code == 403 # test_logged_in_non_contributor_cannot_view_draft_list res = app.get( url_draft_registrations, auth=user_non_contrib.auth, expect_errors=True) assert res.status_code == 403 # test_unauthenticated_user_cannot_view_draft_list res = app.get(url_draft_registrations, expect_errors=True) assert res.status_code == 401 # test_osf_group_with_read_permissions project_public.remove_osf_group(group) project_public.add_osf_group(group, permissions.READ) res = app.get(url_draft_registrations, auth=group_mem.auth, expect_errors=True) assert res.status_code == 403 def test_deleted_draft_registration_does_not_show_up_in_draft_list( self, app, user, draft_registration, url_draft_registrations): draft_registration.deleted = timezone.now() draft_registration.save() res = app.get(url_draft_registrations, auth=user.auth) assert res.status_code == 200 data = res.json['data'] assert len(data) == 0 def test_draft_with_registered_node_does_not_show_up_in_draft_list( self, app, user, project_public, draft_registration, url_draft_registrations): reg = RegistrationFactory(project=project_public, draft_registration=draft_registration) draft_registration.registered_node = reg draft_registration.save() res = app.get(url_draft_registrations, auth=user.auth) assert res.status_code == 200 data = res.json['data'] assert len(data) == 0 def test_draft_with_deleted_registered_node_shows_up_in_draft_list( self, app, user, project_public, draft_registration, schema, url_draft_registrations): reg = RegistrationFactory(project=project_public, draft_registration=draft_registration) draft_registration.registered_node = reg draft_registration.save() reg.deleted = timezone.now() reg.save() res = app.get(url_draft_registrations, auth=user.auth) assert res.status_code == 200 data = res.json['data'] assert len(data) == 1 assert schema._id in data[0]['relationships']['registration_schema']['links']['related']['href'] assert data[0]['id'] == draft_registration._id assert data[0]['attributes']['registration_metadata'] == {} def test_draft_registration_serializer_usage(self, app, user, project_public, draft_registration): # Tests the usage of DraftRegistrationDetailSerializer for version 2.20 url_draft_registrations = '/{}nodes/{}/draft_registrations/?{}'.format( API_BASE, project_public._id, 'version=2.20') res = app.get(url_draft_registrations, auth=user.auth) assert res.status_code == 200 data = res.json['data'] assert len(data) == 1 # Set of fields that DraftRegistrationLegacySerializer does not provide assert data[0]['attributes']['title'] assert data[0]['attributes']['description'] assert data[0]['relationships']['affiliated_institutions'] @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation class TestDraftRegistrationCreate(DraftRegistrationTestCase): 
@pytest.fixture() def provider(self): return RegistrationProvider.get_default() @pytest.fixture() def non_default_provider(self, metaschema_open_ended): non_default_provider = RegistrationProviderFactory() non_default_provider.schemas.add(metaschema_open_ended) non_default_provider.save() return non_default_provider @pytest.fixture() def metaschema_open_ended(self): return RegistrationSchema.objects.get( name='Open-Ended Registration', schema_version=OPEN_ENDED_SCHEMA_VERSION) @pytest.fixture() def payload(self, metaschema_open_ended, provider): return { 'data': { 'type': 'draft_registrations', 'attributes': {}, 'relationships': { 'registration_schema': { 'data': { 'type': 'registration_schema', 'id': metaschema_open_ended._id } }, 'provider': { 'data': { 'type': 'registration-providers', 'id': provider._id, } } } } } @pytest.fixture() def payload_with_non_default_provider(self, metaschema_open_ended, non_default_provider): return { 'data': { 'type': 'draft_registrations', 'attributes': {}, 'relationships': { 'registration_schema': { 'data': { 'type': 'registration_schema', 'id': metaschema_open_ended._id } }, 'provider': { 'data': { 'type': 'registration-providers', 'id': non_default_provider._id, } } } } } @pytest.fixture() def url_draft_registrations(self, project_public): return '/{}nodes/{}/draft_registrations/?{}'.format( API_BASE, project_public._id, 'version=2.19') def test_type_is_draft_registrations( self, app, user, metaschema_open_ended, url_draft_registrations): draft_data = { 'data': { 'type': 'nodes', 'attributes': {}, 'relationships': { 'registration_schema': { 'data': { 'type': 'registration_schema', 'id': metaschema_open_ended._id } } } } } res = app.post_json_api( url_draft_registrations, draft_data, auth=user.auth, expect_errors=True) assert res.status_code == 409 def test_admin_can_create_draft( self, app, user, project_public, url_draft_registrations, payload, metaschema_open_ended): url = '{}&embed=branched_from&embed=initiator'.format(url_draft_registrations) res = app.post_json_api(url, payload, auth=user.auth) assert res.status_code == 201 data = res.json['data'] assert metaschema_open_ended._id in data['relationships']['registration_schema']['links']['related']['href'] assert data['attributes']['registration_metadata'] == {} assert f'{settings.API_DOMAIN}v2/providers/registrations/{RegistrationProvider.default__id}/' in \ data['relationships']['provider']['links']['related']['href'] assert data['embeds']['branched_from']['data']['id'] == project_public._id assert data['embeds']['initiator']['data']['id'] == user._id def test_cannot_create_draft( self, app, user_write_contrib, user_read_contrib, user_non_contrib, project_public, payload, group, url_draft_registrations, group_mem): # test_write_only_contributor_cannot_create_draft assert user_write_contrib in project_public.contributors.all() res = app.post_json_api( url_draft_registrations, payload, auth=user_write_contrib.auth, expect_errors=True) assert res.status_code == 403 # test_read_only_contributor_cannot_create_draft assert user_read_contrib in project_public.contributors.all() res = app.post_json_api( url_draft_registrations, payload, auth=user_read_contrib.auth, expect_errors=True) assert res.status_code == 403 # test_non_authenticated_user_cannot_create_draft res = app.post_json_api( url_draft_registrations, payload, expect_errors=True) assert res.status_code == 401 # test_logged_in_non_contributor_cannot_create_draft res = app.post_json_api( url_draft_registrations, payload, 
auth=user_non_contrib.auth, expect_errors=True) assert res.status_code == 403 # test_group_admin_cannot_create_draft res = app.post_json_api( url_draft_registrations, payload, auth=group_mem.auth, expect_errors=True) assert res.status_code == 403 # test_group_write_contrib_cannot_create_draft project_public.remove_osf_group(group) project_public.add_osf_group(group, permissions.WRITE) res = app.post_json_api( url_draft_registrations, payload, auth=group_mem.auth, expect_errors=True) assert res.status_code == 403 def test_schema_validation( self, app, user, provider, non_default_provider, payload, payload_with_non_default_provider, url_draft_registrations, metaschema_open_ended): # Schema validation for a default provider without defined schemas with any schema is tested by `test_admin_can_create_draft` # Schema validation for a non-default provider with the correct schema is tested by `test_create_draft_with_provider` # Default provider with defined schemas does not accept everything schema, _ = RegistrationSchema.objects.get_or_create(name='Test schema', schema_version=0) provider.schemas.add(schema) provider.save() res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) assert res.status_code == 400 payload['data']['relationships']['registration_schema']['data']['id'] = schema._id res = app.post_json_api( url_draft_registrations, payload, auth=user.auth) assert res.status_code == 201 # Non-Default provider does not accept everything payload_with_non_default_provider['data']['relationships']['registration_schema']['data']['id'] = schema._id res = app.post_json_api( url_draft_registrations, payload_with_non_default_provider, auth=user.auth, expect_errors=True) assert res.status_code == 400 def test_registration_supplement_errors( self, app, user, provider, url_draft_registrations): # test_registration_supplement_not_found draft_data = { 'data': { 'type': 'draft_registrations', 'attributes': {}, 'relationships': { 'registration_schema': { 'data': { 'type': 'registration_schema', 'id': 'Invalid schema' } }, 'provider': { 'data': { 'type': 'registration-providers', 'id': provider._id, } } } } } res = app.post_json_api( url_draft_registrations, draft_data, auth=user.auth, expect_errors=True) assert res.status_code == 404 # test_registration_supplement_must_be_active_metaschema schema = RegistrationSchema.objects.get( name='Election Research Preacceptance Competition', active=False) draft_data = { 'data': { 'type': 'draft_registrations', 'attributes': {}, 'relationships': { 'registration_schema': { 'data': { 'type': 'registration_schema', 'id': schema._id } }, 'provider': { 'data': { 'type': 'registration-providers', 'id': provider._id, } } } } } res = app.post_json_api( url_draft_registrations, draft_data, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Registration supplement must be an active schema.' 
# test_registration_supplement_must_be_active schema = RegistrationSchema.objects.get( name='Election Research Preacceptance Competition', schema_version=2) draft_data = { 'data': { 'type': 'draft_registrations', 'attributes': {}, 'relationships': { 'registration_schema': { 'data': { 'type': 'registration_schema', 'id': schema._id } }, 'provider': { 'data': { 'type': 'registration-providers', 'id': provider._id, } } } } } res = app.post_json_api( url_draft_registrations, draft_data, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'Registration supplement must be an active schema.' def test_cannot_create_draft_errors( self, app, user, project_public, payload): # test_cannot_create_draft_from_a_registration registration = RegistrationFactory( project=project_public, creator=user) url = '/{}nodes/{}/draft_registrations/'.format( API_BASE, registration._id) res = app.post_json_api( url, payload, auth=user.auth, expect_errors=True) assert res.status_code == 404 # test_cannot_create_draft_from_deleted_node project = ProjectFactory(is_public=True, creator=user) project.is_deleted = True project.save() url_project = '/{}nodes/{}/draft_registrations/'.format( API_BASE, project._id) res = app.post_json_api( url_project, payload, auth=user.auth, expect_errors=True) assert res.status_code == 410 assert res.json['errors'][0]['detail'] == 'The requested node is no longer available.' # test_cannot_create_draft_from_collection collection = CollectionFactory(creator=user) url = '/{}nodes/{}/draft_registrations/'.format( API_BASE, collection._id) res = app.post_json_api( url, payload, auth=user.auth, expect_errors=True) assert res.status_code == 404 def test_registration_supplement_must_be_supplied( self, app, user, url_draft_registrations): draft_data = { 'data': { 'type': 'draft_registrations', 'attributes': { } } } res = app.post_json_api( url_draft_registrations, draft_data, auth=user.auth, expect_errors=True) errors = res.json['errors'][0] assert res.status_code == 400 assert errors['detail'] == 'This field is required.' assert errors['source']['pointer'] == '/data/relationships/registration_schema' def test_cannot_supply_both_registration_metadata_and_registration_responses( self, app, user, payload, url_draft_registrations): payload['data']['attributes']['registration_metadata'] = {'summary': 'Registration data'} payload['data']['attributes']['registration_responses'] = {'summary': 'Registration data'} res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) errors = res.json['errors'][0] assert res.status_code == 400 assert 'Please use `registration_responses` as `registration_metadata` will be deprecated in the future.' 
in errors['detail'] def test_supply_registration_responses_on_creation( self, app, user, payload, url_draft_registrations): schema = RegistrationSchema.objects.get( name='OSF-Standard Pre-Data Collection Registration', schema_version=SCHEMA_VERSION) payload['data']['relationships']['registration_schema']['data']['id'] = schema._id payload['data']['attributes']['registration_responses'] = { 'looked': 'Yes', 'datacompletion': 'No, data collection has not begun', 'comments': '' } res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) attributes = res.json['data']['attributes'] assert attributes['registration_responses'] == { 'looked': 'Yes', 'datacompletion': 'No, data collection has not begun', 'comments': '' } assert attributes['registration_metadata'] == { 'looked': { 'comments': [], 'value': 'Yes', 'extra': [] }, 'datacompletion': { 'comments': [], 'value': 'No, data collection has not begun', 'extra': [] }, 'comments': { 'comments': [], 'value': '', 'extra': [] } } def test_registration_metadata_must_be_a_dictionary( self, app, user, payload, url_draft_registrations): payload['data']['attributes']['registration_metadata'] = 'Registration data' res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) errors = res.json['errors'][0] assert res.status_code == 400 assert errors['source']['pointer'] == '/data/attributes/registration_metadata' assert errors['detail'] == 'Expected a dictionary of items but got type "str".' def test_registration_metadata_question_values_must_be_dictionaries( self, app, user, payload, url_draft_registrations): schema = RegistrationSchema.objects.get( name='OSF-Standard Pre-Data Collection Registration', schema_version=SCHEMA_VERSION) payload['data']['relationships']['registration_schema']['data']['id'] = schema._id payload['data']['attributes']['registration_metadata'] = {} payload['data']['attributes']['registration_metadata']['datacompletion'] = 'No, data collection has not begun' res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) errors = res.json['errors'][0] assert res.status_code == 400 assert errors['detail'] == 'For your registration your response to the \'Data collection status\' field' \ ' is invalid, your response must be one of the provided options.' def test_registration_metadata_question_keys_must_be_value( self, app, user, payload, url_draft_registrations): schema = RegistrationSchema.objects.get( name='OSF-Standard Pre-Data Collection Registration', schema_version=SCHEMA_VERSION) payload['data']['relationships']['registration_schema']['data']['id'] = schema._id payload['data']['attributes']['registration_metadata'] = {} payload['data']['attributes']['registration_metadata']['datacompletion'] = { 'incorrect_key': 'No, data collection has not begun'} res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) errors = res.json['errors'][0] assert res.status_code == 400 assert errors['detail'] == 'For your registration your response to the \'Data collection status\' ' \ 'field is invalid, your response must be one of the provided options.' 
def test_question_in_registration_metadata_must_be_in_schema( self, app, user, payload, url_draft_registrations): schema = RegistrationSchema.objects.get( name='OSF-Standard Pre-Data Collection Registration', schema_version=SCHEMA_VERSION) payload['data']['relationships']['registration_schema']['data']['id'] = schema._id payload['data']['attributes']['registration_metadata'] = {} payload['data']['attributes']['registration_metadata']['q11'] = { 'value': 'No, data collection has not begun' } res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) errors = res.json['errors'][0] assert res.status_code == 400 assert errors['detail'] == 'For your registration the \'datacompletion\' field is extraneous and not' \ ' permitted in your response.' def test_multiple_choice_question_value_must_match_value_in_schema( self, app, user, payload, url_draft_registrations): schema = RegistrationSchema.objects.get( name='OSF-Standard Pre-Data Collection Registration', schema_version=SCHEMA_VERSION) payload['data']['relationships']['registration_schema']['data']['id'] = schema._id payload['data']['attributes']['registration_metadata'] = {} payload['data']['attributes']['registration_metadata']['datacompletion'] = { 'value': 'Nope, data collection has not begun'} res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) errors = res.json['errors'][0] assert res.status_code == 400 assert errors['detail'] == 'For your registration your response to the \'Data collection status\'' \ ' field is invalid, your response must be one of the provided options.' def test_registration_responses_must_be_a_dictionary( self, app, user, payload, url_draft_registrations): payload['data']['attributes']['registration_responses'] = 'Registration data' res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) errors = res.json['errors'][0] assert res.status_code == 400 assert errors['source']['pointer'] == '/data/attributes/registration_responses' assert errors['detail'] == 'Expected a dictionary of items but got type "str".' def test_registration_responses_question_values_must_not_be_dictionaries( self, app, user, payload, url_draft_registrations): schema = RegistrationSchema.objects.get( name='OSF-Standard Pre-Data Collection Registration', schema_version=SCHEMA_VERSION) payload['data']['relationships']['registration_schema']['data']['id'] = schema._id payload['data']['attributes']['registration_responses'] = {} payload['data']['attributes']['registration_responses']['datacompletion'] = {'value': 'No, data collection has not begun'} res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) errors = res.json['errors'][0] assert res.status_code == 400 assert errors['detail'] == 'For your registration, your response to the \'Data collection status\' field' \ ' is invalid, your response must be one of the provided options.' 
def test_question_in_registration_responses_must_be_in_schema( self, app, user, payload, url_draft_registrations): schema = RegistrationSchema.objects.get( name='OSF-Standard Pre-Data Collection Registration', schema_version=SCHEMA_VERSION) payload['data']['relationships']['registration_schema']['data']['id'] = schema._id payload['data']['attributes']['registration_responses'] = {} payload['data']['attributes']['registration_responses']['q11'] = 'No, data collection has not begun' res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) errors = res.json['errors'][0] assert res.status_code == 400 assert errors['detail'] == 'Additional properties are not allowed (\'q11\' was unexpected)' def test_registration_responses_multiple_choice_question_value_must_match_value_in_schema( self, app, user, payload, url_draft_registrations): schema = RegistrationSchema.objects.get( name='OSF-Standard Pre-Data Collection Registration', schema_version=SCHEMA_VERSION) payload['data']['relationships']['registration_schema']['data']['id'] = schema._id payload['data']['attributes']['registration_responses'] = {} payload['data']['attributes']['registration_responses']['datacompletion'] = 'Nope, data collection has not begun' res = app.post_json_api( url_draft_registrations, payload, auth=user.auth, expect_errors=True) errors = res.json['errors'][0] assert res.status_code == 400 assert errors['detail'] == 'For your registration, your response to the \'Data collection status\'' \ ' field is invalid, your response must be one of the provided options.'
from __future__ import unicode_literals from .exceptions import MultipleObjectsReturned, YouTubeError from .models import Video from .utils import safe_filename from urllib import urlencode from urllib2 import urlopen from urlparse import urlparse, parse_qs, unquote import re YT_BASE_URL = 'http://www.youtube.com/get_video_info' #YouTube quality and codecs id map. #source: http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs YT_ENCODING = { #Flash Video 5: ["flv", "240p", "Sorenson H.263", "N/A", "0.25", "MP3", "64"], 6: ["flv", "270p", "Sorenson H.263", "N/A", "0.8", "MP3", "64"], 34: ["flv", "360p", "H.264", "Main", "0.5", "AAC", "128"], 35: ["flv", "480p", "H.264", "Main", "0.8-1", "AAC", "128"], #3GP 36: ["3gp", "240p", "MPEG-4 Visual", "Simple", "0.17", "AAC", "38"], 13: ["3gp", "N/A", "MPEG-4 Visual", "N/A", "0.5", "AAC", "N/A"], 17: ["3gp", "144p", "MPEG-4 Visual", "Simple", "0.05", "AAC", "24"], #MPEG-4 18: ["mp4", "360p", "H.264", "Baseline", "0.5", "AAC", "96"], 22: ["mp4", "720p", "H.264", "High", "2-2.9", "AAC", "192"], 37: ["mp4", "1080p", "H.264", "High", "3-4.3", "AAC", "192"], 38: ["mp4", "3072p", "H.264", "High", "3.5-5", "AAC", "192"], 82: ["mp4", "360p", "H.264", "3D", "0.5", "AAC", "96"], 83: ["mp4", "240p", "H.264", "3D", "0.5", "AAC", "96"], 84: ["mp4", "720p", "H.264", "3D", "2-2.9", "AAC", "152"], 85: ["mp4", "520p", "H.264", "3D", "2-2.9", "AAC", "152"], #WebM 43: ["webm", "360p", "VP8", "N/A", "0.5", "Vorbis", "128"], 44: ["webm", "480p", "VP8", "N/A", "1", "Vorbis", "128"], 45: ["webm", "720p", "VP8", "N/A", "2", "Vorbis", "192"], 46: ["webm", "1080p", "VP8", "N/A", "N/A", "Vorbis", "192"], 100: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "128"], 101: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "192"], 102: ["webm", "720p", "VP8", "3D", "N/A", "Vorbis", "192"] } # The keys corresponding to the quality/codec map above. YT_ENCODING_KEYS = ( 'extension', 'resolution', 'video_codec', 'profile', 'video_bitrate', 'audio_codec', 'audio_bitrate' ) class YouTube(object): _filename = None _fmt_values = [] _video_url = None title = None videos = [] # fmt was an undocumented URL parameter that allowed selecting # YouTube quality mode without using player user interface. @property def url(self): """Exposes the video url.""" return self._video_url @url.setter def url(self, url): """ Defines the URL of the YouTube video.""" self._video_url = url #Reset the filename. self._filename = None #Get the video details. self._get_video_info() @property def filename(self): """ Exposes the title of the video. If this is not set, one is generated based on the name of the video. """ if not self._filename: self._filename = safe_filename(self.title) return self._filename @filename.setter def filename(self, filename): """ Defines the filename.""" self._filename = filename if self.videos: for video in self.videos: video.filename = filename @property def video_id(self): """Gets the video ID extracted from the URL.""" parts = urlparse(self._video_url) qs = getattr(parts, 'query', None) if qs: video_id = parse_qs(qs).get('v', None) if video_id: return video_id.pop() def get(self, extension=None, res=None): """ Return a single video given an extention and resolution. Keyword arguments: extention -- The desired file extention (e.g.: mp4). res -- The desired broadcasting standard of the video (e.g.: 1080p). 
""" result = [] for v in self.videos: if extension and v.extension != extension: continue elif res and v.resolution != res: continue else: result.append(v) if not len(result): return elif len(result) is 1: return result[0] else: d = len(result) raise MultipleObjectsReturned("get() returned more than one " "object -- it returned %d!" % d) def filter(self, extension=None, res=None): """ Return a filtered list of videos given an extention and resolution criteria. Keyword arguments: extention -- The desired file extention (e.g.: mp4). res -- The desired broadcasting standard of the video (e.g.: 1080p). """ results = [] for v in self.videos: if extension and v.extension != extension: continue elif res and v.resolution != res: continue else: results.append(v) return results def _fetch(self, path, data): """ Given a path, traverse the response for the desired data. (A modified ver. of my dictionary traverse method: https://gist.github.com/2009119) Keyword arguments: path -- A tuple representing a path to a node within a tree. data -- The data containing the tree. """ elem = path[0] #Get first element in tuple, and check if it contains a list. if type(data) is list: # Pop it, and let's continue.. return self._fetch(path, data.pop()) #Parse the url encoded data data = parse_qs(data) #Get the element in our path data = data.get(elem, None) #Offset the tuple by 1. path = path[1::1] #Check if the path has reached the end OR the element return #nothing. if len(path) is 0 or data is None: if type(data) is list and len(data) is 1: data = data.pop() return data else: # Nope, let's keep diggin' return self._fetch(path, data) def _parse_stream_map(self, data): """ Python's `parse_qs` can't properly decode the stream map containing video data so we use this instead. Keyword arguments: data -- The parsed response from YouTube. """ videoinfo = { "itag": [], "url": [], "quality": [], "fallback_host": [], "sig": [], "type": [] } text = data["url_encoded_fmt_stream_map"][0] # Split individual videos videos = text.split(",") # Unquote the characters and split to parameters videos = [video.split("&") for video in videos] for video in videos: for kv in video: key, value = kv.split("=") videoinfo.get(key, []).append(unquote(value)) return videoinfo def _get_video_info(self): """ This is responsable for executing the request, extracting the necessary details, and populating the different video resolutions and formats into a list. """ querystring = urlencode({'asv': 3, 'el': 'detailpage', 'hl': 'en_US', 'video_id': self.video_id}) self.title = None self.videos = [] response = urlopen(YT_BASE_URL + '?' 
                           + querystring)

        if response:
            content = response.read().decode()
            data = parse_qs(content)
            if 'errorcode' in data:
                error = data.get('reason', 'An unknown error has occurred')
                if isinstance(error, list):
                    error = error.pop()
                raise YouTubeError(error)

            stream_map = self._parse_stream_map(data)
            video_urls = stream_map["url"]
            # Get the video signatures; YouTube requires them as a url component.
            video_signatures = stream_map["sig"]
            self.title = self._fetch(('title',), content)

            for idx in range(len(video_urls)):
                url = video_urls[idx]
                signature = video_signatures[idx]
                try:
                    fmt, data = self._extract_fmt(url)
                except (TypeError, KeyError):
                    pass
                else:
                    # Add the video signature to the url.
                    url = "%s&signature=%s" % (url, signature)
                    v = Video(url, self.filename, **data)
                    self.videos.append(v)
                    self._fmt_values.append(fmt)

            self.videos.sort()

    def _extract_fmt(self, text):
        """
        YouTube does not pass a completely valid URL-encoded form; I suspect
        this is supposed to act as a deterrent. Nothing a regular expression
        can't handle.

        Keyword arguments:
        text -- The malformed data contained within each url node.
        """
        itag = re.findall('itag=(\d+)', text)
        if itag and len(itag) == 1:
            itag = int(itag[0])
            attr = YT_ENCODING.get(itag, None)
            if not attr:
                return itag, None
            # Pair the encoding keys with the attributes for this itag.
            data = dict(zip(YT_ENCODING_KEYS, attr))
            return itag, data
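
# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).  It
# assumes the request made by _get_video_info() succeeds; the watch URL below
# is a hypothetical placeholder rather than a real video id.
if __name__ == '__main__':
    yt = YouTube()
    # Assigning `url` resets the filename and triggers _get_video_info(),
    # which populates `videos` with one Video per available format.
    yt.url = 'http://www.youtube.com/watch?v=XXXXXXXXXXX'
    yt.filename = 'example_video'
    # get() returns a single match (or None) and raises
    # MultipleObjectsReturned if the criteria are ambiguous;
    # filter() always returns a list.
    best = yt.get('mp4', '720p')
    if best is None:
        for v in yt.filter('mp4'):
            print(v.resolution)
    else:
        print(best.resolution)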
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union from . import html_css T = TypeVar("T") BLANK_FILE_FORMAT = "<file><a>TeX primitive</a></file>" def format_template( n: int, align: str = "<", min_: Optional[int] = None, line_up: bool = True, ) -> str: if not line_up: return "{text:%s}" % align if align == "^" and (not min_ or n > min_): return " {text:%s%s}" % (align, n - 1) return "{text:%s%s}" % (align, n) def normal_format( text: str, n: int, align: str = "<", min_: Optional[int] = None, line_up: bool = True, ) -> str: template = format_template(n, align=align, min_=min_, line_up=line_up) return template.format(text=text) def tagged_format( text: Union[str, int], tag: str, n: int, align: str = "<", min_: Optional[int] = None, line_up: bool = True, ) -> str: temp = format_template(n, align=align, min_=min_, line_up=line_up) init = temp.format(text=text) total = len(init) left = total - len(init.lstrip()) right = total - len(init.rstrip()) template = ( (" " * left) + "<{tag}>" + init[left:total - right] + "</{tag}>" + (" " * right) ) return template.format(tag=tag) def nice_sorted(list_: List[T], reverse: bool = False) -> List[T]: main = [] # type: List[str] others = [] # type: List[T] for x in list_: if x is None: pass elif isinstance(x, str): main.append(x) else: others.append(x) main = sorted(main, key=html_css.strip_tags) inherits, upper, mixed, lower = [], [], [], [] for x in main: raw = html_css.strip_tags(x) if raw.startswith("inherits"): inherits.append(x) elif raw.isupper(): upper.append(x) elif any(c.isupper() for c in raw): mixed.append(x) else: lower.append(x) result = others + lower + mixed + upper + inherits return result[::-1] if reverse else result class InterfaceLoader: def __init__(self) -> None: self.syntax = '<syntax>{syntax}</syntax>' self.docstring = '<docstring>{docstring}</docstring>' self.file = '<file><a href="file:{file}">{file}</a></file>' def load(self, *args, **kwargs) -> List[str]: raw = self.render(*args, **kwargs) if kwargs.get("protect_space", False): return [html_css.protect_space(s) for s in raw] return raw def render( self, name: str, list_: List[Dict[str, Any]], **kwargs: bool ) -> List[str]: self.kwargs = kwargs self.name = name self.match_indentation = self.kwargs.get("match_indentation", True) self.hang_indentation = self.kwargs.get("hang_indentation", True) parts = [] files = set() for alt in list_: content = alt.get("con") files.add(alt.get("fil")) sig = [] if not content: syntax, docstring = self.render_aux([]) elif isinstance(content, dict): syntax, docstring = self.render_aux([content]) elif isinstance(content, list): syntax, docstring = self.render_aux(content) else: raise Exception('unexpected type "{}"'.format(type(content))) sig.append(self.syntax.format(syntax=syntax)) if docstring: sig.append(self.docstring.format(docstring=docstring)) parts.append("\n\n".join(sig)) if self.kwargs.get("show_source_files", False) and files: prim, rest = 0, [] for f in files: if f is None: prim += 1 else: rest.append(f) source = " ".join( [self.file_format(k) for k in sorted(rest)] + [BLANK_FILE_FORMAT for _ in range(prim)] ) else: source = "" if self.kwargs.get("show_copy_pop_up", False): copy = '<clipboard><a href="copy:plain">copy text</a></clipboard>' # copy = ( # '<clipboard>copy pop-up text: <a href="copy:plain">(plain)' # '</a>, <a href="copy:html">(HTML)</a></clipboard>' # ) else: copy = "" return ["\n\n".join(parts), source, copy] def file_format(self, f: str) -> str: return self.file.format(file=f) def render_aux(self, list_: 
list) -> Tuple[str, str]: self._syntax = [ " " * (len(self.name) + 1), html_css.control_sequence(self.name), " " * (len(self.name) + 1), ] self._docstring = [] # type: List[str] self._n = 0 for arg in list_: if arg: self.new_arg() self._content = arg.get("con") self._inherits = arg.get("inh") self._optional = arg.get("opt") self._rendering = arg.get("ren") self._len = len(html_css.unescape( html_css.strip_tags(self._rendering) )) if self._content is None and self._inherits is None: self.blank() else: self._n += 1 self.do_syntax() self.do_docstring() self.clean_syntax() return "\n".join(self._syntax), "\n\n".join(self._docstring) def new_arg(self) -> None: for i in range(3): self._syntax[i] += " " def clean_syntax(self) -> None: for i in range(2, -1, -1): if not html_css.strip_tags(self._syntax[i]).rstrip(): del self._syntax[i] def blank(self) -> None: self._syntax[0] += " " * self._len self._syntax[1] += self._rendering self._syntax[2] += " " * self._len def do_syntax(self) -> None: if self._optional: for i in range(3): self._syntax[i] += "<opt>" self._syntax[0] += tagged_format(self._n, "num", self._len, align="^") self._syntax[1] += self._rendering self._syntax[2] += normal_format( "OPT" if self._optional else "", self._len, align="^", min_=3, ) if self._optional: for i in range(3): self._syntax[i] += "</opt>" def do_docstring(self) -> None: if isinstance(self._content, str): self._docstring.append(self.docstring_str()) elif isinstance(self._content, list): self._docstring.append(self.docstring_list()) elif isinstance(self._content, dict): self._docstring.append(self.docstring_dict()) if self._inherits: if isinstance(self._inherits, list): inherits = "<inh>inherits:</inh> " + ", ".join( html_css.control_sequence(i) for i in self._inherits ) else: inherits = ( "<inh>inherits:</inh> " + html_css.control_sequence(self._inherits) ) if self._content: self._docstring[-1] += "\n" + self.guide(num=False) + inherits else: self._docstring.append(self.guide() + inherits) def docstring_str(self) -> str: return self.guide() + self._content def docstring_list(self) -> str: line_break = self.kwargs.get("line_break", 65) if isinstance(line_break, int): return self.docstring_list_break(line_break) return self.docstring_list_nobreak() def docstring_list_break(self, line_break: int) -> str: content = nice_sorted(self._content.copy(), reverse=True) lines = [] init = True while content: lines.append(self.guide(num=init)) init, begin, space = False, True, True while content and space: s = content.pop() if begin: lines[-1] += s begin = False else: len_ = len(html_css.strip_tags(lines[-1] + s)) + 1 if len_ > line_break: space = False content.append(s) else: lines[-1] += " " + s return "\n".join(lines) def docstring_list_nobreak(self) -> str: return self.guide() + " ".join(self._content) def docstring_dict(self) -> str: line_break = self.kwargs.get("line_break", 65) len_ = max(len(html_css.strip_tags(k)) for k in self._content) if isinstance(line_break, int) and not isinstance(line_break, bool): return self.docstring_dict_break(len_, line_break) return self.docstring_dict_nobreak(len_) def docstring_dict_break(self, len_: int, line_break: int) -> str: keys = nice_sorted(self._content, reverse=True) lines = [] init = True while keys: k = keys.pop() k_len = len(k) v = self._content[k] lines.append( self.assignments_guide( len_ if self.match_indentation else k_len, key=k, num=init, ) ) init = False if isinstance(v, str): lines[-1] += " " + v elif isinstance(v, list): for s in nice_sorted(v): next_len = 
len(html_css.strip_tags(lines[-1] + s)) + 1 if next_len > line_break: if self.hang_indentation: lines.append( self.assignments_guide( len_ if self.match_indentation else k_len, num=init, ) ) lines[-1] += " " + s else: lines.append(self.assignments_guide(0, num=init)) lines[-1] += s else: lines[-1] += " " + s return "\n".join(lines) def docstring_dict_nobreak(self, len_: int) -> str: lines = [] for i, k in enumerate(nice_sorted(self._content)): v = self._content[k] lines.append(self.assignments_guide(len_, key=k, num=not i) + " ") lines[-1] += " ".join(nice_sorted(v)) if isinstance(v, list) else v return "\n".join(lines) def guide(self, num: bool = True) -> str: if num: return tagged_format(self._n, "num", 4) return " " def assignments_guide( self, len_: int, key: Optional[str] = None, num: bool = True, ) -> str: start = self.guide(num=num) if key: len_ += len(key) - len(html_css.strip_tags(key)) text = \ tagged_format(key, "key", len_, line_up=self.match_indentation) return start + text + " <equ>=</equ>" return start + (" " * (len_ + 2))
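
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not from the original source): shows how
# the padding helpers behave.  The field widths and the "num" tag are
# arbitrary example values.
if __name__ == "__main__":
    # Left-align "foo" in a 10-character field.
    print(repr(normal_format("foo", 10)))               # 'foo       '
    # Centre the argument number in a 7-character field; only the visible
    # text is wrapped in the tag, the padding stays outside it.
    print(repr(tagged_format(3, "num", 7, align="^")))  # '   <num>3</num>   '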
from common_fixtures import * # NOQA from requests.auth import AuthBase from cattle import ClientApiError import ast if_test_ldap = pytest.mark.skipif(not os.environ.get('API_AUTH_LDAP_SERVER'), reason='API_AUTH_LDAP_SERVER is not set') if_do_key = pytest.mark.skipif( not os.environ.get('DIGITALOCEAN_KEY'), reason="Digital Ocean key is not set") if_ldap_port = pytest.mark.skipif( os.environ.get('LDAP_PORT') != 'True', reason="LDAP_PORT is not True") ADMIN_LDAP_CLIENT = None class LdapAuth(AuthBase): def __init__(self, jwt, prj_id=None): # setup any auth-related data here self.jwt = jwt self.prj_id = prj_id def __call__(self, r): # modify and return the request r.headers['Authorization'] = 'Bearer ' + self.jwt if self.prj_id is not None: r.headers['X-API-Project-Id'] = self.prj_id return r @pytest.fixture(scope='session', autouse=True) def ldap_client(admin_client): key = admin_client.create_apiKey() admin_client.wait_success(key) ldap_client = from_env(url=cattle_url(), access_key=key.publicValue, secret_key=key.secretValue) global ADMIN_LDAP_CLIENT ADMIN_LDAP_CLIENT = ldap_client def create_ldap_client(username=None, password=None, project_id=None): client = from_env(url=cattle_url(), access_key=ADMIN_LDAP_CLIENT._access_key, secret_key=ADMIN_LDAP_CLIENT._secret_key) client.delete_by_id = delete_by_id assert client.valid() jwt = get_authed_token(username=username, password=password)['jwt'] client._access_key = None client._secret_key = None client._auth = LdapAuth(jwt, prj_id=project_id) client.reload_schema() assert client.valid() identities = client.list_identity().data assert len(identities) > 0 return client def get_authed_token(username=None, password=None): token = requests.post(cattle_url() + '/token', { 'authProvider': "openldapconfig", 'code': username + ':' + password }) assert token.ok token = token.json() assert token['type'] != 'error' assert token['user'] == username assert token['userIdentity']['login'] == username return token def delete_ldap_token(id, cookies): response = requests.delete(cattle_url() + '/token/' + id, cookies=cookies) assert response.status_code == 204 for c in response.cookies: assert c.name != "token" assert "token=;Path=/;Expires=Thu, 01 Jan 1970 00:00:00 GMT;" \ in response.headers['set-cookie'] def load_config(access_mode='unrestricted'): if os.environ.get('API_AUTH_LDAP_TLS') == 'True': tls = True else: tls = False config = { 'accessMode': access_mode, 'server': os.environ.get('API_AUTH_LDAP_SERVER'), 'domain': os.environ.get('API_AUTH_LDAP_SEARCH_BASE'), 'port': os.environ.get('API_AUTH_LDAP_PORT'), 'serviceAccountPassword': os.environ.get('API_AUTH_LDAP_' 'SERVICE_ACCOUNT_PASSWORD'), 'serviceAccountUsername': os.environ.get('API_AUTH_LDAP_' 'SERVICE_ACCOUNT_USERNAME'), 'groupNameField': os.environ.get('SCHEMA_LDAP_GROUP_NAME_FIELD'), 'groupObjectClass': os.environ.get('SCHEMA_LDAP_GROUP_OBJECT_CLASS'), 'groupSearchField': os.environ.get('SCHEMA_LDAP_GROUP_SEARCH_FIELD'), 'groupDNField': os.environ.get('SCHEMA_LDAP_GROUP_DN_FIELD'), 'groupMemberMappingAttribute': "memberUid", 'groupMemberUserAttribute': os.environ.get('SCHEMA_LDAP_GROUP_' 'MEMBER_USER_ATTRIBUTE'), 'loginDomain': None, 'enabled': True, 'tls': tls, 'userDisabledBitMask': os.environ.get('SCHEMA_LDAP_USER_DISABLED' '_STATUS_BITMASK'), 'userEnabledAttribute': None, 'userLoginField': os.environ.get('SCHEMA_LDAP_USER_LOGIN_FIELD'), 'userNameField': os.environ.get('SCHEMA_LDAP_USER_NAME_FIELD'), 'userObjectClass': os.environ.get('SCHEMA_LDAP_USER_OBJECT_CLASS'), 'userSearchField': 
os.environ.get('SCHEMA_LDAP_USER_SEARCH_FIELD'), 'userMemberAttribute': "memberOf" } return config def idToMember(identity, role): return { 'externalId': identity.externalId, 'externalIdType': identity.externalIdType, 'role': role } @pytest.fixture(scope='session', autouse=True) def turn_on_off_ldap_auth(admin_client, request): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') # Disable LDAP Authentication config = load_config() config['enabled'] = False admin_client.create_openldapconfig(config) # Get main user token and client client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) token = get_authed_token(username=ldap_main_user, password=ldap_main_pass) user = token['userIdentity'] # Enable LDAP Authentication allowed_identities = [] allowed_identities.append(user) config['enabled'] = True config['allowedIdentities'] = allowed_identities admin_client.create_openldapconfig(config) def fin(): config = load_config() config['enabled'] = None client.create_openldapconfig(config) request.addfinalizer(fin) def reconfigure_ldap(admin_client, domain, groupSearchDomain): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') config = load_config() client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) token = get_authed_token(username=ldap_main_user, password=ldap_main_pass) config['enabled'] = None client.create_openldapconfig(config) user = token['userIdentity'] allowed_identities = [] allowed_identities.append(user) config['enabled'] = True config['allowedIdentities'] = allowed_identities config['domain'] = domain config['groupSearchDomain'] = groupSearchDomain admin_client.create_openldapconfig(config) # 1 @if_test_ldap def test_allow_any_ldap_user(admin_client): ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') token = get_authed_token(username=ldap_user2, password=ldap_pass2) cookies = dict(token=token['jwt']) schemas = requests.get(cattle_url() + "schemas", cookies=cookies) assert schemas.status_code == 200 @if_test_ldap def test_ldap_delete_token_on_logout(admin_client): ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') token = get_authed_token(username=ldap_user2, password=ldap_pass2) cookies = dict(token=token['jwt']) identities = requests.get(cattle_url() + "identities", cookies=cookies) assert identities.status_code == 200 delete_ldap_token("current", cookies) identities = requests.get(cattle_url() + "identities", cookies=cookies) assert identities.status_code == 401 # 4 @if_test_ldap def test_ldap_user_with_new_env(admin_client): ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') # test creation of new env with new valid user token = get_authed_token(username=ldap_user3, password=ldap_pass3) cookies = dict(token=token['jwt']) schemas = requests.get(cattle_url() + "schemas", cookies=cookies) assert schemas.status_code == 200 u3_client = create_ldap_client(username=ldap_user3, password=ldap_pass3) projects = u3_client.list_project().data found = False for p in projects: if p['name'] == ldap_user3 + "-Default": found = True break assert found # 5 @if_test_ldap def test_ldap_create_new_env(admin_client): ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') u3_client = create_ldap_client(username=ldap_user3, password=ldap_pass3) u3_identity = None for obj in u3_client.list_identity().data: if obj.externalIdType == 
'openldap_user': u3_identity = obj break # Creating a new project project_name = random_str() + '-test_case5' project = u3_client.create_project(name=project_name, members=[ idToMember(u3_identity, 'owner') ]) u3_client.wait_success(project) assert u3_client.by_id('project', project.id) is not None projects = u3_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert len(p.members) == 1 assert p['members'][0]['role'] == 'owner' break assert found # 6 @if_test_ldap def test_ldap_create_new_env_add_member(admin_client): ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break u3_client = create_ldap_client(username=ldap_user3, password=ldap_pass3) u3_identity = None for obj in u3_client.list_identity().data: if obj.externalIdType == 'openldap_user': u3_identity = obj break # Creating a new project project_name = random_str() + '-test_case6' project = u2_client.create_project(name=project_name, members=[ idToMember(u2_identity, 'owner') ]) u2_client.wait_success(project) assert u2_client.by_id('project', project.id) is not None projects = u2_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert len(p.members) == 1 assert p['members'][0]['role'] == 'owner' break assert found # Add new member as member new_members = [ idToMember(u2_identity, 'owner'), idToMember(u3_identity, 'member') ] project = u2_client.by_id('project', project.id) project.setmembers(members=new_members) assert u2_client.by_id('project', project.id) is not None assert u3_client.by_id('project', project.id) is not None projects = u3_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert found # Make sure the new user has no privileges project = u3_client.by_id('project', project.id) with pytest.raises(AttributeError) as excinfo: project.setmembers(members=new_members) assert "object has no attribute" in str(excinfo.value) # 7 @if_test_ldap def test_ldap_create_new_env_add_owner(admin_client): ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') ldap_user4 = os.environ.get('LDAP_USER4') ldap_pass4 = os.environ.get('LDAP_PASS4') u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break u3_client = create_ldap_client(username=ldap_user3, password=ldap_pass3) u3_identity = None for obj in u3_client.list_identity().data: if obj.externalIdType == 'openldap_user': u3_identity = obj break u4_client = create_ldap_client(username=ldap_user4, password=ldap_pass4) u4_identity = None for obj in u4_client.list_identity().data: if obj.externalIdType == 'openldap_user': u4_identity = obj break # Creating a new project project_name = random_str() + '-test_case7' project = u2_client.create_project(name=project_name, members=[ idToMember(u2_identity, 'owner') ]) u2_client.wait_success(project) assert u2_client.by_id('project', project.id) is not None projects = u2_client.list_project().data found = False for p in projects: 
if p['name'] == project_name: found = True assert len(p.members) == 1 assert p['members'][0]['role'] == 'owner' break assert found # Add new member as member new_members = [ idToMember(u2_identity, 'owner'), idToMember(u3_identity, 'owner') ] project = u2_client.by_id('project', project.id) project.setmembers(members=new_members) assert u2_client.by_id('project', project.id) is not None assert u3_client.by_id('project', project.id) is not None projects = u3_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert found # Make sure the new user has privileges to add new members new_members = [ idToMember(u2_identity, 'owner'), idToMember(u3_identity, 'owner'), idToMember(u4_identity, 'member') ] same_project = u3_client.by_id('project', project.id) same_project.setmembers(members=new_members) projects = u4_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert found # 8 @if_test_ldap def test_ldap_create_new_env_add_group_member(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') group = os.environ.get('LDAP_GROUP') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) main_identity = None for obj in main_client.list_identity().data: if obj.externalIdType == 'openldap_user': main_identity = obj break # Creating a new project project_name = random_str() + '-test_case8' project = main_client.create_project(name=project_name, members=[ idToMember(main_identity, 'owner') ]) main_client.wait_success(project) assert main_client.by_id('project', project.id) is not None # Add new group as member group_identity = main_client.list_identity(name=group).data[0] new_members = [ idToMember(main_identity, 'owner'), idToMember(group_identity, 'member') ] project = main_client.by_id('project', project.id) project.setmembers(members=new_members) u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) projects = u2_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert found project = u2_client.by_id('project', project.id) with pytest.raises(AttributeError) as excinfo: project.setmembers(members=new_members) assert "object has no attribute" in str(excinfo.value) # 9 @if_test_ldap def test_ldap_create_new_env_add_group_owner(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') group = os.environ.get('LDAP_GROUP') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) main_identity = None for obj in main_client.list_identity().data: if obj.externalIdType == 'openldap_user': main_identity = obj break # Creating a new project project_name = random_str() + '-test_case9' project = main_client.create_project(name=project_name, members=[ idToMember(main_identity, 'owner') ]) main_client.wait_success(project) assert main_client.by_id('project', project.id) is not None # Add new group as owner group_identity = main_client.list_identity(name=group).data[0] new_members = [ idToMember(main_identity, 'owner'), idToMember(group_identity, 'owner') ] project = main_client.by_id('project', project.id) project.setmembers(members=new_members) project = main_client.by_id('project', project.id) project_member = 
project.projectMembers().data[1] assert project_member['name'] == group assert project_member['role'] == 'owner' # Make sure user2 has the privileges to edit the env u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) projects = u2_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert found project = u2_client.by_id('project', project.id) project.setmembers(members=new_members) @if_test_ldap def test_ldap_group_search_domain(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') group = os.environ.get('LDAP_GROUP') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) # Narrow down domain to OU=dev, so that group search fails reconfigure_ldap(main_client, 'ou=dev,dc=us-west-2,dc=compute,\ dc=internal', '') assert len(main_client.list_identity(name=group).data) == 0 # Set groupSearchDomain so group search works reconfigure_ldap(main_client, 'ou=dev,dc=us-west-2,dc=compute,\ dc=internal', 'ou=groups,dc=us-west-2,dc=compute,dc=internal') assert len(main_client.list_identity(name=group).data) == 1 assert main_client.list_identity(name=group).data[0]['login'] == group # Set domain back to original value reconfigure_ldap(main_client, os.environ.get('API_AUTH_LDAP_SEARCH_BASE'), '') # Two groups with same name 'caringQA' are added under separate OUs # OU=groups has 'caringQA' and so does OU=groupsDuplicate duplicate_name_group = 'caringQA' # Set groupSearchDomain such that caringQA from only OU=groups is returned reconfigure_ldap(main_client, os.environ.get('API_AUTH_LDAP_SEARCH_BASE'), 'ou=groups,dc=us-west-2,dc=compute,dc=internal') groups_searched = main_client.list_identity(name=duplicate_name_group).data assert len(groups_searched) == 1 assert groups_searched[0]['externalId'] == \ 'cn=caringQA,ou=groups,dc=us-west-2,dc=compute,dc=internal' # Set groupSearchDomain such that # caringQA from only OU=groupsDuplicate is returned reconfigure_ldap(main_client, os.environ.get('API_AUTH_LDAP_SEARCH_BASE'), 'ou=groupsDuplicate,dc=us-west-2,dc=compute,dc=internal') groups_searched = main_client.list_identity(name=duplicate_name_group).data assert len(groups_searched) == 1 assert groups_searched[0]['externalId'] == \ 'cn=caringQA,ou=groupsDuplicate,dc=us-west-2,dc=compute,dc=internal' # Unset groupSearchDomain, so both groups get searched # and settings are restored for remanining tests reconfigure_ldap(main_client, os.environ.get('API_AUTH_LDAP_SEARCH_BASE'), '') assert len(main_client.list_identity(name=duplicate_name_group)) == 2 # 10 @if_test_ldap def test_ldap_create_new_env_change_owner_to_member(admin_client): ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break u3_client = create_ldap_client(username=ldap_user3, password=ldap_pass3) u3_identity = None for obj in u3_client.list_identity().data: if obj.externalIdType == 'openldap_user': u3_identity = obj break # Creating a new project project_name = random_str() + '-test_case10' project = u2_client.create_project(name=project_name, members=[ idToMember(u2_identity, 'owner') ]) u2_client.wait_success(project) assert u2_client.by_id('project', project.id) is not None # Add new 
member as owner new_members = [ idToMember(u2_identity, 'owner'), idToMember(u3_identity, 'owner') ] project = u2_client.by_id('project', project.id) project.setmembers(members=new_members) assert u2_client.by_id('project', project.id) is not None assert u3_client.by_id('project', project.id) is not None projects = u3_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert found # Change owner to member new_members = [ idToMember(u2_identity, 'owner'), idToMember(u3_identity, 'member'), ] same_project = u2_client.by_id('project', project.id) same_project.setmembers(members=new_members) same_project = u3_client.by_id('project', project.id) with pytest.raises(AttributeError) as excinfo: same_project.setmembers(members=new_members) assert "object has no attribute" in str(excinfo.value) # 11 @if_test_ldap def test_ldap_create_new_env_change_member_to_owner(admin_client): ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break u3_client = create_ldap_client(username=ldap_user3, password=ldap_pass3) u3_identity = None for obj in u3_client.list_identity().data: if obj.externalIdType == 'openldap_user': u3_identity = obj break # Creating a new project project_name = random_str() + '-test_case11' project = u2_client.create_project(name=project_name, members=[ idToMember(u2_identity, 'owner') ]) u2_client.wait_success(project) assert u2_client.by_id('project', project.id) is not None # Add new member as member new_members = [ idToMember(u2_identity, 'owner'), idToMember(u3_identity, 'member') ] project = u2_client.by_id('project', project.id) project.setmembers(members=new_members) assert u2_client.by_id('project', project.id) is not None assert u3_client.by_id('project', project.id) is not None projects = u3_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert found # Change owner to member new_members = [ idToMember(u2_identity, 'owner'), idToMember(u3_identity, 'owner'), ] same_project = u2_client.by_id('project', project.id) same_project.setmembers(members=new_members) # Try to delete user2 new_members = [ idToMember(u3_identity, 'owner') ] same_project = u3_client.by_id('project', project.id) same_project.setmembers(members=new_members) projects = u2_client.list_project().data found = True for p in projects: if p['name'] == project_name: found = False assert found # 12 @if_test_ldap def test_ldap_create_new_env_remove_existing_owner(admin_client): ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break u3_client = create_ldap_client(username=ldap_user3, password=ldap_pass3) u3_identity = None for obj in u3_client.list_identity().data: if obj.externalIdType == 'openldap_user': u3_identity = obj break # Creating a new project project_name = random_str() + '-test_case12' project = u2_client.create_project(name=project_name, members=[ idToMember(u2_identity, 'owner') ]) 
u2_client.wait_success(project) assert u2_client.by_id('project', project.id) is not None # Add new member as owner new_members = [ idToMember(u2_identity, 'owner'), idToMember(u3_identity, 'owner') ] project = u2_client.by_id('project', project.id) project.setmembers(members=new_members) assert u2_client.by_id('project', project.id) is not None assert u3_client.by_id('project', project.id) is not None projects = u3_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert found # Try to delete user3 new_members = [ idToMember(u2_identity, 'owner') ] same_project = u2_client.by_id('project', project.id) same_project.setmembers(members=new_members) projects = u3_client.list_project().data found = True for p in projects: if p['name'] == project_name: found = False assert found assert u3_client.by_id('project', project.id) is None # 13 @if_test_ldap def test_ldap_create_new_env_remove_existing_member(admin_client): ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break u3_client = create_ldap_client(username=ldap_user3, password=ldap_pass3) u3_identity = None for obj in u3_client.list_identity().data: if obj.externalIdType == 'openldap_user': u3_identity = obj break # Creating a new project project_name = random_str() + '-test_case13' project = u2_client.create_project(name=project_name, members=[ idToMember(u2_identity, 'owner') ]) u2_client.wait_success(project) assert u2_client.by_id('project', project.id) is not None # Add new member as owner new_members = [ idToMember(u2_identity, 'owner'), idToMember(u3_identity, 'member') ] project = u2_client.by_id('project', project.id) project.setmembers(members=new_members) assert u2_client.by_id('project', project.id) is not None assert u3_client.by_id('project', project.id) is not None projects = u3_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert found # Try to delete user3 new_members = [ idToMember(u2_identity, 'owner') ] same_project = u2_client.by_id('project', project.id) same_project.setmembers(members=new_members) projects = u3_client.list_project().data found = True for p in projects: if p['name'] == project_name: found = False assert found assert u3_client.by_id('project', project.id) is None # 14,15 @if_test_ldap def test_ldap_deactivate_activate_env(admin_client): ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break u3_client = create_ldap_client(username=ldap_user3, password=ldap_pass3) u3_identity = None for obj in u3_client.list_identity().data: if obj.externalIdType == 'openldap_user': u3_identity = obj break # Creating a new project project_name = random_str() + '-test_case14' project = u2_client.create_project(name=project_name, members=[ idToMember(u2_identity, 'owner') ]) u2_client.wait_success(project) assert u2_client.by_id('project', project.id) is not None # Add new member as owner new_members 
= [ idToMember(u2_identity, 'owner'), idToMember(u3_identity, 'member') ] project = u2_client.by_id('project', project.id) project.setmembers(members=new_members) assert u2_client.by_id('project', project.id) is not None assert u3_client.by_id('project', project.id) is not None projects = u3_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert found # Deactivate environment dec_project = u2_client.by_id('project', project.id) dec_project.deactivate() dec_project = u2_client.by_id('project', project.id) assert dec_project['state'] == 'inactive' # Owners should see the env in their "Manage Environment" Tab projects = u2_client.list_project(all=True).data found = False for p in projects: if p['name'] == project_name: found = True assert found # Members has no access to the environment try: project = u3_client.by_id('project', project.id) except: assert True # Activate environment back dec_project.activate() act_project = u2_client.by_id('project', dec_project.id) assert act_project['state'] == 'active' assert u3_client.by_id('project', dec_project.id) is not None # 16 @if_test_ldap def test_ldap_remove_deactivated_env(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break u3_client = create_ldap_client(username=ldap_user3, password=ldap_pass3) u3_identity = None for obj in u3_client.list_identity().data: if obj.externalIdType == 'openldap_user': u3_identity = obj break # Creating a new project project_name = random_str() + '-test_case16' project = u2_client.create_project(name=project_name, members=[ idToMember(u2_identity, 'owner') ]) u2_client.wait_success(project) assert u2_client.by_id('project', project.id) is not None # Add new member as owner new_members = [ idToMember(u2_identity, 'owner'), idToMember(u3_identity, 'member') ] project = u2_client.by_id('project', project.id) project.setmembers(members=new_members) assert u2_client.by_id('project', project.id) is not None assert u3_client.by_id('project', project.id) is not None projects = u3_client.list_project().data found = False for p in projects: if p['name'] == project_name: found = True assert found # Deactivate environment dec_project = u2_client.by_id('project', project.id) dec_project.deactivate() dec_project = u2_client.by_id('project', project.id) assert dec_project['state'] == 'inactive' # Owners should see the env in their "Manage Environment" Tab projects = u2_client.list_project(all=True).data found = False for p in projects: if p['name'] == project_name: found = True assert found # Members has no access to the environment try: project = u3_client.by_id('project', project.id) except: assert True # Remove environment main_client.delete(dec_project) time.sleep(5) project = main_client.by_id('project', dec_project.id) assert project.state == 'purged' or project.state == 'removed' # Users can't access the environment anymore assert u2_client.by_id('project', project.id) is None assert u3_client.by_id('project', project.id) is None # 17,18 @if_test_ldap def 
test_ldap_activate_deactivate_account(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) u2_token = get_authed_token(username=ldap_user2, password=ldap_pass2) # Deactivate the user2 account ldap_u2_name = u2_token['userIdentity']['name'] u2_account = main_client.list_account(name=ldap_u2_name).data[0] u2_account = main_client.by_id("account", u2_account.id) u2_account.deactivate() main_client.wait_success(u2_account) cookies = dict(token=u2_token['jwt']) bad_auth = requests.get(cattle_url() + "schemas", cookies=cookies) assert bad_auth.status_code == 401 # Active the user1 account u2_account = main_client.by_id("account", u2_account.id) u2_account.activate() main_client.wait_success(u2_account) cookies = dict(token=u2_token['jwt']) good_auth = requests.get(cattle_url() + "schemas", cookies=cookies) assert good_auth.status_code == 200 # 19 @if_test_ldap def test_ldap_purge_account(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER4') ldap_pass2 = os.environ.get('LDAP_PASS4') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) u2_token = get_authed_token(username=ldap_user2, password=ldap_pass2) u2_name = u2_token['userIdentity']['name'] # Purge user2 account u2_account = main_client.list_account(name=u2_name).data[0] u2_account = main_client.by_id("account", u2_account.id) u2_account.deactivate() main_client.wait_success(u2_account) main_client.delete(u2_account) u2_account = main_client.wait_success(u2_account) u2_account.purge() main_client.wait_success(u2_account) assert u2_account.removed is not None projects = main_client.list_project(all=True).data for p in projects: project = main_client.by_id('project', p.id) project_members = project.projectMembers() for project_member in project_members: if project_member['name'] == u2_name: assert False # 23,24,25,26,27 @if_test_ldap def test_ldap_member_permissions(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) main_identity = None for obj in main_client.list_identity().data: if obj.externalIdType == 'openldap_user': main_identity = obj break u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break # Create new env project_name = random_str() + '-test_case23' project = main_client.create_project(name=project_name, members=[ idToMember(main_identity, 'owner'), idToMember(u2_identity, 'member') ]) main_client.wait_success(project) assert main_client.by_id('project', project.id) is not None assert u2_client.by_id('project', project.id) is not None # user2 can not change, remove, or add users new_members = [ idToMember(main_identity, 'member'), idToMember(u2_identity, 'owner') ] member_project = u2_client.by_id('project', project.id) with pytest.raises(AttributeError) as excinfo: member_project.setmembers(members=new_members) assert "object has no attribute" in str(excinfo.value) # user2 can't deactivate or remove 
environment try: dec_project = u2_client.by_id('project', project.id) dec_project.deactivate() dec_project = u2_client.by_id('project', project.id) assert dec_project['state'] == 'inactive' user1_client.delete(dec_project) time.sleep(5) project = user1_client.by_id('project', project.id) assert project.state == 'purged' or project.state == 'removed' assert False except: assert True # 28 @if_test_ldap def test_ldap_change_user_to_admin(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) # Purge user2 account first u2_token = get_authed_token(username=ldap_user2, password=ldap_pass2) u2_name = u2_token['userIdentity']['name'] u2_account = main_client.list_account(name=u2_name).data[0] u2_account = main_client.by_id("account", u2_account.id) u2_account.deactivate() main_client.wait_success(u2_account) main_client.delete(u2_account) u2_account = main_client.wait_success(u2_account) u2_account.purge() main_client.wait_success(u2_account) assert u2_account.removed is not None # Test with user u2_token = get_authed_token(username=ldap_user2, password=ldap_pass2) u2_name = u2_token['userIdentity']['name'] u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) with pytest.raises(ClientApiError) as excinfo: u2_client.list_openldapconfig() assert "is not a valid type" in str(excinfo.value) # change account from user to admin u2_account = main_client.list_account(name=u2_name).data[0] u2_account = main_client.by_id("account", u2_account.id) main_client.wait_success(u2_account) main_client.update_by_id_account(u2_account.id, kind='admin') u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) assert u2_client.list_openldapconfig().data is not None # change account from admin to user u2_account = main_client.list_account(name=u2_name).data[0] u2_account = main_client.by_id("account", u2_account.id) main_client.wait_success(u2_account) main_client.update_by_id_account(u2_account.id, kind='user') # 29 @if_test_ldap def test_ldap_admin_list_all_env(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) # Purge user2 account first u2_token = get_authed_token(username=ldap_user2, password=ldap_pass2) u2_name = u2_token['userIdentity']['name'] u2_account = main_client.list_account(name=u2_name).data[0] u2_account = main_client.by_id("account", u2_account.id) u2_account.deactivate() main_client.wait_success(u2_account) main_client.delete(u2_account) u2_account = main_client.wait_success(u2_account) u2_account.purge() main_client.wait_success(u2_account) assert u2_account.removed is not None u2_token = get_authed_token(username=ldap_user2, password=ldap_pass2) u2_name = u2_token['userIdentity']['name'] # change account from user to admin u2_account = main_client.list_account(name=u2_name).data[0] u2_account = main_client.by_id("account", u2_account.id) main_client.wait_success(u2_account) main_client.update_by_id_account(u2_account.id, kind='admin') # List all projects projects = main_client.list_project() # Create new project main_client.create_project() for project in projects: project_url = cattle_url() \ + "/projects/" + 
project.id + "/projectmembers" cookies = dict(token=u2_token['jwt']) access = requests.get(project_url, cookies=cookies) assert access.ok # change account from admin to user u2_account = main_client.list_account(name=u2_name).data[0] u2_account = main_client.by_id("account", u2_account.id) main_client.wait_success(u2_account) main_client.update_by_id_account(u2_account.id, kind='user') # 30 @if_test_ldap @if_do_key def test_ldap_member_add_host(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) main_identity = None for obj in main_client.list_identity().data: if obj.externalIdType == 'openldap_user': main_identity = obj break u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break # test creation of new env project = main_client.create_project(members=[ idToMember(main_identity, 'owner'), idToMember(u2_identity, 'member') ]) main_client.wait_success(project) assert main_client.by_id('project', project.id) is not None assert u2_client.by_id('project', project.id) is not None u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2, project_id=project.id) # Add new host host_list = \ add_digital_ocean_hosts( u2_client, 1) assert len(host_list) == 1 # Remove host host = u2_client.list_host().data[0] deactivated_host = host.deactivate() u2_client.wait_success(deactivated_host) deactivated_host = u2_client.list_host().data[0] deactivated_host.remove() # 31 @if_test_ldap @if_do_key def test_ldap_create_new_env_with_restricted_member(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) main_identity = None for obj in main_client.list_identity().data: if obj.externalIdType == 'openldap_user': main_identity = obj break u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break # test creation of new env default_prj_id = main_client.list_project(name='Default').data[0].id default_project = main_client.by_id('project', default_prj_id) default_project.setmembers(members=[ idToMember(main_identity, 'owner'), idToMember(u2_identity, 'restricted') ]) u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2, project_id=default_prj_id) # Add new host with pytest.raises(AttributeError) as excinfo: host_list = \ add_digital_ocean_hosts( u2_client, 1) assert len(host_list) == 1 assert "object has no attribute" in str(excinfo.value) # 32 @if_test_ldap @if_do_key def test_ldap_create_service_with_restricted_member(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) main_identity = None for obj in main_client.list_identity().data: if obj.externalIdType == 'openldap_user': main_identity = obj break u2_client = 
create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break # test creation of new env default_prj_id = main_client.list_project(name='Default').data[0].id default_project = main_client.by_id('project', default_prj_id) default_project.setmembers(members=[ idToMember(main_identity, 'owner'), idToMember(u2_identity, 'restricted') ]) main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass, project_id=default_prj_id) u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2, project_id=default_prj_id) # Add new host hosts = u2_client.list_host( kind='docker', removed_null=True, state="active").data if len(hosts) == 0: host_list = \ add_digital_ocean_hosts( main_client, 1) assert len(host_list) == 1 launch_config = {"imageUuid": TEST_IMAGE_UUID} scale = 1 create_env_and_svc(u2_client, launch_config, scale) # 33,34 @if_test_ldap @if_do_key def test_ldap_create_new_env_with_readonly_member(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) main_identity = None for obj in main_client.list_identity().data: if obj.externalIdType == 'openldap_user': main_identity = obj break u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2) u2_identity = None for obj in u2_client.list_identity().data: if obj.externalIdType == 'openldap_user': u2_identity = obj break # test creation of new env default_prj_id = main_client.list_project(name='Default').data[0].id default_project = main_client.by_id('project', default_prj_id) default_project.setmembers(members=[ idToMember(main_identity, 'owner'), idToMember(u2_identity, 'readonly') ]) u2_client = create_ldap_client(username=ldap_user2, password=ldap_pass2, project_id=default_prj_id) main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass, project_id=default_prj_id) # Add new host with pytest.raises(AttributeError) as excinfo: host_list = \ add_digital_ocean_hosts( u2_client, 1) assert len(host_list) == 1 assert "object has no attribute" in str(excinfo.value) with pytest.raises(AttributeError) as excinfo: launch_config = {"imageUuid": TEST_IMAGE_UUID} scale = 1 create_env_and_svc(u2_client, launch_config, scale) assert "object has no attribute" in str(excinfo.value) # Create service using main client launch_config = {"imageUuid": TEST_IMAGE_UUID} scale = 1 service, env = create_env_and_svc(main_client, launch_config, scale) # List service using user1 client service = u2_client.list_service(name=service.name, stackId=env.id, removed_null=True).data assert len(service) == 1 @if_test_ldap def test_ldap_list_identities(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) identities = main_client.list_identity().data user_found = 0 authenticated_user = 0 for i in range(len(identities)): if identities[i]['user'] is True: user_found += 1 if identities[i]['externalIdType'] != 'rancher_id': authenticated_user += 1 assert user_found == 2 assert authenticated_user == 1 @if_test_ldap @if_ldap_port def test_secret_setting(admin_client): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass 
= os.environ.get('LDAP_MAIN_PASS') client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) password_setting = 'api.auth.ldap.openldap.service.account.password' secret = client.by_id_setting(password_setting) assert secret.value is None # 2 @if_test_ldap def test_ldap_required_to_specific_user(admin_client, request): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') ldap_user5 = os.environ.get('LDAP_USER5') ldap_pass5 = os.environ.get('LDAP_PASS5') # Login with user5 to create new environment get_authed_token(username=ldap_user5, password=ldap_pass5) main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) main_identity = main_client.list_identity(name=ldap_main_user).data[0] allowed_identities = [] allowed_identities.append(main_identity) user2_identity = ADMIN_LDAP_CLIENT.list_identity(name=ldap_user2).data[0] user2_identity = ast.literal_eval(str(user2_identity)) allowed_identities.append(user2_identity) # Enable new configuration config = load_config(access_mode='required') config['enabled'] = True config['allowedIdentities'] = allowed_identities ADMIN_LDAP_CLIENT.create_openldapconfig(config) # Try to login with user2 and user3 token2 = get_authed_token(username=ldap_user2, password=ldap_pass2) try: get_authed_token(username=ldap_user3, password=ldap_pass3) except AssertionError as e: assert '403' in str(e) try: get_authed_token(username=ldap_user5, password=ldap_pass5) except AssertionError as e: assert '403' in str(e) cookies = dict(token=token2['jwt']) good_auth = requests.get(cattle_url() + "schemas", cookies=cookies) assert good_auth.status_code == 200 def fin(): reconfigure_ldap(main_client, os.environ.get('API_AUTH_LDAP_SEARCH_BASE'), '') request.addfinalizer(fin) # 3 @if_test_ldap def test_ldap_required_to_specific_group(admin_client, request): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER3') ldap_pass3 = os.environ.get('LDAP_PASS3') ldap_user5 = os.environ.get('LDAP_USER5') ldap_pass5 = os.environ.get('LDAP_PASS5') group = os.environ.get('LDAP_GROUP') # Login with user5 to create new environment get_authed_token(username=ldap_user5, password=ldap_pass5) main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) main_identity = main_client.list_identity(name=ldap_main_user).data[0] allowed_identities = [] allowed_identities.append(main_identity) group_identity = ADMIN_LDAP_CLIENT.list_identity(name=group).data[0] group_identity_dict = ast.literal_eval(str(group_identity)) allowed_identities.append(group_identity_dict) # Enable new configuration config = load_config(access_mode='required') config['enabled'] = True config['allowedIdentities'] = allowed_identities ADMIN_LDAP_CLIENT.create_openldapconfig(config) # Try to login with user2 and user3 token2 = get_authed_token(username=ldap_user2, password=ldap_pass2) try: get_authed_token(username=ldap_user3, password=ldap_pass3) except AssertionError as e: assert '403' in str(e) try: get_authed_token(username=ldap_user5, password=ldap_pass5) except AssertionError as e: assert '403' in str(e) cookies = dict(token=token2['jwt']) good_auth = requests.get(cattle_url() + 
"schemas", cookies=cookies) assert good_auth.status_code == 200 def fin(): reconfigure_ldap(main_client, os.environ.get('API_AUTH_LDAP_SEARCH_BASE'), '') request.addfinalizer(fin) # 35 @if_test_ldap def test_ldap_restricted_to_specific_user(admin_client, request): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER6') ldap_pass3 = os.environ.get('LDAP_PASS6') ldap_user5 = os.environ.get('LDAP_USER5') ldap_pass5 = os.environ.get('LDAP_PASS5') # Login with user5 to create new environment get_authed_token(username=ldap_user5, password=ldap_pass5) main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) main_identity = main_client.list_identity(name=ldap_main_user).data[0] allowed_identities = [] allowed_identities.append(main_identity) user2_identity = ADMIN_LDAP_CLIENT.list_identity(name=ldap_user2).data[0] user2_identity = ast.literal_eval(str(user2_identity)) allowed_identities.append(user2_identity) # Enable new configuration config = load_config(access_mode='restricted') config['enabled'] = True config['allowedIdentities'] = allowed_identities ADMIN_LDAP_CLIENT.create_openldapconfig(config) # Try to login with user2 and user3 token2 = get_authed_token(username=ldap_user2, password=ldap_pass2) try: get_authed_token(username=ldap_user3, password=ldap_pass3) except AssertionError as e: assert '403' in str(e) cookies = dict(token=token2['jwt']) good_auth = requests.get(cattle_url() + "schemas", cookies=cookies) assert good_auth.status_code == 200 token5 = get_authed_token(username=ldap_user5, password=ldap_pass5) cookies = dict(token=token5['jwt']) good_auth = requests.get(cattle_url() + "schemas", cookies=cookies) assert good_auth.status_code == 200 def fin(): reconfigure_ldap(main_client, os.environ.get('API_AUTH_LDAP_SEARCH_BASE'), '') request.addfinalizer(fin) # 36 @if_test_ldap def test_ldap_restricted_to_specific_group(admin_client, request): ldap_main_user = os.environ.get('LDAP_MAIN_USER') ldap_main_pass = os.environ.get('LDAP_MAIN_PASS') ldap_user2 = os.environ.get('LDAP_USER2') ldap_pass2 = os.environ.get('LDAP_PASS2') ldap_user3 = os.environ.get('LDAP_USER6') ldap_pass3 = os.environ.get('LDAP_PASS6') ldap_user5 = os.environ.get('LDAP_USER5') ldap_pass5 = os.environ.get('LDAP_PASS5') group = os.environ.get('LDAP_GROUP') # Login with user5 to create new environment get_authed_token(username=ldap_user5, password=ldap_pass5) main_client = create_ldap_client(username=ldap_main_user, password=ldap_main_pass) main_identity = main_client.list_identity(name=ldap_main_user).data[0] allowed_identities = [] allowed_identities.append(main_identity) group_identity = ADMIN_LDAP_CLIENT.list_identity(name=group).data[0] group_identity_dict = ast.literal_eval(str(group_identity)) allowed_identities.append(group_identity_dict) # Enable new configuration config = load_config(access_mode='restricted') config['enabled'] = True config['allowedIdentities'] = allowed_identities ADMIN_LDAP_CLIENT.create_openldapconfig(config) # Try to login with user2 and user3 token2 = get_authed_token(username=ldap_user2, password=ldap_pass2) try: get_authed_token(username=ldap_user3, password=ldap_pass3) except AssertionError as e: assert '403' in str(e) cookies = dict(token=token2['jwt']) good_auth = requests.get(cattle_url() + "schemas", cookies=cookies) assert good_auth.status_code == 200 token5 = 
get_authed_token(username=ldap_user5, password=ldap_pass5)
    cookies = dict(token=token5['jwt'])
    good_auth = requests.get(cattle_url() + "schemas", cookies=cookies)
    assert good_auth.status_code == 200

    def fin():
        reconfigure_ldap(main_client,
                         os.environ.get('API_AUTH_LDAP_SEARCH_BASE'), '')

    request.addfinalizer(fin)
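
# A minimal sketch (not part of the test suite) of the access-check pattern the
# tests above repeat for the "required" and "restricted" access modes: a user
# outside allowedIdentities should be rejected with a 403, while an allowed
# user's JWT should still reach the API. The helper names below
# (assert_login_denied, assert_token_valid) are hypothetical; get_authed_token,
# requests and cattle_url are taken from the surrounding tests.
def assert_login_denied(username, password):
    # get_authed_token is expected to raise an AssertionError whose message
    # contains the HTTP status code when authentication is refused.
    try:
        get_authed_token(username=username, password=password)
    except AssertionError as e:
        assert '403' in str(e)
    else:
        raise AssertionError("expected a 403 for user %s" % username)


def assert_token_valid(token):
    # An allowed identity should be able to list schemas with its JWT cookie.
    cookies = dict(token=token['jwt'])
    response = requests.get(cattle_url() + "schemas", cookies=cookies)
    assert response.status_code == 200
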
# -*- coding: utf-8 -*- from ctypes import * import ctypes import struct, os, threading, platform, atexit from acq4.util.clibrary import * from MultiClampTelegraph import * from acq4.util.debug import * __all__ = ['MultiClamp', 'axlib', 'wmlib'] ## Load windows definitions windowsDefs = winDefs() #verbose=True) # Load AxMultiClampMsg header d = os.path.dirname(__file__) axonDefs = CParser( os.path.join(d, 'AxMultiClampMsg.h'), copyFrom=windowsDefs, cache=os.path.join(d, 'AxMultiClampMsg.h.cache'), #verbose=True ) if platform.architecture()[0] != '32bit': raise RuntimeError("MultiClamp API can only be accessed from 32-bit process!") axlib = CLibrary(windll.LoadLibrary(os.path.join(d, 'AxMultiClampMsg.dll')), axonDefs, prefix='MCCMSG_') class MultiClampChannel: """Class used to run MultiClamp commander functions for a specific channel. Instances of this class are created via MultiClamp.getChannel""" def __init__(self, mc, desc): self.mc = mc self.desc = desc self.state = None self.callback = None self.lock = threading.RLock() ## handle for axon mccmsg library self.axonDesc = { 'pszSerialNum': desc['sn'], 'uModel': desc['model'], 'uCOMPortID': desc['com'], 'uDeviceID': desc['dev'], 'uChannelID': desc['chan'] } def setCallback(self, cb): with self.lock: self.callback = cb def getState(self): with self.lock: return self.state def getMode(self): with self.lock: return self.state['mode'] def updateState(self, state): """Called by MultiClamp when changes have occurred in MCC.""" with self.lock: self.state = state cb = self.callback if cb is not None: cb(state) def getParam(self, param): self.select() fn = 'Get' + param v = self.mc.call(fn)[1] ## perform return value mapping for a few specific functions if fn in INV_NAME_MAPS: if v not in INV_NAME_MAPS[fn]: raise Exception("Return from %s was %s; expected one of %s." % (fn, v, INV_NAME_MAPS[fn].keys())) v = INV_NAME_MAPS[fn][v] ## Silly workaround--MC700A likes to tell us that secondary signal gain is 0 if fn == 'GetSecondarySignalGain' and self.desc['model'] == axlib.HW_TYPE_MC700A: return 1.0 return v def setParam(self, param, value): self.select() fn = "Set" + param ## Perform value mapping for a few functions (SetMode, SetPrimarySignal, SetSecondarySignal) if fn in NAME_MAPS: if value not in NAME_MAPS[fn]: raise Exception("Argument to %s must be one of %s" % (fn, NAME_MAPS[fn].keys())) value = NAME_MAPS[fn][value] #print fn, value self.mc.call(fn, value) def getParams(self, params): """Reads multiple parameters from multiclamp. Arguments: chan -- Use the multiclamp device associated with this channel params -- List of parameters to request. Each parameter "SomeValue" must have a corresponding function "getSomeValue" in AxMultiClampMsg.h """ res = {} for p in params: res[p] = self.getParam(p) return res def setParams(self, params): """Sets multiple parameters on multiclamp. Arguments: chan -- Use the multiclamp device associated with this channel params -- Dict of parameters to set. """ res = {} for p in params: #print "Setting", p, params[p] try: self.setParam(p, params[p]) res[p] = True except: printExc("Error while setting parameter %s=%s" % (p, str(params[p]))) res[p] = False return res def setMode(self, mode): return self.setParam('Mode', mode) def setSignal(self, signal, primary): """Set the signal of a MC primary/secondary channel by name. Use this function instead of setParam('PrimarySignal', ...). 
Bugs in the axon driver prevent that call from working correctly.""" model = self.desc['model'] priMap = ['PRI', 'SEC'] mode = self.getMode() if mode == 'I=0': mode = 'IC' sigMap = SIGNAL_MAP[model][mode][priMap[primary]] if signal not in sigMap: raise Exception("Signal name '%s' not found. (Using map for model=%s, mode=%s, pri=%s)" % (signal, model, mode, priMap[primary])) sig = 'SIGNAL_' + sigMap[signal] if primary == 0: self.setParam('PrimarySignal', sig) elif primary == 1: self.setParam('SecondarySignal', sig) def getPrimarySignalInfo(self): return self.getSignalInfo(0) def getSecondarySignalInfo(self): return self.getSignalInfo(1) def setPrimarySignal(self, signal): return self.setSignal(signal, 0) def setSecondarySignal(self, signal): return self.setSignal(signal, 1) def listSignals(self, mode=None): """Return two lists of signal names that may be used for this channel: #( [primary signals], [secondary signals] ) #If mode is omitted, then the current mode of the channel is used.""" if mode is None: mode = self.getMode() if mode == 'I=0': mode = 'IC' model = self.desc['model'] return (SIGNAL_MAP[model][mode]['PRI'].keys(), SIGNAL_MAP[model][mode]['SEC'].keys()) def select(self): """Select this channel for parameter get/set""" self.mc.call('SelectMultiClamp', **self.axonDesc) class MultiClamp: """Class used to interface with remote multiclamp server. Only one instance of this class should be created. Example usage: mc = MultiClamp.instance() devs = mc.listDevices() chan0 = mc.getChannel(devs[0]) chan0.setMode('IC') signal, gain, units = chan0.getSignalInfo() """ INSTANCE = None def __init__(self): self.telegraph = None if MultiClamp.INSTANCE is not None: raise Exception("Already created MultiClamp driver object; use MultiClamp.INSTANCE") self.handle = None self.lock = threading.RLock() self.channels = {} self.chanDesc = {} self.connect() self.telegraph = MultiClampTelegraph(self.chanDesc, self.telegraphMessage) MultiClamp.INSTANCE = self atexit.register(self.quit) def quit(self): ## do other things to shut down driver? self.disconnect() if self.telegraph is not None: self.telegraph.quit() MultiClamp.INSTANCE = None @staticmethod def instance(): return MultiClamp.INSTANCE def getChannel(self, channel, callback=None): """Return a MultiClampChannel object for the specified device/channel. The channel argument should be the same as a single item from listDevices(). The callback will be called when certain (but not any) changes are made to the multiclamp state.""" if channel not in self.channels: raise Exception("No channel with description '%s'. Options are %s" % (str(channel), str(self.listChannels()))) ch = self.channels[channel] if callback is not None: ch.setCallback(callback) return ch def listChannels(self): """Return a list of strings used to identify all devices/channels. These strings should be used to identify the same channel across invocations.""" return self.channels.keys() def connect(self): """(re)create connection to commander.""" #print "connect to commander.." 
with self.lock: if self.handle is not None: #print " disconnect first" self.disconnect() (self.handle, err) = axlib.CreateObject() if self.handle == 0: self.handle = None self.raiseError("Error while initializing Axon library:", err) self.findDevices() #print " now connected:", self.chanDesc def disconnect(self): """Destroy connection to commander""" with self.lock: if self.handle is not None and axlib is not None: axlib.DestroyObject(self.handle) self.handle = None def findDevices(self): while True: ch = self.findMultiClamp() if ch is None: break else: ## Make sure the order of keys is well defined; string must be identical every time. ch1 = ch.copy() ch1['model'] = MODELS[ch1['model']] if ch1['model'] == 'MC700A': strDesc = ",".join("%s:%s" % (k, ch1[k]) for k in ['model', 'com', 'dev', 'chan']) elif ch1['model'] == 'MC700B': strDesc = ",".join("%s:%s" % (k, ch1[k]) for k in ['model', 'sn', 'chan']) if strDesc not in self.channels: self.channels[strDesc] = MultiClampChannel(self, ch) self.chanDesc[strDesc] = ch def findMultiClamp(self): if len(self.channels) == 0: fn = 'FindFirstMultiClamp' else: fn = 'FindNextMultiClamp' try: serial = create_string_buffer('\0'*16) ret = self.call(fn, pszSerialNum=serial, uBufSize=16) except: if sys.exc_info()[1][0] == 6000: ## We have reached the end of the device list return None raise desc = {'sn': ret['pszSerialNum'], 'model': ret['puModel'], 'com': ret['puCOMPortID'], 'dev': ret['puDeviceID'], 'chan': ret['puChannelID']} return desc def call(self, fName, *args, **kargs): ## call is only used for functions that return a bool error status and have a pnError argument passed by reference. with self.lock: ret = axlib('functions', fName)(self.handle, *args, **kargs) if ret() == 0: funcStr = "%s(%s)" % (fName, ', '.join(map(str, args) + ["%s=%s" % (k, str(kargs[k])) for k in kargs])) self.raiseError("Error while running function %s\n Error:" % funcStr, ret['pnError']) return ret def raiseError(self, msg, err): raise Exception(err, msg + " " + self.errString(err)) def errString(self, err): try: return axlib.BuildErrorText(self.handle, err, create_string_buffer('\0'*256), 256)['sTxtBuf'] except: sys.excepthook(*sys.exc_info()) return "<could not generate error message>" def telegraphMessage(self, msg, chID=None, state=None): if msg == 'update': self.channels[chID].updateState(state) elif msg == 'reconnect': self.connect() ## Crapton of stuff to remember that is not provided by header files def invertDict(d): return dict([(x[1], x[0]) for x in d.items()]) MODELS = { axlib.HW_TYPE_MC700A: 'MC700A', axlib.HW_TYPE_MC700B: 'MC700B' } MODE_LIST = { "VC": axlib.MODE_VCLAMP, "IC": axlib.MODE_ICLAMP, "I=0": axlib.MODE_ICLAMPZERO, } MODE_LIST_INV = invertDict(MODE_LIST) ## Extract all signal names from library PRI_OUT_MODE_LIST = {} SEC_OUT_MODE_LIST = {} for k in axlib['values']: if k[:18] == 'MCCMSG_PRI_SIGNAL_': PRI_OUT_MODE_LIST[k[11:]] = axlib('values', k) elif k[:18] == 'MCCMSG_SEC_SIGNAL_': SEC_OUT_MODE_LIST[k[11:]] = axlib('values', k) PRI_OUT_MODE_LIST_INV = invertDict(PRI_OUT_MODE_LIST) SEC_OUT_MODE_LIST_INV = invertDict(SEC_OUT_MODE_LIST) NAME_MAPS = { 'SetMode': MODE_LIST, 'SetPrimarySignal': PRI_OUT_MODE_LIST, 'SetSecondarySignal': SEC_OUT_MODE_LIST } INV_NAME_MAPS = { 'GetMode': MODE_LIST_INV, 'GetPrimarySignal': PRI_OUT_MODE_LIST_INV, 'GetSecondarySignal': SEC_OUT_MODE_LIST_INV } ## Build a map for connecting signal strings from telegraph headers to signal values from axon headers. ## Note: Completely retarded. 
SIGNAL_MAP = { axlib.HW_TYPE_MC700A: { 'VC': { 'PRI': { "Membrane Potential": "VC_MEMBPOTENTIAL", "Membrane Current": "VC_MEMBCURRENT", "Pipette Potential": "VC_PIPPOTENTIAL", "100 x AC Pipette Potential": "VC_100XACMEMBPOTENTIAL", "Bath Potential": "VC_AUXILIARY1" }, 'SEC': { "Membrane plus Offset Potential": "VC_MEMBPOTENTIAL", "Membrane Current": "VC_MEMBCURRENT", "Pipette Potential": "VC_PIPPOTENTIAL", "100 x AC Pipette Potential": "VC_100XACMEMBPOTENTIAL", "Bath Potential": "VC_AUXILIARY1" } }, 'IC': { 'PRI': { ## Driver bug? Primary IC signals use VC values. Bah. "Command Current": "VC_MEMBPOTENTIAL", "Membrane Current": "VC_MEMBCURRENT", "Membrane Potential": "VC_PIPPOTENTIAL", "100 x AC Membrane Potential": "VC_100XACMEMBPOTENTIAL", "Bath Potential": "VC_AUXILIARY1" #"Command Current": "IC_CMDCURRENT", #"Membrane Current": "IC_MEMBCURRENT", #"Membrane Potential": "IC_MEMBPOTENTIAL", #"100 x AC Membrane Potential": "IC_100XACMEMBPOTENTIAL", #"Bath Potential": "IC_AUXILIARY1" }, 'SEC': { "Command Current": "IC_CMDCURRENT", "Membrane Current": "IC_MEMBCURRENT", "Membrane plus Offset Potential": "IC_MEMBPOTENTIAL", "100 x AC Membrane Potential": "IC_100XACMEMBPOTENTIAL", "Bath Potential": "IC_AUXILIARY1" } } }, axlib.HW_TYPE_MC700B: { 'VC': { 'PRI': { "Membrane Current": "VC_MEMBCURRENT", "Membrane Potential": "VC_MEMBPOTENTIAL", "Pipette Potential": "VC_PIPPOTENTIAL", "100x AC Membrane Potential": "VC_100XACMEMBPOTENTIAL", "External Command Potential": "VC_EXTCMDPOTENTIAL", "Auxiliaryl": "VC_AUXILIARY1", "Auxiliary2": "VC_AUXILIARY2", }, 'SEC': { "Membrane Current":"VC_MEMBCURRENT" , "Membrane Potential": "VC_MEMBPOTENTIAL", "Pipette Potential": "VC_PIPPOTENTIAL", "100x AC Membrane Potential": "VC_100XACMEMBPOTENTIAL", "External Command Potential": "VC_EXTCMDPOTENTIAL", "Auxiliaryl": "VC_AUXILIARY1", "Auxiliary2": "VC_AUXILIARY2", } }, 'IC': { 'PRI': { "Membrane Potential": "IC_MEMBPOTENTIAL", "Membrane Current": "IC_MEMBCURRENT", "Command Current": "IC_CMDCURRENT", "100x AC Membrane Potential": "IC_100XACMEMBPOTENTIAL", "External Command Current": "IC_EXTCMDCURRENT", "Auxiliary1": "IC_AUXILIARY1", "Auxiliary2": "IC_AUXILIARY2", }, 'SEC': { "Membrane Potential": "IC_MEMBPOTENTIAL", "Membrane Current": "IC_MEMBCURRENT", "Pipette Potential": "IC_PIPPOTENTIAL", "100x AC Membrane Potential": "IC_100XACMEMBPOTENTIAL", "External Command Current": "IC_EXTCMDCURRENT", "Auxiliary1": "IC_AUXILIARY1", "Auxiliary2": "IC_AUXILIARY2", } } } } ### Create instance of driver class MultiClamp()
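
# A minimal usage sketch (assuming the mccmsg DLL is loadable and at least one
# amplifier channel is connected) of how the name maps above are applied:
# setParam translates a human-readable value through NAME_MAPS before calling
# the library, and getParam converts the raw constant returned by the library
# back through INV_NAME_MAPS. The channel chosen below is illustrative only;
# real keys come from MultiClamp.listChannels().
if __name__ == '__main__':
    mc = MultiClamp.instance()          # created by the module-level MultiClamp() call
    chan = mc.getChannel(mc.listChannels()[0])
    # 'IC' is looked up in NAME_MAPS['SetMode'] (-> axlib.MODE_ICLAMP)
    # before being passed to the library.
    chan.setParam('Mode', 'IC')
    # The MODE_* constant returned by GetMode is mapped back to 'IC'
    # via INV_NAME_MAPS['GetMode'].
    print(chan.getParam('Mode'))
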
# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Script to push the zone configuration to Cisco SAN switches. """ import random import re from eventlet import greenthread from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import ssh_utils from cinder import utils import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant LOG = logging.getLogger(__name__) class CiscoFCZoneClientCLI(object): """Cisco FC zone client cli implementation. OpenStack Fibre Channel zone client cli connector to manage FC zoning in Cisco SAN fabrics. Version history: 1.0 - Initial Cisco FC zone client cli """ switch_ip = None switch_port = '22' switch_user = 'admin' switch_pwd = 'none' def __init__(self, ipaddress, username, password, port, vsan): """initializing the client.""" self.switch_ip = ipaddress self.switch_port = port self.switch_user = username self.switch_pwd = password self.fabric_vsan = vsan self.sshpool = None def get_active_zone_set(self): """Return the active zone configuration. Return active zoneset from fabric. When none of the configurations are active then it will return empty map. :returns: Map -- active zone set map in the following format { 'zones': {'openstack50060b0000c26604201900051ee8e329': ['50060b0000c26604', '201900051ee8e329'] }, 'active_zone_config': 'OpenStack_Cfg' } """ zone_set = {} zone = {} zone_member = None zone_name = None switch_data = None zone_set_name = None try: switch_data = self._get_switch_info( [ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan, ' | no-more']) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed getting active zone set " "from fabric %s"), self.switch_ip) try: for line in switch_data: # Split on non-word characters, line_split = re.split('[\s\[\]]+', line) if ZoneConstant.CFG_ZONESET in line_split: # zoneset name [name] vsan [vsan] zone_set_name = \ line_split[line_split.index(ZoneConstant.CFG_ZONESET) + 2] continue if ZoneConstant.CFG_ZONE in line_split: # zone name [name] vsan [vsan] zone_name = \ line_split[line_split.index(ZoneConstant.CFG_ZONE) + 2] zone[zone_name] = list() continue if ZoneConstant.CFG_ZONE_MEMBER in line_split: # Examples: # pwwn c0:50:76:05:15:9f:00:12 # * fcid 0x1e01c0 [pwwn 50:05:07:68:02:20:48:04] [V7K_N1P2] zone_member = \ line_split[ line_split.index(ZoneConstant.CFG_ZONE_MEMBER) + 1] zone_member_list = zone.get(zone_name) zone_member_list.append(zone_member) zone_set[ZoneConstant.CFG_ZONES] = zone zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] = zone_set_name except Exception as ex: # In case of parsing error here, it should be malformed cli output. msg = _("Malformed zone configuration: (switch=%(switch)s " "zone_config=%(zone_config)s)." 
) % {'switch': self.switch_ip, 'zone_config': switch_data} LOG.error(msg) exc_msg = _("Exception: %s") % six.text_type(ex) LOG.error(exc_msg) raise exception.FCZoneDriverException(reason=msg) return zone_set def add_zones(self, zones, activate, fabric_vsan, active_zone_set, zone_status): """Add zone configuration. This method will add the zone configuration passed by user. input params: zones - zone names mapped to members and VSANs. zone members are colon separated but case-insensitive { zonename1:[zonememeber1,zonemember2,...], zonename2:[zonemember1, zonemember2,...]...} e.g: {'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } activate - True/False """ LOG.debug("Add Zones - Zones passed: %s", zones) LOG.debug("Active zone set: %s", active_zone_set) zone_list = active_zone_set[ZoneConstant.CFG_ZONES] LOG.debug("zone list: %s", zone_list) LOG.debug("zone status: %s", zone_status) cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] zone_cmds = [['conf'], ['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]] for zone in zones.keys(): # if zone exists, its an update. Delete & insert LOG.debug("Update call") if zone in zone_list: # Response from get_active_zone_set strips colons from WWPNs current_zone = set(zone_list[zone]) new_wwpns = map(lambda x: x.lower().replace(':', ''), zones[zone]) new_zone = set(new_wwpns) if current_zone != new_zone: try: self.delete_zones(zone, activate, fabric_vsan, active_zone_set, zone_status) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Deleting zone failed %s"), zone) LOG.debug("Deleted Zone before insert : %s", zone) zone_cmds.append(['zone', 'name', zone]) for member in zones[zone]: zone_cmds.append(['member', 'pwwn', member]) zone_cmds.append(['end']) try: LOG.debug("Add zones: Config cmd to run: %s", zone_cmds) self._ssh_execute(zone_cmds, True, 1) if activate: self.activate_zoneset(cfg_name, fabric_vsan, zone_status) self._cfg_save() except Exception as e: msg = _("Creating and activating zone set failed: " "(Zone set=%(zoneset)s error=%(err)s)." ) % {'zoneset': cfg_name, 'err': six.text_type(e)} LOG.error(msg) raise exception.CiscoZoningCliException(reason=msg) def activate_zoneset(self, cfgname, fabric_vsan, zone_status): """Method to Activate the zone config. Param cfgname - ZonesetName.""" LOG.debug("zone status: %s", zone_status) cmd_list = [['conf'], ['zoneset', 'activate', 'name', cfgname, 'vsan', self.fabric_vsan]] if zone_status['mode'] == 'enhanced': cmd_list.append(['zone', 'commit', 'vsan', fabric_vsan]) cmd_list.append(['end']) return self._ssh_execute(cmd_list, True, 1) def get_zoning_status(self): """Return the zoning mode and session for a zoneset.""" zone_status = {} try: switch_data = self._get_switch_info( [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed getting zone status " "from fabric %s"), self.switch_ip) try: for line in switch_data: # Split on non-word characters, line_split = re.split('[\s\[\]]+', line) if 'mode:' in line_split: # mode: <enhanced|basic> zone_status['mode'] = line_split[line_split.index('mode:') + 1] continue if 'session:' in line_split: # session: <none|a value other than none> zone_status['session'] = \ line_split[line_split.index('session:') + 1] continue except Exception as ex: # In case of parsing error here, it should be malformed cli output. 
msg = _("Malformed zone status: (switch=%(switch)s " "zone_config=%(zone_config)s)." ) % {'switch': self.switch_ip, 'zone_status': switch_data} LOG.error(msg) exc_msg = _("Exception: %s") % six.text_type(ex) LOG.error(exc_msg) raise exception.FCZoneDriverException(reason=msg) return zone_status def delete_zones(self, zone_names, activate, fabric_vsan, active_zone_set, zone_status): """Delete zones from fabric. Method to delete the active zone config zones params zone_names: zoneNames separated by semicolon params activate: True/False """ LOG.debug("zone_names %s", zone_names) active_zoneset_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] cmds = [['conf'], ['zoneset', 'name', active_zoneset_name, 'vsan', fabric_vsan]] try: for zone in set(zone_names.split(';')): cmds.append(['no', 'zone', 'name', zone]) cmds.append(['end']) LOG.debug("Delete zones: Config cmd to run: %s", cmds) self._ssh_execute(cmds, True, 1) if activate: self.activate_zoneset(active_zoneset_name, fabric_vsan, zone_status) self._cfg_save() except Exception as e: msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)." ) % {'cmd': cmds, 'err': six.text_type(e)} LOG.error(msg) raise exception.CiscoZoningCliException(reason=msg) def get_nameserver_info(self): """Get name server data from fabric. This method will return the connected node port wwn list(local and remote) for the given switch fabric show fcns database """ cli_output = None return_list = [] try: cli_output = self._get_switch_info([ZoneConstant.FCNS_SHOW, self.fabric_vsan]) except exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed collecting fcns database " "info for fabric %s"), self.switch_ip) if (cli_output): return_list = self._parse_ns_output(cli_output) LOG.info(_LI("Connector returning fcnsinfo-%s"), return_list) return return_list @utils.retry(processutils.ProcessExecutionError, retries=5) def _cfg_save(self): cmd = ['copy', 'running-config', 'startup-config'] self._run_ssh(cmd, True) def _get_switch_info(self, cmd_list): stdout, stderr, sw_data = None, None, None try: stdout, stderr = self._run_ssh(cmd_list, True) LOG.debug("CLI output from ssh - output: %s", stdout) if (stdout): sw_data = stdout.splitlines() return sw_data except processutils.ProcessExecutionError as e: msg = _("Error while getting data via ssh: (command=%(cmd)s " "error=%(err)s).") % {'cmd': cmd_list, 'err': six.text_type(e)} LOG.error(msg) raise exception.CiscoZoningCliException(reason=msg) def _parse_ns_output(self, switch_data): """Parses name server data. 
Parses nameserver raw data and adds the device port wwns to the list :returns: List -- list of device port wwn from ns info """ return_list = [] for line in switch_data: if not(" N " in line): continue linesplit = line.split() if len(linesplit) > 2: node_port_wwn = linesplit[2] return_list.append(node_port_wwn) else: msg = _("Malformed show fcns database string: %s") % line LOG.error(msg) raise exception.InvalidParameterValue(err=msg) return return_list def _run_ssh(self, cmd_list, check_exit_code=True): command = ' '.join(cmd_list) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, min_size=1, max_size=5) try: with self.sshpool.item() as ssh: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception: with excutils.save_and_reraise_exception(): LOG.warning(_LW("Error running SSH command: %s"), command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status update. Executes CLI commands where status return is expected. cmd_list is a list of commands, where each command is itself a list of parameters. We use utils.check_ssh_injection to check each command, but then join then with " ; " to form a single command. """ # Check that each command is secure for cmd in cmd_list: utils.check_ssh_injection(cmd) # Combine into a single command. command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list)) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, min_size=1, max_size=5) stdin, stdout, stderr = None, None, None LOG.debug("Executing command via ssh: %s", command) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: stdin, stdout, stderr = ssh.exec_command(command) channel = stdout.channel exit_status = channel.recv_exit_status() LOG.debug("Exit Status from ssh: %s", exit_status) # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s', exit_status) if check_exit_code and exit_status != 0: raise processutils.ProcessExecutionError( exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=command) else: return True else: return True except Exception as e: LOG.exception(_LE('Error executing SSH command.')) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) LOG.debug("Handling error case after SSH: %s", last_exception) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error executing command via ssh.")) finally: if stdin: stdin.flush() stdin.close() if stdout: stdout.close() if stderr: stderr.close() def cleanup(self): self.sshpool = None
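
# A minimal usage sketch of the client above. The switch address, credentials,
# port and VSAN are placeholders, not real fabric data; the zone name and WWPNs
# are copied from the add_zones docstring. It illustrates the documented data
# shapes: get_active_zone_set() returns {'zones': {...}, 'active_zone_config':
# <cfg name>} and add_zones() takes a dict mapping zone names to member WWPNs.
if __name__ == '__main__':
    client = CiscoFCZoneClientCLI('10.0.0.1', 'admin', 'password',
                                  '22', vsan=1)
    active = client.get_active_zone_set()
    status = client.get_zoning_status()
    zones = {
        'openstack50060b0000c26604201900051ee8e329':
            ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'],
    }
    # Push the zones and activate the zoneset on the placeholder VSAN.
    client.add_zones(zones, activate=True, fabric_vsan=1,
                     active_zone_set=active, zone_status=status)
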
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import sys import uuid import eventlet import mock from oslo_config import cfg import oslo_messaging import testtools from neutron.agent.common import config from neutron.agent.dhcp import agent as dhcp_agent from neutron.agent.dhcp import config as dhcp_config from neutron.agent import dhcp_agent as entry from neutron.agent.linux import dhcp from neutron.agent.linux import interface from neutron.common import config as common_config from neutron.common import constants as const from neutron.common import exceptions from neutron.common import utils from neutron import context from neutron.tests import base HOSTNAME = 'hostname' dev_man = dhcp.DeviceManager rpc_api = dhcp_agent.DhcpPluginApi DEVICE_MANAGER = '%s.%s' % (dev_man.__module__, dev_man.__name__) DHCP_PLUGIN = '%s.%s' % (rpc_api.__module__, rpc_api.__name__) fake_tenant_id = 'aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa' fake_subnet1_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.9.2', end='172.9.9.254')) fake_subnet1 = dhcp.DictModel(dict(id='bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb', network_id='12345678-1234-5678-1234567890ab', cidr='172.9.9.0/24', enable_dhcp=True, name='', tenant_id=fake_tenant_id, gateway_ip='172.9.9.1', host_routes=[], dns_nameservers=[], ip_version=4, ipv6_ra_mode=None, ipv6_address_mode=None, allocation_pools=fake_subnet1_allocation_pools)) fake_subnet2_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.8.2', end='172.9.8.254')) fake_subnet2 = dhcp.DictModel(dict(id='dddddddd-dddd-dddd-dddddddddddd', network_id='12345678-1234-5678-1234567890ab', cidr='172.9.8.0/24', enable_dhcp=False, name='', tenant_id=fake_tenant_id, gateway_ip='172.9.8.1', host_routes=[], dns_nameservers=[], ip_version=4, allocation_pools=fake_subnet2_allocation_pools)) fake_subnet3 = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb', network_id='12345678-1234-5678-1234567890ab', cidr='192.168.1.1/24', enable_dhcp=True)) fake_ipv6_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb', network_id='12345678-1234-5678-1234567890ab', cidr='2001:0db8::0/64', enable_dhcp=True, tenant_id=fake_tenant_id, gateway_ip='2001:0db8::1', ip_version=6, ipv6_ra_mode='slaac', ipv6_address_mode=None)) fake_meta_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb', network_id='12345678-1234-5678-1234567890ab', cidr='169.254.169.252/30', gateway_ip='169.254.169.253', enable_dhcp=True)) fake_fixed_ip1 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id, ip_address='172.9.9.9')) fake_fixed_ip2 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id, ip_address='172.9.9.10')) fake_fixed_ipv6 = dhcp.DictModel(dict(id='', subnet_id=fake_ipv6_subnet.id, ip_address='2001:db8::a8bb:ccff:fedd:ee99')) fake_meta_fixed_ip = dhcp.DictModel(dict(id='', subnet=fake_meta_subnet, ip_address='169.254.169.254')) fake_allocation_pool_subnet1 = dhcp.DictModel(dict(id='', start='172.9.9.2', end='172.9.9.254')) fake_port1 = 
dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab', device_id='dhcp-12345678-1234-aaaa-1234567890ab', device_owner='', allocation_pools=fake_subnet1_allocation_pools, mac_address='aa:bb:cc:dd:ee:ff', network_id='12345678-1234-5678-1234567890ab', fixed_ips=[fake_fixed_ip1])) fake_dhcp_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789022', device_id='dhcp-12345678-1234-aaaa-123456789022', device_owner=const.DEVICE_OWNER_DHCP, allocation_pools=fake_subnet1_allocation_pools, mac_address='aa:bb:cc:dd:ee:22', network_id='12345678-1234-5678-1234567890ab', fixed_ips=[fake_fixed_ip2])) fake_port2 = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000', device_id='dhcp-12345678-1234-aaaa-123456789000', device_owner='', mac_address='aa:bb:cc:dd:ee:99', network_id='12345678-1234-5678-1234567890ab', fixed_ips=[fake_fixed_ip2])) fake_ipv6_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000', device_owner='', mac_address='aa:bb:cc:dd:ee:99', network_id='12345678-1234-5678-1234567890ab', fixed_ips=[fake_fixed_ipv6])) fake_meta_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab', mac_address='aa:bb:cc:dd:ee:ff', network_id='12345678-1234-5678-1234567890ab', device_owner=const.DEVICE_OWNER_ROUTER_INTF, device_id='forzanapoli', fixed_ips=[fake_meta_fixed_ip])) fake_meta_dvr_port = dhcp.DictModel(fake_meta_port.copy()) fake_meta_dvr_port.device_owner = const.DEVICE_OWNER_DVR_INTERFACE fake_dist_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab', mac_address='aa:bb:cc:dd:ee:ff', network_id='12345678-1234-5678-1234567890ab', device_owner=const.DEVICE_OWNER_DVR_INTERFACE, device_id='forzanapoli', fixed_ips=[fake_meta_fixed_ip])) FAKE_NETWORK_UUID = '12345678-1234-5678-1234567890ab' FAKE_NETWORK_DHCP_NS = "qdhcp-%s" % FAKE_NETWORK_UUID fake_network = dhcp.NetModel(dict(id=FAKE_NETWORK_UUID, tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_subnet1, fake_subnet2], ports=[fake_port1])) fake_network_ipv6 = dhcp.NetModel(dict( id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_ipv6_subnet], ports=[fake_ipv6_port])) fake_network_ipv6_ipv4 = dhcp.NetModel(dict( id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_ipv6_subnet, fake_subnet1], ports=[fake_port1])) isolated_network = dhcp.NetModel( dict( id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_subnet1], ports=[fake_port1])) nonisolated_dist_network = dhcp.NetModel( dict( id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_subnet1], ports=[fake_port1, fake_port2])) empty_network = dhcp.NetModel( dict( id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_subnet1], ports=[])) fake_meta_network = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_meta_subnet], ports=[fake_meta_port])) fake_meta_dvr_network = dhcp.NetModel(fake_meta_network.copy()) fake_meta_dvr_network.ports = [fake_meta_dvr_port] fake_dist_network = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[fake_meta_subnet], ports=[fake_meta_port, fake_dist_port])) fake_down_network = dhcp.NetModel( 
dict(id='12345678-dddd-dddd-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=False, subnets=[], ports=[])) class TestDhcpAgent(base.BaseTestCase): def setUp(self): super(TestDhcpAgent, self).setUp() entry.register_options(cfg.CONF) cfg.CONF.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') # disable setting up periodic state reporting cfg.CONF.set_override('report_interval', 0, 'AGENT') self.driver_cls_p = mock.patch( 'neutron.agent.dhcp.agent.importutils.import_class') self.driver = mock.Mock(name='driver') self.driver.existing_dhcp_networks.return_value = [] self.driver_cls = self.driver_cls_p.start() self.driver_cls.return_value = self.driver self.mock_makedirs_p = mock.patch("os.makedirs") self.mock_makedirs = self.mock_makedirs_p.start() self.mock_ip_wrapper_p = mock.patch("neutron.agent.linux.ip_lib." "IPWrapper") self.mock_ip_wrapper = self.mock_ip_wrapper_p.start() def test_init_host(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'sync_state') as sync_state: dhcp.init_host() sync_state.assert_called_once_with() def test_dhcp_agent_manager(self): state_rpc_str = 'neutron.agent.rpc.PluginReportStateAPI' # sync_state is needed for this test cfg.CONF.set_override('report_interval', 1, 'AGENT') with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport, 'sync_state', autospec=True) as mock_sync_state: with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport, 'periodic_resync', autospec=True) as mock_periodic_resync: with mock.patch(state_rpc_str) as state_rpc: with mock.patch.object(sys, 'argv') as sys_argv: sys_argv.return_value = [ 'dhcp', '--config-file', base.etcdir('neutron.conf')] cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS) config.register_interface_driver_opts_helper(cfg.CONF) config.register_agent_state_opts_helper(cfg.CONF) cfg.CONF.register_opts(interface.OPTS) common_config.init(sys.argv[1:]) agent_mgr = dhcp_agent.DhcpAgentWithStateReport( 'testhost') eventlet.greenthread.sleep(1) agent_mgr.after_start() mock_sync_state.assert_called_once_with(agent_mgr) mock_periodic_resync.assert_called_once_with(agent_mgr) state_rpc.assert_has_calls( [mock.call(mock.ANY), mock.call().report_state(mock.ANY, mock.ANY, mock.ANY)]) def test_dhcp_agent_main_agent_manager(self): logging_str = 'neutron.agent.common.config.setup_logging' launcher_str = 'oslo_service.service.ServiceLauncher' with mock.patch(logging_str): with mock.patch.object(sys, 'argv') as sys_argv: with mock.patch(launcher_str) as launcher: sys_argv.return_value = ['dhcp', '--config-file', base.etcdir('neutron.conf')] entry.main() launcher.assert_has_calls( [mock.call(cfg.CONF), mock.call().launch_service(mock.ANY), mock.call().wait()]) def test_run_completes_single_pass(self): with mock.patch(DEVICE_MANAGER): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) attrs_to_mock = dict( [(a, mock.DEFAULT) for a in ['sync_state', 'periodic_resync']]) with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks: dhcp.run() mocks['sync_state'].assert_called_once_with() mocks['periodic_resync'].assert_called_once_with() def test_call_driver(self): network = mock.Mock() network.id = '1' dhcp = dhcp_agent.DhcpAgent(cfg.CONF) self.assertTrue(dhcp.call_driver('foo', network)) self.driver.assert_called_once_with(cfg.CONF, mock.ANY, mock.ANY, mock.ANY, mock.ANY) def _test_call_driver_failure(self, exc=None, trace_level='exception', expected_sync=True): network = mock.Mock() network.id = '1' self.driver.return_value.foo.side_effect = exc or Exception dhcp = 
dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'schedule_resync') as schedule_resync: self.assertIsNone(dhcp.call_driver('foo', network)) self.driver.assert_called_once_with(cfg.CONF, mock.ANY, mock.ANY, mock.ANY, mock.ANY) self.assertEqual(expected_sync, schedule_resync.called) def test_call_driver_ip_address_generation_failure(self): error = oslo_messaging.RemoteError( exc_type='IpAddressGenerationFailure') self._test_call_driver_failure(exc=error, expected_sync=False) def test_call_driver_failure(self): self._test_call_driver_failure() def test_call_driver_remote_error_net_not_found(self): self._test_call_driver_failure( exc=oslo_messaging.RemoteError(exc_type='NetworkNotFound'), trace_level='warning') def test_call_driver_network_not_found(self): self._test_call_driver_failure( exc=exceptions.NetworkNotFound(net_id='1'), trace_level='warning') def test_call_driver_conflict(self): self._test_call_driver_failure( exc=exceptions.Conflict(), trace_level='warning', expected_sync=False) def _test_sync_state_helper(self, known_net_ids, active_net_ids): active_networks = set(mock.Mock(id=netid) for netid in active_net_ids) with mock.patch(DHCP_PLUGIN) as plug: mock_plugin = mock.Mock() mock_plugin.get_active_networks_info.return_value = active_networks plug.return_value = mock_plugin dhcp = dhcp_agent.DhcpAgent(HOSTNAME) attrs_to_mock = dict([(a, mock.DEFAULT) for a in ['disable_dhcp_helper', 'cache', 'safe_configure_dhcp_for_network']]) with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks: mocks['cache'].get_network_ids.return_value = known_net_ids dhcp.sync_state() diff = set(known_net_ids) - set(active_net_ids) exp_disable = [mock.call(net_id) for net_id in diff] mocks['cache'].assert_has_calls([mock.call.get_network_ids()]) mocks['disable_dhcp_helper'].assert_has_calls(exp_disable) def test_sync_state_initial(self): self._test_sync_state_helper([], ['a']) def test_sync_state_same(self): self._test_sync_state_helper(['a'], ['a']) def test_sync_state_disabled_net(self): self._test_sync_state_helper(['b'], ['a']) def test_sync_state_waitall(self): with mock.patch.object(dhcp_agent.eventlet.GreenPool, 'waitall') as w: active_net_ids = ['1', '2', '3', '4', '5'] known_net_ids = ['1', '2', '3', '4', '5'] self._test_sync_state_helper(known_net_ids, active_net_ids) w.assert_called_once_with() def test_sync_state_for_all_networks_plugin_error(self): with mock.patch(DHCP_PLUGIN) as plug: mock_plugin = mock.Mock() mock_plugin.get_active_networks_info.side_effect = Exception plug.return_value = mock_plugin with mock.patch.object(dhcp_agent.LOG, 'exception') as log: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'schedule_resync') as schedule_resync: dhcp.sync_state() self.assertTrue(log.called) self.assertTrue(schedule_resync.called) def test_sync_state_for_one_network_plugin_error(self): with mock.patch(DHCP_PLUGIN) as plug: mock_plugin = mock.Mock() exc = Exception() mock_plugin.get_active_networks_info.side_effect = exc plug.return_value = mock_plugin with mock.patch.object(dhcp_agent.LOG, 'exception') as log: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'schedule_resync') as schedule_resync: dhcp.sync_state(['foo_network']) self.assertTrue(log.called) schedule_resync.assert_called_with(exc, 'foo_network') def test_periodic_resync(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp_agent.eventlet, 'spawn') as spawn: dhcp.periodic_resync() spawn.assert_called_once_with(dhcp._periodic_resync_helper) def 
test_report_state_revival_logic(self): dhcp = dhcp_agent.DhcpAgentWithStateReport(HOSTNAME) with mock.patch.object(dhcp.state_rpc, 'report_state') as report_state,\ mock.patch.object(dhcp, "run"): report_state.return_value = const.AGENT_ALIVE dhcp._report_state() self.assertEqual({}, dhcp.needs_resync_reasons) report_state.return_value = const.AGENT_REVIVED dhcp._report_state() self.assertEqual(dhcp.needs_resync_reasons[None], ['Agent has just been revived']) def test_periodic_resync_helper(self): with mock.patch.object(dhcp_agent.eventlet, 'sleep') as sleep: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) resync_reasons = collections.OrderedDict( (('a', 'reason1'), ('b', 'reason2'))) dhcp.needs_resync_reasons = resync_reasons with mock.patch.object(dhcp, 'sync_state') as sync_state: sync_state.side_effect = RuntimeError with testtools.ExpectedException(RuntimeError): dhcp._periodic_resync_helper() sync_state.assert_called_once_with(resync_reasons.keys()) sleep.assert_called_once_with(dhcp.conf.resync_interval) self.assertEqual(len(dhcp.needs_resync_reasons), 0) def test_populate_cache_on_start_without_active_networks_support(self): # emul dhcp driver that doesn't support retrieving of active networks self.driver.existing_dhcp_networks.side_effect = NotImplementedError with mock.patch.object(dhcp_agent.LOG, 'debug') as log: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) self.driver.existing_dhcp_networks.assert_called_once_with( dhcp.conf, ) self.assertFalse(dhcp.cache.get_network_ids()) self.assertTrue(log.called) def test_populate_cache_on_start(self): networks = ['aaa', 'bbb'] self.driver.existing_dhcp_networks.return_value = networks dhcp = dhcp_agent.DhcpAgent(HOSTNAME) self.driver.existing_dhcp_networks.assert_called_once_with( dhcp.conf, ) self.assertEqual(set(networks), set(dhcp.cache.get_network_ids())) def test_none_interface_driver(self): cfg.CONF.set_override('interface_driver', None) self.assertRaises(SystemExit, dhcp.DeviceManager, cfg.CONF, None) def test_nonexistent_interface_driver(self): # Temporarily turn off mock, so could use the real import_class # to import interface_driver. 
self.driver_cls_p.stop() self.addCleanup(self.driver_cls_p.start) cfg.CONF.set_override('interface_driver', 'foo.bar') self.assertRaises(SystemExit, dhcp.DeviceManager, cfg.CONF, None) class TestLogArgs(base.BaseTestCase): def test_log_args_without_log_dir_and_file(self): conf_dict = {'debug': True, 'verbose': False, 'log_dir': None, 'log_file': None, 'use_syslog': True, 'syslog_log_facility': 'LOG_USER'} conf = dhcp.DictModel(conf_dict) expected_args = ['--debug', '--use-syslog', '--syslog-log-facility=LOG_USER'] args = config.get_log_args(conf, 'log_file_name') self.assertEqual(expected_args, args) def test_log_args_without_log_file(self): conf_dict = {'debug': True, 'verbose': True, 'log_dir': '/etc/tests', 'log_file': None, 'use_syslog': False, 'syslog_log_facility': 'LOG_USER'} conf = dhcp.DictModel(conf_dict) expected_args = ['--debug', '--verbose', '--log-file=log_file_name', '--log-dir=/etc/tests'] args = config.get_log_args(conf, 'log_file_name') self.assertEqual(expected_args, args) def test_log_args_with_log_dir_and_file(self): conf_dict = {'debug': True, 'verbose': False, 'log_dir': '/etc/tests', 'log_file': 'tests/filelog', 'use_syslog': False, 'syslog_log_facility': 'LOG_USER'} conf = dhcp.DictModel(conf_dict) expected_args = ['--debug', '--log-file=log_file_name', '--log-dir=/etc/tests/tests'] args = config.get_log_args(conf, 'log_file_name') self.assertEqual(expected_args, args) def test_log_args_without_log_dir(self): conf_dict = {'debug': True, 'verbose': False, 'log_file': 'tests/filelog', 'log_dir': None, 'use_syslog': False, 'syslog_log_facility': 'LOG_USER'} conf = dhcp.DictModel(conf_dict) expected_args = ['--debug', '--log-file=log_file_name', '--log-dir=tests'] args = config.get_log_args(conf, 'log_file_name') self.assertEqual(expected_args, args) def test_log_args_with_filelog_and_syslog(self): conf_dict = {'debug': True, 'verbose': True, 'log_file': 'tests/filelog', 'log_dir': '/etc/tests', 'use_syslog': True, 'syslog_log_facility': 'LOG_USER'} conf = dhcp.DictModel(conf_dict) expected_args = ['--debug', '--verbose', '--log-file=log_file_name', '--log-dir=/etc/tests/tests'] args = config.get_log_args(conf, 'log_file_name') self.assertEqual(expected_args, args) class TestDhcpAgentEventHandler(base.BaseTestCase): def setUp(self): super(TestDhcpAgentEventHandler, self).setUp() config.register_interface_driver_opts_helper(cfg.CONF) cfg.CONF.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') entry.register_options(cfg.CONF) # register all dhcp cfg options self.plugin_p = mock.patch(DHCP_PLUGIN) plugin_cls = self.plugin_p.start() self.plugin = mock.Mock() plugin_cls.return_value = self.plugin self.cache_p = mock.patch('neutron.agent.dhcp.agent.NetworkCache') cache_cls = self.cache_p.start() self.cache = mock.Mock() cache_cls.return_value = self.cache self.mock_makedirs_p = mock.patch("os.makedirs") self.mock_makedirs = self.mock_makedirs_p.start() self.mock_init_p = mock.patch('neutron.agent.dhcp.agent.' 
'DhcpAgent._populate_networks_cache') self.mock_init = self.mock_init_p.start() self.dhcp = dhcp_agent.DhcpAgent(HOSTNAME) self.call_driver_p = mock.patch.object(self.dhcp, 'call_driver') self.call_driver = self.call_driver_p.start() self.schedule_resync_p = mock.patch.object(self.dhcp, 'schedule_resync') self.schedule_resync = self.schedule_resync_p.start() self.external_process_p = mock.patch( 'neutron.agent.linux.external_process.ProcessManager' ) self.external_process = self.external_process_p.start() def _process_manager_constructor_call(self, ns=FAKE_NETWORK_DHCP_NS): return mock.call(conf=cfg.CONF, uuid=FAKE_NETWORK_UUID, namespace=ns, default_cmd_callback=mock.ANY) def _enable_dhcp_helper(self, network, enable_isolated_metadata=False, is_isolated_network=False): self.dhcp._process_monitor = mock.Mock() if enable_isolated_metadata: cfg.CONF.set_override('enable_isolated_metadata', True) self.plugin.get_network_info.return_value = network self.dhcp.enable_dhcp_helper(network.id) self.plugin.assert_has_calls([ mock.call.get_network_info(network.id)]) self.call_driver.assert_called_once_with('enable', network) self.cache.assert_has_calls([mock.call.put(network)]) if is_isolated_network and enable_isolated_metadata: self.external_process.assert_has_calls([ self._process_manager_constructor_call(), mock.call().enable()]) elif not enable_isolated_metadata: self.external_process.assert_has_calls([ self._process_manager_constructor_call(ns=None), mock.call().disable()]) else: self.assertFalse(self.external_process.call_count) def test_enable_dhcp_helper_enable_metadata_isolated_network(self): self._enable_dhcp_helper(isolated_network, enable_isolated_metadata=True, is_isolated_network=True) def test_enable_dhcp_helper_enable_metadata_no_gateway(self): isolated_network_no_gateway = copy.deepcopy(isolated_network) isolated_network_no_gateway.subnets[0].gateway_ip = None self._enable_dhcp_helper(isolated_network_no_gateway, enable_isolated_metadata=True, is_isolated_network=True) def test_enable_dhcp_helper_enable_metadata_nonisolated_network(self): nonisolated_network = copy.deepcopy(isolated_network) nonisolated_network.ports[0].device_owner = ( const.DEVICE_OWNER_ROUTER_INTF) nonisolated_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1' self._enable_dhcp_helper(nonisolated_network, enable_isolated_metadata=True, is_isolated_network=False) def test_enable_dhcp_helper_enable_metadata_nonisolated_dist_network(self): nonisolated_dist_network.ports[0].device_owner = ( const.DEVICE_OWNER_ROUTER_INTF) nonisolated_dist_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1' nonisolated_dist_network.ports[1].device_owner = ( const.DEVICE_OWNER_DVR_INTERFACE) nonisolated_dist_network.ports[1].fixed_ips[0].ip_address = '172.9.9.1' self._enable_dhcp_helper(nonisolated_dist_network, enable_isolated_metadata=True, is_isolated_network=False) def test_enable_dhcp_helper_enable_metadata_empty_network(self): self._enable_dhcp_helper(empty_network, enable_isolated_metadata=True, is_isolated_network=True) def test_enable_dhcp_helper_enable_metadata_ipv6_ipv4_network(self): self._enable_dhcp_helper(fake_network_ipv6_ipv4, enable_isolated_metadata=True, is_isolated_network=True) def test_enable_dhcp_helper_driver_failure_ipv6_ipv4_network(self): self.plugin.get_network_info.return_value = fake_network_ipv6_ipv4 self.call_driver.return_value = False cfg.CONF.set_override('enable_isolated_metadata', True) with mock.patch.object( self.dhcp, 'enable_isolated_metadata_proxy') as enable_metadata: 
self.dhcp.enable_dhcp_helper(fake_network_ipv6_ipv4.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_network_ipv6_ipv4.id)]) self.call_driver.assert_called_once_with('enable', fake_network_ipv6_ipv4) self.assertFalse(self.cache.called) self.assertFalse(enable_metadata.called) self.assertFalse(self.external_process.called) def test_enable_dhcp_helper(self): self._enable_dhcp_helper(fake_network) def test_enable_dhcp_helper_ipv6_network(self): self._enable_dhcp_helper(fake_network_ipv6) def test_enable_dhcp_helper_down_network(self): self.plugin.get_network_info.return_value = fake_down_network self.dhcp.enable_dhcp_helper(fake_down_network.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_down_network.id)]) self.assertFalse(self.call_driver.called) self.assertFalse(self.cache.called) self.assertFalse(self.external_process.called) def test_enable_dhcp_helper_network_none(self): self.plugin.get_network_info.return_value = None self.dhcp.enable_dhcp_helper('fake_id') self.plugin.assert_has_calls( [mock.call.get_network_info('fake_id')]) self.assertFalse(self.call_driver.called) self.assertFalse(self.dhcp.schedule_resync.called) def test_enable_dhcp_helper_exception_during_rpc(self): self.plugin.get_network_info.side_effect = Exception with mock.patch.object(dhcp_agent.LOG, 'exception') as log: self.dhcp.enable_dhcp_helper(fake_network.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_network.id)]) self.assertFalse(self.call_driver.called) self.assertTrue(log.called) self.assertTrue(self.schedule_resync.called) self.assertFalse(self.cache.called) self.assertFalse(self.external_process.called) def test_enable_dhcp_helper_driver_failure(self): self.plugin.get_network_info.return_value = fake_network self.call_driver.return_value = False cfg.CONF.set_override('enable_isolated_metadata', True) self.dhcp.enable_dhcp_helper(fake_network.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_network.id)]) self.call_driver.assert_called_once_with('enable', fake_network) self.assertFalse(self.cache.called) self.assertFalse(self.external_process.called) def _disable_dhcp_helper_known_network(self, isolated_metadata=False): if isolated_metadata: cfg.CONF.set_override('enable_isolated_metadata', True) self.cache.get_network_by_id.return_value = fake_network self.dhcp.disable_dhcp_helper(fake_network.id) self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_network.id)]) self.call_driver.assert_called_once_with('disable', fake_network) if isolated_metadata: self.external_process.assert_has_calls([ self._process_manager_constructor_call(ns=None), mock.call().disable()]) else: self.assertFalse(self.external_process.call_count) def test_disable_dhcp_helper_known_network_isolated_metadata(self): self._disable_dhcp_helper_known_network(isolated_metadata=True) def test_disable_dhcp_helper_known_network(self): self._disable_dhcp_helper_known_network() def test_disable_dhcp_helper_unknown_network(self): self.cache.get_network_by_id.return_value = None self.dhcp.disable_dhcp_helper('abcdef') self.cache.assert_has_calls( [mock.call.get_network_by_id('abcdef')]) self.assertEqual(0, self.call_driver.call_count) self.assertFalse(self.external_process.called) def _disable_dhcp_helper_driver_failure(self, isolated_metadata=False): if isolated_metadata: cfg.CONF.set_override('enable_isolated_metadata', True) self.cache.get_network_by_id.return_value = fake_network self.call_driver.return_value = False self.dhcp.disable_dhcp_helper(fake_network.id) 
self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_network.id)]) self.call_driver.assert_called_once_with('disable', fake_network) self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_network.id)]) if isolated_metadata: self.external_process.assert_has_calls([ self._process_manager_constructor_call(ns=None), mock.call().disable() ]) else: self.assertFalse(self.external_process.call_count) def test_disable_dhcp_helper_driver_failure_isolated_metadata(self): self._disable_dhcp_helper_driver_failure(isolated_metadata=True) def test_disable_dhcp_helper_driver_failure(self): self._disable_dhcp_helper_driver_failure() def test_enable_isolated_metadata_proxy(self): self.dhcp._process_monitor = mock.Mock() self.dhcp.enable_isolated_metadata_proxy(fake_network) self.external_process.assert_has_calls([ self._process_manager_constructor_call(), mock.call().enable() ]) def test_disable_isolated_metadata_proxy(self): method_path = ('neutron.agent.metadata.driver.MetadataDriver' '.destroy_monitored_metadata_proxy') with mock.patch(method_path) as destroy: self.dhcp.disable_isolated_metadata_proxy(fake_network) destroy.assert_called_once_with(self.dhcp._process_monitor, fake_network.id, cfg.CONF) def _test_enable_isolated_metadata_proxy(self, network): cfg.CONF.set_override('enable_metadata_network', True) cfg.CONF.set_override('debug', True) cfg.CONF.set_override('verbose', False) cfg.CONF.set_override('log_file', 'test.log') method_path = ('neutron.agent.metadata.driver.MetadataDriver' '.spawn_monitored_metadata_proxy') with mock.patch(method_path) as spawn: self.dhcp.enable_isolated_metadata_proxy(network) spawn.assert_called_once_with(self.dhcp._process_monitor, network.namespace, dhcp.METADATA_PORT, cfg.CONF, router_id='forzanapoli') def test_enable_isolated_metadata_proxy_with_metadata_network(self): self._test_enable_isolated_metadata_proxy(fake_meta_network) def test_enable_isolated_metadata_proxy_with_metadata_network_dvr(self): self._test_enable_isolated_metadata_proxy(fake_meta_dvr_network) def test_enable_isolated_metadata_proxy_with_dist_network(self): self._test_enable_isolated_metadata_proxy(fake_dist_network) def _test_disable_isolated_metadata_proxy(self, network): cfg.CONF.set_override('enable_metadata_network', True) method_path = ('neutron.agent.metadata.driver.MetadataDriver' '.destroy_monitored_metadata_proxy') with mock.patch(method_path) as destroy: self.dhcp.enable_isolated_metadata_proxy(network) self.dhcp.disable_isolated_metadata_proxy(network) destroy.assert_called_once_with(self.dhcp._process_monitor, 'forzanapoli', cfg.CONF) def test_disable_isolated_metadata_proxy_with_metadata_network(self): self._test_disable_isolated_metadata_proxy(fake_meta_network) def test_disable_isolated_metadata_proxy_with_metadata_network_dvr(self): self._test_disable_isolated_metadata_proxy(fake_meta_dvr_network) def test_disable_isolated_metadata_proxy_with_dist_network(self): self._test_disable_isolated_metadata_proxy(fake_dist_network) def test_network_create_end(self): payload = dict(network=dict(id=fake_network.id)) with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable: self.dhcp.network_create_end(None, payload) enable.assert_called_once_with(fake_network.id) def test_network_update_end_admin_state_up(self): payload = dict(network=dict(id=fake_network.id, admin_state_up=True)) with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable: self.dhcp.network_update_end(None, payload) enable.assert_called_once_with(fake_network.id) def 
test_network_update_end_admin_state_down(self): payload = dict(network=dict(id=fake_network.id, admin_state_up=False)) with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: self.dhcp.network_update_end(None, payload) disable.assert_called_once_with(fake_network.id) def test_network_delete_end(self): payload = dict(network_id=fake_network.id) with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: self.dhcp.network_delete_end(None, payload) disable.assert_called_once_with(fake_network.id) def test_refresh_dhcp_helper_no_dhcp_enabled_networks(self): network = dhcp.NetModel(dict(id='net-id', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[], ports=[])) self.cache.get_network_by_id.return_value = network self.plugin.get_network_info.return_value = network with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: self.dhcp.refresh_dhcp_helper(network.id) disable.assert_called_once_with(network.id) self.assertFalse(self.cache.called) self.assertFalse(self.call_driver.called) self.cache.assert_has_calls( [mock.call.get_network_by_id('net-id')]) def test_refresh_dhcp_helper_exception_during_rpc(self): network = dhcp.NetModel(dict(id='net-id', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', admin_state_up=True, subnets=[], ports=[])) self.cache.get_network_by_id.return_value = network self.plugin.get_network_info.side_effect = Exception with mock.patch.object(dhcp_agent.LOG, 'exception') as log: self.dhcp.refresh_dhcp_helper(network.id) self.assertFalse(self.call_driver.called) self.cache.assert_has_calls( [mock.call.get_network_by_id('net-id')]) self.assertTrue(log.called) self.assertTrue(self.dhcp.schedule_resync.called) def test_subnet_update_end(self): payload = dict(subnet=dict(network_id=fake_network.id)) self.cache.get_network_by_id.return_value = fake_network self.plugin.get_network_info.return_value = fake_network self.dhcp.subnet_update_end(None, payload) self.cache.assert_has_calls([mock.call.put(fake_network)]) self.call_driver.assert_called_once_with('reload_allocations', fake_network) def test_subnet_update_end_restart(self): new_state = dhcp.NetModel(dict(id=fake_network.id, tenant_id=fake_network.tenant_id, admin_state_up=True, subnets=[fake_subnet1, fake_subnet3], ports=[fake_port1])) payload = dict(subnet=dict(network_id=fake_network.id)) self.cache.get_network_by_id.return_value = fake_network self.plugin.get_network_info.return_value = new_state self.dhcp.subnet_update_end(None, payload) self.cache.assert_has_calls([mock.call.put(new_state)]) self.call_driver.assert_called_once_with('restart', new_state) def test_subnet_update_end_delete_payload(self): prev_state = dhcp.NetModel(dict(id=fake_network.id, tenant_id=fake_network.tenant_id, admin_state_up=True, subnets=[fake_subnet1, fake_subnet3], ports=[fake_port1])) payload = dict(subnet_id=fake_subnet1.id) self.cache.get_network_by_subnet_id.return_value = prev_state self.cache.get_network_by_id.return_value = prev_state self.plugin.get_network_info.return_value = fake_network self.dhcp.subnet_delete_end(None, payload) self.cache.assert_has_calls([ mock.call.get_network_by_subnet_id( 'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'), mock.call.get_network_by_id('12345678-1234-5678-1234567890ab'), mock.call.put(fake_network)]) self.call_driver.assert_called_once_with('restart', fake_network) def test_port_update_end(self): payload = dict(port=fake_port2) self.cache.get_network_by_id.return_value = fake_network self.cache.get_port_by_id.return_value = fake_port2 
self.dhcp.port_update_end(None, payload) self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_port2.network_id), mock.call.put_port(mock.ANY)]) self.call_driver.assert_called_once_with('reload_allocations', fake_network) def test_port_update_change_ip_on_port(self): payload = dict(port=fake_port1) self.cache.get_network_by_id.return_value = fake_network updated_fake_port1 = copy.deepcopy(fake_port1) updated_fake_port1.fixed_ips[0].ip_address = '172.9.9.99' self.cache.get_port_by_id.return_value = updated_fake_port1 self.dhcp.port_update_end(None, payload) self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_port1.network_id), mock.call.put_port(mock.ANY)]) self.call_driver.assert_has_calls( [mock.call.call_driver('reload_allocations', fake_network)]) def test_port_update_change_ip_on_dhcp_agents_port(self): self.cache.get_network_by_id.return_value = fake_network self.cache.get_port_by_id.return_value = fake_port1 payload = dict(port=copy.deepcopy(fake_port1)) device_id = utils.get_dhcp_agent_device_id( payload['port']['network_id'], self.dhcp.conf.host) payload['port']['fixed_ips'][0]['ip_address'] = '172.9.9.99' payload['port']['device_id'] = device_id self.dhcp.port_update_end(None, payload) self.call_driver.assert_has_calls( [mock.call.call_driver('restart', fake_network)]) def test_port_update_on_dhcp_agents_port_no_ip_change(self): self.cache.get_network_by_id.return_value = fake_network self.cache.get_port_by_id.return_value = fake_port1 payload = dict(port=fake_port1) device_id = utils.get_dhcp_agent_device_id( payload['port']['network_id'], self.dhcp.conf.host) payload['port']['device_id'] = device_id self.dhcp.port_update_end(None, payload) self.call_driver.assert_has_calls( [mock.call.call_driver('reload_allocations', fake_network)]) def test_port_delete_end(self): payload = dict(port_id=fake_port2.id) self.cache.get_network_by_id.return_value = fake_network self.cache.get_port_by_id.return_value = fake_port2 self.dhcp.port_delete_end(None, payload) self.cache.assert_has_calls( [mock.call.get_port_by_id(fake_port2.id), mock.call.get_network_by_id(fake_network.id), mock.call.remove_port(fake_port2)]) self.call_driver.assert_has_calls( [mock.call.call_driver('reload_allocations', fake_network)]) def test_port_delete_end_unknown_port(self): payload = dict(port_id='unknown') self.cache.get_port_by_id.return_value = None self.dhcp.port_delete_end(None, payload) self.cache.assert_has_calls([mock.call.get_port_by_id('unknown')]) self.assertEqual(self.call_driver.call_count, 0) class TestDhcpPluginApiProxy(base.BaseTestCase): def _test_dhcp_api(self, method, **kwargs): ctxt = context.get_admin_context() proxy = dhcp_agent.DhcpPluginApi('foo', ctxt, host='foo') with mock.patch.object(proxy.client, 'call') as rpc_mock,\ mock.patch.object(proxy.client, 'prepare') as prepare_mock: prepare_mock.return_value = proxy.client rpc_mock.return_value = kwargs.pop('return_value', []) prepare_args = {} if 'version' in kwargs: prepare_args['version'] = kwargs.pop('version') retval = getattr(proxy, method)(**kwargs) self.assertEqual(retval, rpc_mock.return_value) prepare_mock.assert_called_once_with(**prepare_args) kwargs['host'] = proxy.host rpc_mock.assert_called_once_with(ctxt, method, **kwargs) def test_get_active_networks_info(self): self._test_dhcp_api('get_active_networks_info', version='1.1') def test_get_network_info(self): self._test_dhcp_api('get_network_info', network_id='fake_id', return_value=None) def test_create_dhcp_port(self): 
self._test_dhcp_api('create_dhcp_port', port='fake_port', return_value=None, version='1.1') def test_update_dhcp_port(self): self._test_dhcp_api('update_dhcp_port', port_id='fake_id', port='fake_port', return_value=None, version='1.1') def test_release_dhcp_port(self): self._test_dhcp_api('release_dhcp_port', network_id='fake_id', device_id='fake_id_2') class TestNetworkCache(base.BaseTestCase): def test_put_network(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(nc.cache, {fake_network.id: fake_network}) self.assertEqual(nc.subnet_lookup, {fake_subnet1.id: fake_network.id, fake_subnet2.id: fake_network.id}) self.assertEqual(nc.port_lookup, {fake_port1.id: fake_network.id}) def test_put_network_existing(self): prev_network_info = mock.Mock() nc = dhcp_agent.NetworkCache() with mock.patch.object(nc, 'remove') as remove: nc.cache[fake_network.id] = prev_network_info nc.put(fake_network) remove.assert_called_once_with(prev_network_info) self.assertEqual(nc.cache, {fake_network.id: fake_network}) self.assertEqual(nc.subnet_lookup, {fake_subnet1.id: fake_network.id, fake_subnet2.id: fake_network.id}) self.assertEqual(nc.port_lookup, {fake_port1.id: fake_network.id}) def test_remove_network(self): nc = dhcp_agent.NetworkCache() nc.cache = {fake_network.id: fake_network} nc.subnet_lookup = {fake_subnet1.id: fake_network.id, fake_subnet2.id: fake_network.id} nc.port_lookup = {fake_port1.id: fake_network.id} nc.remove(fake_network) self.assertEqual(len(nc.cache), 0) self.assertEqual(len(nc.subnet_lookup), 0) self.assertEqual(len(nc.port_lookup), 0) def test_get_network_by_id(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(nc.get_network_by_id(fake_network.id), fake_network) def test_get_network_ids(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(list(nc.get_network_ids()), [fake_network.id]) def test_get_network_by_subnet_id(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(nc.get_network_by_subnet_id(fake_subnet1.id), fake_network) def test_get_network_by_port_id(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(nc.get_network_by_port_id(fake_port1.id), fake_network) def test_put_port(self): fake_net = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', subnets=[fake_subnet1], ports=[fake_port1])) nc = dhcp_agent.NetworkCache() nc.put(fake_net) nc.put_port(fake_port2) self.assertEqual(len(nc.port_lookup), 2) self.assertIn(fake_port2, fake_net.ports) def test_put_port_existing(self): fake_net = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', subnets=[fake_subnet1], ports=[fake_port1, fake_port2])) nc = dhcp_agent.NetworkCache() nc.put(fake_net) nc.put_port(fake_port2) self.assertEqual(len(nc.port_lookup), 2) self.assertIn(fake_port2, fake_net.ports) def test_remove_port_existing(self): fake_net = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', subnets=[fake_subnet1], ports=[fake_port1, fake_port2])) nc = dhcp_agent.NetworkCache() nc.put(fake_net) nc.remove_port(fake_port2) self.assertEqual(len(nc.port_lookup), 1) self.assertNotIn(fake_port2, fake_net.ports) def test_get_port_by_id(self): nc = dhcp_agent.NetworkCache() nc.put(fake_network) self.assertEqual(nc.get_port_by_id(fake_port1.id), fake_port1) class FakePort1(object): id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' class FakeV4Subnet(object): id = 
'dddddddd-dddd-dddd-dddd-dddddddddddd' ip_version = 4 cidr = '192.168.0.0/24' gateway_ip = '192.168.0.1' enable_dhcp = True class FakeV4SubnetOutsideGateway(FakeV4Subnet): gateway_ip = '192.168.1.1' class FakeV4SubnetNoGateway(object): id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' ip_version = 4 cidr = '192.168.1.0/24' gateway_ip = None enable_dhcp = True class FakeV4Network(object): id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' subnets = [FakeV4Subnet()] ports = [FakePort1()] namespace = 'qdhcp-aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' class FakeV4NetworkOutsideGateway(FakeV4Network): subnets = [FakeV4SubnetOutsideGateway()] class FakeV4NetworkNoSubnet(object): id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' subnets = [] ports = [] class FakeV4NetworkNoGateway(object): id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' subnets = [FakeV4SubnetNoGateway()] ports = [FakePort1()] class TestDeviceManager(base.BaseTestCase): def setUp(self): super(TestDeviceManager, self).setUp() config.register_interface_driver_opts_helper(cfg.CONF) cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS) cfg.CONF.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') cfg.CONF.set_override('enable_isolated_metadata', True) self.ensure_device_is_ready_p = mock.patch( 'neutron.agent.linux.ip_lib.ensure_device_is_ready') self.ensure_device_is_ready = (self.ensure_device_is_ready_p.start()) self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') self.iproute_cls_p = mock.patch('neutron.agent.linux.' 'ip_lib.IpRouteCommand') driver_cls = self.dvr_cls_p.start() iproute_cls = self.iproute_cls_p.start() self.mock_driver = mock.MagicMock() self.mock_driver.DEV_NAME_LEN = ( interface.LinuxInterfaceDriver.DEV_NAME_LEN) self.mock_driver.use_gateway_ips = False self.mock_iproute = mock.MagicMock() driver_cls.return_value = self.mock_driver iproute_cls.return_value = self.mock_iproute iptables_cls_p = mock.patch( 'neutron.agent.linux.iptables_manager.IptablesManager') iptables_cls = iptables_cls_p.start() self.iptables_inst = mock.Mock() iptables_cls.return_value = self.iptables_inst self.mangle_inst = mock.Mock() self.iptables_inst.ipv4 = {'mangle': self.mangle_inst} self.mock_ip_wrapper_p = mock.patch("neutron.agent.linux.ip_lib." 
"IPWrapper") self.mock_ip_wrapper = self.mock_ip_wrapper_p.start() def _test_setup_helper(self, device_is_ready, net=None, port=None): net = net or fake_network port = port or fake_port1 plugin = mock.Mock() plugin.create_dhcp_port.return_value = port or fake_port1 self.ensure_device_is_ready.return_value = device_is_ready self.mock_driver.get_device_name.return_value = 'tap12345678-12' dh = dhcp.DeviceManager(cfg.CONF, plugin) dh._set_default_route = mock.Mock() dh._cleanup_stale_devices = mock.Mock() interface_name = dh.setup(net) self.assertEqual(interface_name, 'tap12345678-12') plugin.assert_has_calls([ mock.call.create_dhcp_port( {'port': {'name': '', 'admin_state_up': True, 'network_id': net.id, 'tenant_id': net.tenant_id, 'fixed_ips': [{'subnet_id': port.fixed_ips[0].subnet_id}], 'device_id': mock.ANY}})]) if port == fake_ipv6_port: expected_ips = ['169.254.169.254/16'] else: expected_ips = ['172.9.9.9/24', '169.254.169.254/16'] expected = [ mock.call.get_device_name(port), mock.call.init_l3( 'tap12345678-12', expected_ips, namespace=net.namespace)] if not device_is_ready: expected.insert(1, mock.call.plug(net.id, port.id, 'tap12345678-12', 'aa:bb:cc:dd:ee:ff', namespace=net.namespace, mtu=None)) self.mock_driver.assert_has_calls(expected) dh._set_default_route.assert_called_once_with(net, 'tap12345678-12') def test_setup(self): cfg.CONF.set_override('enable_metadata_network', False) self._test_setup_helper(False) cfg.CONF.set_override('enable_metadata_network', True) self._test_setup_helper(False) def test_setup_calls_fill_dhcp_udp_checksums(self): self._test_setup_helper(False) rule = ('-p udp -m udp --dport %d -j CHECKSUM --checksum-fill' % const.DHCP_RESPONSE_PORT) expected = [mock.call.add_rule('POSTROUTING', rule)] self.mangle_inst.assert_has_calls(expected) def test_setup_create_dhcp_port(self): with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: plugin = mock.Mock() device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None net = copy.deepcopy(fake_network) plugin.create_dhcp_port.return_value = fake_dhcp_port dh = dhcp.DeviceManager(cfg.CONF, plugin) dh.setup(net) plugin.assert_has_calls([ mock.call.create_dhcp_port( {'port': {'name': '', 'admin_state_up': True, 'network_id': net.id, 'tenant_id': net.tenant_id, 'fixed_ips': [{'subnet_id': fake_dhcp_port.fixed_ips[0].subnet_id}], 'device_id': mock.ANY}})]) self.assertIn(fake_dhcp_port, net.ports) def test_setup_plug_exception(self): plugin = mock.Mock() plugin.create_dhcp_port.return_value = fake_dhcp_port self.ensure_device_is_ready.return_value = False self.mock_driver.get_device_name.return_value = 'tap12345678-12' dh = dhcp.DeviceManager(cfg.CONF, plugin) dh._set_default_route = mock.Mock() dh._cleanup_stale_devices = mock.Mock() dh.driver = mock.Mock() dh.driver.plug.side_effect = OSError() net = copy.deepcopy(fake_network) self.assertRaises(OSError, dh.setup, net) plugin.release_dhcp_port.assert_called_once_with( net.id, mock.ANY) def test_setup_ipv6(self): self._test_setup_helper(True, net=fake_network_ipv6, port=fake_ipv6_port) def test_setup_device_is_ready(self): self._test_setup_helper(True) def test_create_dhcp_port_raise_conflict(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) plugin.create_dhcp_port.return_value = None self.assertRaises(exceptions.Conflict, dh.setup_dhcp_port, fake_network) def test_create_dhcp_port_create_new(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) plugin.create_dhcp_port.return_value 
= fake_network.ports[0] dh.setup_dhcp_port(fake_network) plugin.assert_has_calls([ mock.call.create_dhcp_port( {'port': {'name': '', 'admin_state_up': True, 'network_id': fake_network.id, 'tenant_id': fake_network.tenant_id, 'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id}], 'device_id': mock.ANY}})]) def test_create_dhcp_port_update_add_subnet(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) fake_network_copy = copy.deepcopy(fake_network) fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) fake_network_copy.subnets[1].enable_dhcp = True plugin.update_dhcp_port.return_value = fake_network.ports[0] dh.setup_dhcp_port(fake_network_copy) port_body = {'port': { 'network_id': fake_network.id, 'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id, 'ip_address': fake_fixed_ip1.ip_address}, {'subnet_id': fake_subnet2.id}]}} plugin.assert_has_calls([ mock.call.update_dhcp_port(fake_network_copy.ports[0].id, port_body)]) def test_update_dhcp_port_raises_conflict(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) fake_network_copy = copy.deepcopy(fake_network) fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) fake_network_copy.subnets[1].enable_dhcp = True plugin.update_dhcp_port.return_value = None self.assertRaises(exceptions.Conflict, dh.setup_dhcp_port, fake_network_copy) def test_create_dhcp_port_no_update_or_create(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) fake_network_copy = copy.deepcopy(fake_network) fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) dh.setup_dhcp_port(fake_network_copy) self.assertFalse(plugin.setup_dhcp_port.called) self.assertFalse(plugin.update_dhcp_port.called) def test_setup_dhcp_port_with_non_enable_dhcp_subnet(self): plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) fake_network_copy = copy.deepcopy(fake_network) fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) plugin.update_dhcp_port.return_value = fake_port1 self.assertEqual(fake_subnet1.id, dh.setup_dhcp_port(fake_network_copy).fixed_ips[0].subnet_id) def test_destroy(self): fake_net = dhcp.NetModel( dict(id=FAKE_NETWORK_UUID, tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls: mock_driver = mock.MagicMock() mock_driver.get_device_name.return_value = 'tap12345678-12' dvr_cls.return_value = mock_driver plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) dh.destroy(fake_net, 'tap12345678-12') dvr_cls.assert_called_once_with(cfg.CONF) mock_driver.assert_has_calls( [mock.call.unplug('tap12345678-12', namespace='qdhcp-' + fake_net.id)]) plugin.assert_has_calls( [mock.call.release_dhcp_port(fake_net.id, mock.ANY)]) def test_destroy_with_none(self): fake_net = dhcp.NetModel( dict(id=FAKE_NETWORK_UUID, tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls: mock_driver = mock.MagicMock() mock_driver.get_device_name.return_value = 'tap12345678-12' dvr_cls.return_value = mock_driver plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) dh.destroy(fake_net, None) dvr_cls.assert_called_once_with(cfg.CONF) plugin.assert_has_calls( [mock.call.release_dhcp_port(fake_net.id, mock.ANY)]) self.assertFalse(mock_driver.called) def test_get_interface_name(self): fake_net = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) fake_port = dhcp.DictModel( 
dict(id='12345678-1234-aaaa-1234567890ab', mac_address='aa:bb:cc:dd:ee:ff')) with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls: mock_driver = mock.MagicMock() mock_driver.get_device_name.return_value = 'tap12345678-12' dvr_cls.return_value = mock_driver plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, plugin) dh.get_interface_name(fake_net, fake_port) dvr_cls.assert_called_once_with(cfg.CONF) mock_driver.assert_has_calls( [mock.call.get_device_name(fake_port)]) self.assertEqual(len(plugin.mock_calls), 0) def test_get_device_id(self): fake_net = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ab', tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) expected = ('dhcp1ae5f96c-c527-5079-82ea-371a01645457-12345678-1234-' '5678-1234567890ab') # the DHCP port name only contains the hostname and not the domain name local_hostname = cfg.CONF.host.split('.')[0] with mock.patch('uuid.uuid5') as uuid5: uuid5.return_value = '1ae5f96c-c527-5079-82ea-371a01645457' dh = dhcp.DeviceManager(cfg.CONF, None) self.assertEqual(dh.get_device_id(fake_net), expected) uuid5.assert_called_once_with(uuid.NAMESPACE_DNS, local_hostname) def test_update(self): # Try with namespaces and no metadata network cfg.CONF.set_override('enable_metadata_network', False) dh = dhcp.DeviceManager(cfg.CONF, None) dh._set_default_route = mock.Mock() network = mock.Mock() dh.update(network, 'ns-12345678-12') dh._set_default_route.assert_called_once_with(network, 'ns-12345678-12') # Meta data network enabled, don't interfere with its gateway. cfg.CONF.set_override('enable_metadata_network', True) dh = dhcp.DeviceManager(cfg.CONF, None) dh._set_default_route = mock.Mock() dh.update(FakeV4Network(), 'ns-12345678-12') self.assertTrue(dh._set_default_route.called) def test_set_default_route(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None # Basic one subnet with gateway. network = FakeV4Network() dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) device.route.add_gateway.assert_called_once_with('192.168.0.1') def test_set_default_route_outside_subnet(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None # Basic one subnet with gateway outside the subnet. 
network = FakeV4NetworkOutsideGateway() dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) device.route.add_route.assert_called_once_with('192.168.1.1', scope='link') device.route.add_gateway.assert_called_once_with('192.168.1.1') def test_set_default_route_no_subnet(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None network = FakeV4NetworkNoSubnet() network.namespace = 'qdhcp-1234' dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) self.assertFalse(device.route.add_gateway.called) def test_set_default_route_no_subnet_delete_gateway(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = dict(gateway='192.168.0.1') network = FakeV4NetworkNoSubnet() network.namespace = 'qdhcp-1234' dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) device.route.delete_gateway.assert_called_once_with('192.168.0.1') self.assertFalse(device.route.add_gateway.called) def test_set_default_route_no_gateway(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = dict(gateway='192.168.0.1') network = FakeV4NetworkNoGateway() network.namespace = 'qdhcp-1234' dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) device.route.delete_gateway.assert_called_once_with('192.168.0.1') self.assertFalse(device.route.add_gateway.called) def test_set_default_route_do_nothing(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = dict(gateway='192.168.0.1') network = FakeV4Network() dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) self.assertFalse(device.route.add_gateway.called) def test_set_default_route_change_gateway(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = dict(gateway='192.168.0.2') network = FakeV4Network() dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) device.route.add_gateway.assert_called_once_with('192.168.0.1') def test_set_default_route_change_gateway_outside_subnet(self): dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.list_onlink_routes.return_value = ( [{'cidr': '192.168.2.1'}]) device.route.get_gateway.return_value = dict(gateway='192.168.2.1') network = FakeV4NetworkOutsideGateway() dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) 
self.assertEqual(device.route.list_onlink_routes.call_count, 2) self.assertFalse(device.route.delete_gateway.called) device.route.delete_route.assert_called_once_with('192.168.2.1', scope='link') device.route.add_route.assert_called_once_with('192.168.1.1', scope='link') device.route.add_gateway.assert_called_once_with('192.168.1.1') def test_set_default_route_two_subnets(self): # Try two subnets. Should set gateway from the first. dh = dhcp.DeviceManager(cfg.CONF, None) with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None network = FakeV4Network() subnet2 = FakeV4Subnet() subnet2.gateway_ip = '192.168.1.1' network.subnets = [subnet2, FakeV4Subnet()] dh._set_default_route(network, 'tap-name') self.assertEqual(device.route.get_gateway.call_count, 1) self.assertFalse(device.route.delete_gateway.called) device.route.add_gateway.assert_called_once_with('192.168.1.1') class TestDictModel(base.BaseTestCase): def test_basic_dict(self): d = dict(a=1, b=2) m = dhcp.DictModel(d) self.assertEqual(m.a, 1) self.assertEqual(m.b, 2) def test_dict_has_sub_dict(self): d = dict(a=dict(b=2)) m = dhcp.DictModel(d) self.assertEqual(m.a.b, 2) def test_dict_contains_list(self): d = dict(a=[1, 2]) m = dhcp.DictModel(d) self.assertEqual(m.a, [1, 2]) def test_dict_contains_list_of_dicts(self): d = dict(a=[dict(b=2), dict(c=3)]) m = dhcp.DictModel(d) self.assertEqual(m.a[0].b, 2) self.assertEqual(m.a[1].c, 3)
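# The TestDictModel cases above assert that dhcp.DictModel exposes dict keys
# as attributes and recurses into nested dicts and lists of dicts.  Below is
# a minimal standalone sketch of that behaviour for readers without the
# neutron tree on hand; it is not the neutron implementation and omits
# details such as copying and deletion.
class _DictModelSketch(object):
    def __init__(self, d):
        for key, value in d.items():
            if isinstance(value, dict):
                value = _DictModelSketch(value)
            elif isinstance(value, list):
                value = [_DictModelSketch(item) if isinstance(item, dict)
                         else item for item in value]
            setattr(self, key, value)

# Mirrors test_dict_has_sub_dict and test_dict_contains_list_of_dicts:
_m = _DictModelSketch(dict(a=dict(b=2), c=[dict(d=3), 4]))
assert _m.a.b == 2
assert _m.c[0].d == 3 and _m.c[1] == 4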
# ================================================================================================== # Copyright 2014 Twitter, Inc. # -------------------------------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this work except in compliance with the License. # You may obtain a copy of the License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ================================================================================================== from collections import namedtuple import struct from .util import ( parent_path, ParsingError, read_bool, read_buffer, read_int_bool_int, read_int_long_int_long, read_long, read_number, read_string, StringTooLong, ) from .zookeeper import ( AUTH_XID, can_set_watch, DeserializationError, has_path, MultiHeader, OpCodes, PING_XID, read_opcode, read_path, req_type_to_str, SET_WATCHES_XID, ZK_VALID_PROTOCOL_VERSIONS, ZK_WRITE_OPS, ) class ClientMessageType(type): TYPES = {} OPCODE = None def __new__(cls, clsname, bases, dct): obj = super(ClientMessageType, cls).__new__(cls, clsname, bases, dct) if obj.OPCODE in cls.TYPES: raise ValueError("Duplicate class/opcode name: %s" % obj.OPCODE) else: if obj.OPCODE is not None: cls.TYPES[obj.OPCODE] = obj return obj @classmethod def get(cls, key, default=None): return cls.TYPES.get(key, default) class ClientMessage(ClientMessageType('ClientMessageType', (object,), {})): """ client and server are ipaddr:port - it could be IPv6 so deal with that """ __slots__ = ("size", "xid", "path", "client", "timestamp", "watch", "auth", "server") def __init__(self, size, xid, path, client, watch, server): self.size = size self.xid = xid self.path = intern(path) self.client = client self.server = server self.watch = watch self.timestamp = 0 # this will be set by caller later on self.auth = "" # ditto MAX_REQUEST_SIZE = 100 * 1024 * 1024 @classmethod def with_params(cls, xid, path, watch, data, offset, size, client, server): """ Build a ClientMessage with the given params, possibly parsing some more. This must be overridden by subclasses so the specific parameters to each subclass can be extracted. :param xid: the transaction id associated with this ClientMessage :param path: the path associated with this ClientMessage, if any :param watch: a boolean indicating if watch on the path should be set :param data: the remaining data of the associated data packet :param offset: the offset from which the data should be read :param size: the total size of this ClientMessage :param client: the ip:port generating this ClientMessage :returns: Returns an instance of the specific ClientMessage subclass. :raises DeserializationError: if parsing the ClientMessage fails. 
""" return cls(size, xid, path, client, watch, server) @classmethod def from_payload(cls, data, client, server): length, offset = read_number(data, 0) # Note: the C library doesn't include the length at the start if length >= cls.MAX_REQUEST_SIZE or length in (PING_XID, AUTH_XID, SET_WATCHES_XID): xid = length length = 0 elif length == 0: length = 0 return ConnectRequest.with_params(None, None, None, data, 0, length, client, server) elif length < 0: raise DeserializationError("Bad request length: %d" % (length)) else: offs_start = offset # if the xid is 0, it's a Connect so save the offset xid, offset = read_number(data, offset) if xid not in (PING_XID, AUTH_XID, SET_WATCHES_XID) and xid < 0: raise DeserializationError("Wrong XID: %d" % (xid)) elif xid in ZK_VALID_PROTOCOL_VERSIONS: try: return ConnectRequest.with_params(None, None, None, data, offs_start, length, client, server) except DeserializationError: pass opcode, offset = read_opcode(data, offset) path, offset = read_path(data, offset) if has_path(opcode) else ("", offset) length, offset = read_number(data, offset) if length == 0 else (length, offset) watch, offset = read_bool(data, offset) if can_set_watch(opcode) else (False, offset) handler = ClientMessageType.get(opcode, cls) return handler.with_params(xid, path, watch, data, offset, length, client, server) @property def name(self): return req_type_to_str(self.opcode) @property def ip(self): """ client is ipaddr:port (maybe IPv6) """ return self.client.rsplit(":", 1)[0] @property def port(self): """ client is ipaddr:port (maybe IPv6) """ p = self.client.rfind(":") return self.client[p + 1:] def parent_path(self, level): return parent_path(self.path, level) @property def is_write(self): return self.opcode in ZK_WRITE_OPS @property def is_ping(self): return self.xid == PING_XID @property def is_auth(self): return self.xid == AUTH_XID @property def is_close(self): return self.opcode == OpCodes.CLOSE @property def opcode(self): return self.OPCODE def __str__(self): return "%s(xid=%d, path=%s, watch=%s, size=%d, client=%s)\n" % ( self.name, self.xid, self.path, self.watch, self.size, self.client) # TODO: Connect, Ping and Auth should probably inherit from ClientMessage since # they are not properly requests "on the ZK Datatree". 
class Request(ClientMessage): pass class PingRequest(Request): OPCODE = OpCodes.PING def __init__(self, client, server): super(PingRequest, self).__init__(0, PING_XID, "", client, False, server) @classmethod def with_params(cls, xid, path, watch, data, offset, size, client, server): return cls(client, server) def __str__(self): return "%s(client=%s)\n" % (self.name, self.client) class ConnectRequest(Request): OPCODE = OpCodes.CONNECT def __init__(self, size, client, protocol, readonly, session, password, zxid, timeout, server): self.protocol = protocol self.readonly = readonly self.session = session self.password = password self.zxid = zxid self.timeout = timeout super(ConnectRequest, self).__init__(size, 0, "", client, False, server) @classmethod def with_params(cls, xid, path, watch, data, offset, size, client, server): try: (protocol, zxid, timeout, session), offset = read_int_long_int_long(data, offset) except struct.error: raise DeserializationError("Couldn't read connect request") if protocol not in ZK_VALID_PROTOCOL_VERSIONS: raise DeserializationError("Bad protocol version: %d" % (protocol)) password, offset = read_buffer(data, offset) readonly, offset = read_bool(data, offset) return cls(size, client, protocol, readonly, session, password, zxid, timeout, server) @property def name(self): return "%s%s" % ("Re" if self.is_reconnect else "", super(ConnectRequest, self).name) @property def is_reconnect(self): return int(self.session) != 0 def __str__(self): return "%s(ver=%s, zxid=%s, timeout=%s, session=0x%x, readonly=%s, client=%s)\n" % ( self.name, self.protocol, self.zxid, self.timeout, self.session, self.readonly, self.client) class SetAuthRequest(Request): OPCODE = OpCodes.SETAUTH def __init__(self, auth_type, scheme, credential, size, client, server): self.auth_type = auth_type self.scheme = scheme self.credential = credential # HACK: use part of the cred as the path so we can track auth requests path = "/%s" % (self.credential if self.credential else "") super(SetAuthRequest, self).__init__(size, AUTH_XID, path, client, False, server) @classmethod def with_params(cls, xid, path, watch, data, offset, size, client, server): auth_type, offset = read_number(data, offset) scheme = "very-long-scheme" credential = "very-long-credential" try: scheme, offset = read_string(data, offset) credential, offset = read_string(data, offset) except StringTooLong: pass return cls(auth_type, intern(scheme), intern(credential), size, client, server) def __str__(self): return "%s(type=%s, scheme=%s, credential=%s, client=%s)\n" % ( self.name, self.auth_type, self.scheme, self.credential, self.client) class CloseRequest(Request): OPCODE = OpCodes.CLOSE def __init__(self, size, xid, client, server): super(CloseRequest, self).__init__(size, xid, "", client, False, server) @classmethod def with_params(cls, xid, path, watch, data, offset, size, client, server): return cls(size, xid, client, server) def __str__(self): return "%s(client=%s, server=%s)\n" % (self.name, self.client, self.server) class Acl(namedtuple("Acl", "perm scheme cred")): pass class CreateRequest(Request): OPCODE = OpCodes.CREATE MAX_ACLS = 10 MAX_PKT_SIZE = 8192 def __init__(self, size, xid, path, client, watch, ephemeral, sequence, acls, server): super(CreateRequest, self).__init__(size, xid, path, client, watch, server) self.ephemeral = ephemeral self.sequence = sequence self.acls = acls @classmethod def with_params(cls, xid, path, watch, data, offset, size, client, server): acls = [] ephemeral = False sequence = False data_length, 
offset = read_number(data, offset) if data_length >= 0 and data_length < cls.MAX_PKT_SIZE: offset += data_length acls_count, offset = read_number(data, offset) if acls_count < cls.MAX_ACLS: bad_acls = False for i in range(0, acls_count): perms, offset = read_number(data, offset) try: scheme, offset = read_string(data, offset) cred, offset = read_string(data, offset) except StringTooLong: bad_acls = True break acls.append(Acl(perms, scheme, cred)) if not bad_acls: flags, offset = read_number(data, offset) ephemeral = flags & 0x1 == 1 sequence = flags & 0x2 == 2 return cls(size, xid, path, client, watch, ephemeral, sequence, acls, server) @property def name(self): return "CreateEphemeralRequest" if self.ephemeral else self.__class__.__name__ def __str__(self): return "%s(size=%d, xid=%d, path=%s, ephemeral=%s, sequence=%s, client=%s)\n" % ( self.name, self.size, self.xid, self.path, self.ephemeral, self.sequence, self.client) class Create2Request(CreateRequest): OPCODE = OpCodes.CREATE2 class ReconfigRequest(Request): OPCODE = OpCodes.RECONFIG def __init__(self, size, xid, client, server, joiningServers, leavingServers, newMembers): super(ReconfigRequest, self).__init__(size, xid, "", client, False, server) self.joiningServers = joiningServers self.leavingServers = leavingServers self.newMembers = newMembers @classmethod def with_params(cls, xid, path, watch, data, offset, size, client, server): joiningServers, offset = read_string(data, offset) offset = offset + 4 if joiningServers == "" else offset leavingServers, offset = read_string(data, offset) offset = offset + 4 if leavingServers == "" else offset newMembers, offset = read_string(data, offset) return cls(size, xid, client, server, joiningServers, leavingServers, newMembers) def __str__(self): return "%s(xid=%d, joiningServers=%s, leavingServers=%s, newMembers=%s)\n" % ( self.name, self.xid, self.joiningServers, self.leavingServers, self.newMembers) class DeleteRequest(Request): OPCODE = OpCodes.DELETE class SetWatchesRequest(Request): OPCODE = OpCodes.SETWATCHES MAX_WATCHES = 100 class TooManyWatches(ParsingError): pass def __init__(self, size, xid, path, client, relzxid, data, exist, child, server): super(SetWatchesRequest, self).__init__(size, xid, path, client, True, server) self.relzxid = relzxid self.data = data self.exist = exist self.child = child @classmethod def with_params(cls, xid, path, watch, data, offset, size, client, server): relzxid, offset = read_long(data, offset) dataw = existw = childw = [] try: dataw, offset = cls.read_strings(data, offset) existw, offset = cls.read_strings(data, offset) childw, offset = cls.read_strings(data, offset) except ParsingError: pass return cls(size, xid, path, client, relzxid, dataw, existw, childw, server) @classmethod def read_strings(cls, data, offset): """ reads a list<str> Note: this might return early if a very long string is found. So the returned offset might not be the actual offset. 
""" strs = [] num_strs, offset = read_number(data, offset) if num_strs > cls.MAX_WATCHES: raise cls.TooManyWatches() for i in range(0, num_strs): s, offset = read_string(data, offset) strs.append(s) return (strs, offset) def __str__(self): return "%s(relzxid=%d, data=%s, exist=%s, child=%s, client=%s)\n" % ( self.name, self.relzxid, self.data, self.exist, self.child, self.client) class MultiRequest(Request): OPCODE = OpCodes.MULTI def __init__(self, size, xid, client, first_header, server, path): super(MultiRequest, self).__init__(size, xid, path, client, True, server) self.headers = [first_header] @classmethod def with_params(cls, xid, path, watch, data, offset, size, client, server): (first_opcode, done, err), offset = read_int_bool_int(data, offset) # get path from 1st op path = "/" if has_path(first_opcode): path, _ = read_path(data, offset) return cls(size, xid, client, MultiHeader(first_opcode, done, err), server, path) def __str__(self): return "%s(%s, path=%s, client=%s)\n" % (self.name, self.headers[0], self.path, self.client) class GetChildrenRequest(Request): OPCODE = OpCodes.GETCHILDREN class GetChildren2Request(Request): OPCODE = OpCodes.GETCHILDREN2 class Check(Request): OPCODE = OpCodes.CHECK class GetDataRequest(Request): OPCODE = OpCodes.GETDATA class ExistsRequest(Request): OPCODE = OpCodes.EXISTS class SyncRequest(Request): OPCODE = OpCodes.SYNC class SetDataRequest(Request): OPCODE = OpCodes.SETDATA class GetAclRequest(Request): OPCODE = OpCodes.GETACL class SetAclRequest(Request): OPCODE = OpCodes.SETACL
""" Script to test database capabilities and the DB-API interface for functionality and memory leaks. Adapted from a script by M-A Lemburg. """ import sys from time import time try: import unittest2 as unittest except ImportError: import unittest PY2 = sys.version_info[0] == 2 class DatabaseTest(unittest.TestCase): db_module = None connect_args = () connect_kwargs = dict(use_unicode=True, charset="utf8mb4", binary_prefix=True) create_table_extra = "ENGINE=INNODB CHARACTER SET UTF8MB4" rows = 10 debug = False def setUp(self): db = self.db_module.connect(*self.connect_args, **self.connect_kwargs) self.connection = db self.cursor = db.cursor() self.BLOBText = ''.join([chr(i) for i in range(256)] * 100); if PY2: self.BLOBUText = unicode().join(unichr(i) for i in range(16834)) else: self.BLOBUText = "".join(chr(i) for i in range(16834)) data = bytearray(range(256)) * 16 self.BLOBBinary = self.db_module.Binary(data) leak_test = True def tearDown(self): if self.leak_test: import gc del self.cursor orphans = gc.collect() self.assertFalse(orphans, "%d orphaned objects found after deleting cursor" % orphans) del self.connection orphans = gc.collect() self.assertFalse(orphans, "%d orphaned objects found after deleting connection" % orphans) def table_exists(self, name): try: self.cursor.execute('select * from %s where 1=0' % name) except Exception: return False else: return True def quote_identifier(self, ident): return '"%s"' % ident def new_table_name(self): i = id(self.cursor) while True: name = self.quote_identifier('tb%08x' % i) if not self.table_exists(name): return name i = i + 1 def create_table(self, columndefs): """ Create a table using a list of column definitions given in columndefs. generator must be a function taking arguments (row_number, col_number) returning a suitable data object for insertion into the table. 
""" self.table = self.new_table_name() self.cursor.execute('CREATE TABLE %s (%s) %s' % (self.table, ',\n'.join(columndefs), self.create_table_extra)) def check_data_integrity(self, columndefs, generator): # insert self.create_table(columndefs) insert_statement = ('INSERT INTO %s VALUES (%s)' % (self.table, ','.join(['%s'] * len(columndefs)))) data = [ [ generator(i,j) for j in range(len(columndefs)) ] for i in range(self.rows) ] if self.debug: print(data) self.cursor.executemany(insert_statement, data) self.connection.commit() # verify self.cursor.execute('select * from %s' % self.table) l = self.cursor.fetchall() if self.debug: print(l) self.assertEqual(len(l), self.rows) try: for i in range(self.rows): for j in range(len(columndefs)): self.assertEqual(l[i][j], generator(i,j)) finally: if not self.debug: self.cursor.execute('drop table %s' % (self.table)) def test_transactions(self): columndefs = ( 'col1 INT', 'col2 VARCHAR(255)') def generator(row, col): if col == 0: return row else: return ('%i' % (row%10))*255 self.create_table(columndefs) insert_statement = ('INSERT INTO %s VALUES (%s)' % (self.table, ','.join(['%s'] * len(columndefs)))) data = [ [ generator(i,j) for j in range(len(columndefs)) ] for i in range(self.rows) ] self.cursor.executemany(insert_statement, data) # verify self.connection.commit() self.cursor.execute('select * from %s' % self.table) l = self.cursor.fetchall() self.assertEqual(len(l), self.rows) for i in range(self.rows): for j in range(len(columndefs)): self.assertEqual(l[i][j], generator(i,j)) delete_statement = 'delete from %s where col1=%%s' % self.table self.cursor.execute(delete_statement, (0,)) self.cursor.execute('select col1 from %s where col1=%s' % \ (self.table, 0)) l = self.cursor.fetchall() self.assertFalse(l, "DELETE didn't work") self.connection.rollback() self.cursor.execute('select col1 from %s where col1=%s' % \ (self.table, 0)) l = self.cursor.fetchall() self.assertTrue(len(l) == 1, "ROLLBACK didn't work") self.cursor.execute('drop table %s' % (self.table)) def test_truncation(self): columndefs = ( 'col1 INT', 'col2 VARCHAR(255)') def generator(row, col): if col == 0: return row else: return ('%i' % (row%10))*((255-self.rows//2)+row) self.create_table(columndefs) insert_statement = ('INSERT INTO %s VALUES (%s)' % (self.table, ','.join(['%s'] * len(columndefs)))) try: self.cursor.execute(insert_statement, (0, '0'*256)) except Warning: if self.debug: print(self.cursor.messages) except self.connection.DataError: pass else: self.fail("Over-long column did not generate warnings/exception with single insert") self.connection.rollback() try: for i in range(self.rows): data = [] for j in range(len(columndefs)): data.append(generator(i,j)) self.cursor.execute(insert_statement,tuple(data)) except Warning: if self.debug: print(self.cursor.messages) except self.connection.DataError: pass else: self.fail("Over-long columns did not generate warnings/exception with execute()") self.connection.rollback() try: data = [ [ generator(i,j) for j in range(len(columndefs)) ] for i in range(self.rows) ] self.cursor.executemany(insert_statement, data) except Warning: if self.debug: print(self.cursor.messages) except self.connection.DataError: pass else: self.fail("Over-long columns did not generate warnings/exception with executemany()") self.connection.rollback() self.cursor.execute('drop table %s' % (self.table)) def test_CHAR(self): # Character data def generator(row,col): return ('%i' % ((row+col) % 10)) * 255 self.check_data_integrity( ('col1 char(255)','col2 
char(255)'), generator) def test_INT(self): # Number data def generator(row,col): return row*row self.check_data_integrity( ('col1 INT',), generator) def test_DECIMAL(self): # DECIMAL def generator(row,col): from decimal import Decimal return Decimal("%d.%02d" % (row, col)) self.check_data_integrity( ('col1 DECIMAL(5,2)',), generator) def test_DATE(self): ticks = time() def generator(row,col): return self.db_module.DateFromTicks(ticks+row*86400-col*1313) self.check_data_integrity( ('col1 DATE',), generator) def test_TIME(self): ticks = time() def generator(row,col): return self.db_module.TimeFromTicks(ticks+row*86400-col*1313) self.check_data_integrity( ('col1 TIME',), generator) def test_DATETIME(self): ticks = time() def generator(row,col): return self.db_module.TimestampFromTicks(ticks+row*86400-col*1313) self.check_data_integrity( ('col1 DATETIME',), generator) def test_TIMESTAMP(self): ticks = time() def generator(row,col): return self.db_module.TimestampFromTicks(ticks+row*86400-col*1313) self.check_data_integrity( ('col1 TIMESTAMP',), generator) def test_fractional_TIMESTAMP(self): ticks = time() def generator(row,col): return self.db_module.TimestampFromTicks(ticks+row*86400-col*1313+row*0.7*col/3.0) self.check_data_integrity( ('col1 TIMESTAMP',), generator) def test_LONG(self): def generator(row,col): if col == 0: return row else: return self.BLOBUText # 'BLOB Text ' * 1024 self.check_data_integrity( ('col1 INT', 'col2 LONG'), generator) def test_TEXT(self): def generator(row,col): if col == 0: return row else: return self.BLOBUText[:5192] # 'BLOB Text ' * 1024 self.check_data_integrity( ('col1 INT', 'col2 TEXT'), generator) def test_LONG_BYTE(self): def generator(row,col): if col == 0: return row else: return self.BLOBBinary # 'BLOB\000Binary ' * 1024 self.check_data_integrity( ('col1 INT','col2 LONG BYTE'), generator) def test_BLOB(self): def generator(row,col): if col == 0: return row else: return self.BLOBBinary # 'BLOB\000Binary ' * 1024 self.check_data_integrity( ('col1 INT','col2 BLOB'), generator)
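# DatabaseTest is not runnable on its own: db_module is None and no
# connection parameters are set.  A concrete driver plugs in by subclassing,
# normally from its own test module, roughly as sketched below.  The pymysql
# module and the host/user/password/database values are illustrative
# assumptions, not part of this file.
try:
    import pymysql
except ImportError:
    pymysql = None

if pymysql is not None:
    class PyMySQLCapabilityTest(DatabaseTest):
        db_module = pymysql
        connect_args = ()
        connect_kwargs = dict(host="127.0.0.1", user="test", password="",
                              database="test", use_unicode=True,
                              charset="utf8mb4", binary_prefix=True)
    # Running this subclass through a unittest runner exercises every test_*
    # method defined above against a live MySQL server.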
from collections import MutableMapping from past.builtins import basestring from .registers import * class CacheWrap(MutableMapping, object): ''' A class designed to immitate the contents it holds with a capability to reload, rebuild, destroy, or save it's contents without disrupting any references to the cache object. ''' CALLBACK_NAMES = ['loader', 'async_presaver', 'async_saver', 'async_cleaner', 'saver', 'builder', 'deleter', 'pre_processor', 'post_processor', 'validator'] def __init__(self, cache_name, contents=None, dependents=None, cache_manager=None, async=False, async_timeout=60, save_on_blank_cache=True, **kwargs): if cache_manager: self.manager = cache_manager else: from .cacher import get_cache_manager # Import here to avoid circular import self.manager = get_cache_manager() self.contents = contents self.name = cache_name self.dependents = set([self._convert_dependent_to_name(d) for d in dependents] if dependents else []) self.async = async self.async_timeout = async_timeout self.save_on_blank = save_on_blank_cache for name in CacheWrap.CALLBACK_NAMES: setattr(self, name, kwargs[name] if name in kwargs else getattr(self, name, None)) if not self.manager.cache_registered(self.name): self.manager.register_cache(self.name, contents=self) if self.contents is None: self.load_or_build() def __del__(self): if not hasattr(self, 'delete_triggered'): # Avoid infinite recursion if dependent objects trigger delete chains self.delete_triggered = True self.save() if self.name in self.manager.cache_by_name: del self.manager.cache_by_name[self.name] def __enter__(self): return self def __exit__(self, type, value, traceback): self.save() def __getattr__(self, name): ''' If a method or attribute is missing, use the content's attributes ''' for getter in ['__getattribute__', '__getattr__']: if hasattr(self.contents, getter): try: return getattr(self.contents, getter)(name) except AttributeError: pass try: return getattr(self.contents, name) except AttributeError: pass raise AttributeError("'{}' and '{}' objects have no attribute '{}'".format(self.__class__.__name__, self.contents.__class__.__name__, name)) def _check_contents_present(self): if self.contents is None: raise AttributeError("No cache contents defined for '{}'".format(self.name)) def __contains__(self, *args, **kwargs): if self.contents is None: return False return self.contents.__contains__(*args, **kwargs) def __getitem__(self, *args, **kwargs): self._check_contents_present() return self.contents.__getitem__(*args, **kwargs) def __setitem__(self, *args, **kwargs): self._check_contents_present() return self.contents.__setitem__(*args, **kwargs) def __delitem__(self, *args, **kwargs): self._check_contents_present() return self.contents.__delitem__(*args, **kwargs) def __iter__(self): self._check_contents_present() return self.contents.__iter__() def __len__(self): self._check_contents_present() return self.contents.__len__() def __str__(self): return "{}<{}>".format(self.__class__.__name__, self.contents.__str__()) def __repr__(self): return "{}<{}>".format(self.__class__.__name__, self.contents.__repr__()) def _manager_pickle_loader(self, name): return pickle_loader(self.manager.cache_directory, self.name) def _manager_pickle_saver(self, name, contents): return pickle_saver(self.manager.cache_directory, name, contents) def _manager_pickle_async_presaver(self, name, contents, extensions): return pickle_pre_saver(self.manager.cache_directory, name, contents, extensions) def _manager_pickle_async_mover(self, name, contents, extensions): 
return pickle_mover(self.manager.cache_directory, name, contents, extensions) def _manager_pickle_async_cleaner(self, name, extensions): return pickle_cleaner(self.manager.cache_directory, name, extensions) def _manager_pickle_deleter(self, name): return pickle_deleter(self.manager.cache_directory, name) def _retrieve_dependent_caches(self, seen_dependents=None): for dependent in self.dependents: if seen_dependents is None or dependent not in seen_dependents: cache = self.manager.retrieve_cache(dependent) if cache is not None: yield cache def _add_seen_cache(self, seen_caches): if seen_caches is None: seen_caches = set() seen_caches.add(self.name) return seen_caches def _convert_dependent_to_name(self, dependent): return dependent if isinstance(dependent, basestring) else dependent.name def _pre_process(self, contents): if self.pre_processor: proc_contents = self.pre_processor(contents) if proc_contents is not None: contents = proc_contents return contents def _post_process(self, contents): if self.post_processor: proc_contents = self.post_processor(contents) if proc_contents is not None: contents = proc_contents return contents def _build(self): if not self.builder: self.contents = self._post_process(dict_loader()) else: self.contents = self._post_process(self.builder(self.name)) self.save() return self.contents def _async_save(self, name, contents): fork_content_save(name, contents, self.async_presaver, self.async_saver, self.async_cleaner, self.async_timeout, self.manager.async_pid_cache) def load(self, apply_to_dependents=False, seen_caches=None): if seen_caches and self.name in seen_caches: return seen_caches = self._add_seen_cache(seen_caches) if apply_to_dependents: for dependent in self._retrieve_dependent_caches(seen_caches): dependent.load(apply_to_dependents, seen_caches) if self.loader: self.contents = self.loader(self.name) if self.contents is None: self.contents = None elif self.validator: try: if not self.validator(self.contents): self.contents = None except: self.contents = None if self.contents is not None: self.contents = self._post_process(self.contents) else: self.contents = None return self.contents def save(self, apply_to_dependents=False, seen_caches=None): if seen_caches and self.name in seen_caches: return seen_caches = self._add_seen_cache(seen_caches) if apply_to_dependents: for dependent in self._retrieve_dependent_caches(seen_caches): dependent.save(apply_to_dependents, seen_caches) contents = self._pre_process(self.contents) if not self.save_on_blank and not contents: return contents # Determine if we're doing an async save or not saver = self._async_save if self.async else self.saver return (saver and saver(self.name, contents)) or contents def invalidate(self, apply_to_dependents=True, seen_caches=None): return self.load(apply_to_dependents, seen_caches) def delete_saved_content(self, apply_to_dependents=True, seen_caches=None): ''' Does NOT delete memory cache -- use invalidate_and_rebuild to delete both ''' if seen_caches and self.name in seen_caches: return seen_caches = self._add_seen_cache(seen_caches) if apply_to_dependents: for dependent in self._retrieve_dependent_caches(seen_caches): dependent.delete_saved_content(apply_to_dependents, seen_caches) if self.deleter: self.deleter(self.name) def invalidate_and_rebuild(self, apply_to_dependents=True, seen_caches=None): if seen_caches and self.name in seen_caches: return seen_caches = self._add_seen_cache(seen_caches) self.invalidate(False) self.delete_saved_content(False) self._build() if 
apply_to_dependents: for dependent in self._retrieve_dependent_caches(seen_caches): dependent.invalidate_and_rebuild(apply_to_dependents, seen_caches) def load_or_build(self, apply_to_dependents=True, seen_caches=None): if seen_caches and self.name in seen_caches: return seen_caches = self._add_seen_cache(seen_caches) if apply_to_dependents: for dependent in self._retrieve_dependent_caches(seen_caches): dependent.load_or_build(apply_to_dependents, seen_caches) loaded = self.load() is not None if not loaded: self._build() return loaded, self.contents def add_dependent(self, dependent): self.dependents.add(dependent) class NonPersistentCache(CacheWrap): ''' Currently CacheWrap acts like a NonPersistentCache by default, but it might change in the future. ''' def __init__(self, cache_name, **kwargs): CacheWrap.__init__(self, cache_name, **kwargs) def loader(self, *args, **kwargs): return {} def builder(self, *args, **kwargs): return {} class LazyBuildNonPersistentCache(NonPersistentCache): ''' Emulates a NonPersistence cache with a lazy builder that loads data on demand. ''' def __init__(self, *args, **kwargs): builder = kwargs.get('builder') if builder: del kwargs['builder'] NonPersistentCache.__init__(self, *args, **kwargs) if builder: self.contents = None setattr(self, 'builder', builder) self.lazy_loading = False def _check_contents_present(self): if self.contents is None: self._build() if self.contents is None: raise AttributeError("No cache contents defined for '{}'".format(self.name)) def loader(self, *args, **kwargs): return None def __contains__(self, *args, **kwargs): if self.contents is None: self._build() return NonPersistentCache.__contains__(self, *args, **kwargs) def __getattr__(self, name): if object.__getattribute__(self, 'contents') is None and not object.__getattribute__(self, 'lazy_loading'): self.lazy_loading = True self._build() self.lazy_loading = False return NonPersistentCache.__getattr__(self, name) def __str__(self): if self.contents is None: self._build() return NonPersistentCache.__str__(self) def __repr__(self): if self.contents is None: self._build() return NonPersistentCache.__repr__(self) class PersistentCache(CacheWrap): ''' A persistent cache which saves and loads from pickle files. ''' def __init__(self, cache_name, **kwargs): CacheWrap.__init__(self, cache_name, **kwargs) def loader(self, name): return self._manager_pickle_loader(name) def saver(self, name, contents): return self._manager_pickle_saver(name, contents) def deleter(self, name): return self._manager_pickle_deleter(name) def async_presaver(self, name, contents, extensions): return self._manager_pickle_async_presaver(name, contents, extensions) def async_saver(self, name, contents, extensions): return self._manager_pickle_async_mover(name, contents, extensions) def async_cleaner(self, name, extensions): return self._manager_pickle_async_cleaner(name, extensions)
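# Hedged usage sketch for the classes above, assuming the pickle helpers
# imported from .registers and the manager returned by get_cache_manager()
# behave as their names suggest.  The cache name, builder and data are
# illustrative only.
def _build_user_index(cache_name):
    # Stand-in for an expensive computation whose result is worth caching.
    return {"alice": 1, "bob": 2}

def _persistent_cache_example():
    users = PersistentCache("user_index", builder=_build_user_index)
    assert users["alice"] == 1        # item access proxies to the contents
    users["carol"] = 3                # mutations go straight to the dict
    users.save()                      # pickled via the manager's saver
    users.invalidate_and_rebuild()    # drop memory and disk copy, rebuild
    assert "carol" not in users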
############################################################################ ## ## Copyright (C) 2006-2007 University of Utah. All rights reserved. ## ## This file is part of VisTrails. ## ## This file may be used under the terms of the GNU General Public ## License version 2.0 as published by the Free Software Foundation ## and appearing in the file LICENSE.GPL included in the packaging of ## this file. Please review the following to ensure GNU General Public ## Licensing requirements will be met: ## http://www.opensource.org/licenses/gpl-license.php ## ## If you are unsure which license is appropriate for your use (for ## instance, you are interested in developing a commercial derivative ## of VisTrails), please contact us at contact@vistrails.org. ## ## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE ## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. ## ############################################################################ """This package defines a set of methods that perform the command-line tools necesary for the task designed for the first Provenance Challenge: http://twiki.ipaw.info This VisTrails package requires three software packages to be installed: AIR - Automated Image Registration: http://bishopw.loni.ucla.edu/AIR5/ FSL - http://www.fmrib.ox.ac.uk/fsl/ netpbm - http://netpbm.sourceforge.net/ """ from core.modules.vistrails_module import Module, ModuleError, new_module import core.modules import core.modules.basic_modules import core.modules.module_registry import core.utils from core.system import list2cmdline import os import os.path import stat import subprocess version = '0.9.0' name = 'Provenance Challenge' identifier = 'edu.utah.sci.vistrails.provenance' ################################################################################ global_airpath = "" global_fslpath = "" global_netpbmpath = "" class ProvenanceChallenge(Module): """ProvenanceChallenge is the base Module for all Modules in the provenance package. 
It simply define helper methods for subclasses.""" __quiet = True __programsQuiet = True def air_cmd_line(self, cmd, *params): """Runs a command-line command for the AIR tools.""" return list2cmdline([global_airpath + cmd] + list(params)) def fsl_cmd_line(self, cmd, *params): """Runs a command-line command for the FSL tools.""" return ('FSLOUTPUTTYPE=NIFTI_GZ ' + list2cmdline([global_fslpath + cmd] + list(params))) def run(self, cmd): if not self.__quiet: print cmd if self.__programsQuiet: cmd = '(' + cmd + ') 2>&1 >/dev/null' r = os.system(cmd) if r != 0: raise ModuleError(self, "system call failed: '%s'" % cmd) class AIRHeaderFile(core.modules.basic_modules.File): """AIRHeaderFile subclasses File to annotate the execution with header file information for later querying.""" def get_header_annotations(self): """Returns the header information for the file using the AIR scanheader tool.""" process = subprocess.Popen(global_airpath + 'scanheader ' + self.name, shell=True, stdout=subprocess.PIPE) if process.wait() != 0: raise ModuleError(self, "Could not open header file " + self.name) result = {} lines = core.utils.no_interrupt(process.stdout.readlines) for l in lines: l = l[:-1] if not l: continue entries = l.split('=') if len(entries) != 2: raise ModuleError(self, "Error parsing line '%s' of header %s" % (l[:-1], self.name)) result[entries[0]] = entries[1] return result def compute(self): core.modules.basic_modules.File.compute(self) d = self.get_header_annotations() self.annotate(d) class AlignWarp(ProvenanceChallenge): """AlignWarp executes the AIR warping tool on the input.""" def compute(self): image = self.get_input("image") ref = self.get_input("reference") model = self.get_input("model") o = self.interpreter.filePool.create_file(suffix='.warp') cmd = self.air_cmd_line('align_warp', image.name, ref.name, o.name, '-m', str(model), '-q') self.run(cmd) self.set_output("output", o) class Reslice(ProvenanceChallenge): """AlignWarp executes the AIR reslicing tool on the input.""" def compute(self): warp = self.get_input("warp") o = self.interpreter.filePool.create_file() cmd = self.air_cmd_line('reslice', warp.name, o.name) self.run(cmd) self.set_output("output", o) class SoftMean(ProvenanceChallenge): """SoftMean executes the AIR softmean averaging tool on the input.""" def compute(self): imageList = self.get_input("imageList") o = self.interpreter.filePool.create_file(suffix='.hdr') cmd = self.air_cmd_line('softmean', o.name, 'y', 'null', *[f.name for f in imageList]) self.run(cmd) self.set_output('output', o) class Slicer(ProvenanceChallenge): """Slicer executes the FSL slicer tool on the input.""" def compute(self): cmd = ['slicer'] i = self.get_input("input") cmd.append(i.name) if self.has_input("slice_x"): cmd.append('-x') cmd.append(str(self.get_input("slice_x"))) elif self.has_input("slice_y"): cmd.append('-y') cmd.append(str(self.get_input("slice_y"))) elif self.has_input("slice_z"): cmd.append('-z') cmd.append(str(self.get_input("slice_z"))) o = self.interpreter.filePool.create_file(suffix='.pgm') cmd.append(o.name) self.run(self.fsl_cmd_line(*cmd)) self.set_output('output', o) class PGMToPPM(ProvenanceChallenge): """PGMToPPM executes the netpbm pgmtoppm tool on the input.""" def compute(self): cmd = ['pgmtoppm', 'white'] i = self.get_input("input") cmd.append(i.name) o = self.interpreter.filePool.create_file(suffix='.ppm') cmd.append(' >') cmd.append(o.name) self.run(list2cmdline(cmd)) self.set_output('output', o) class PNMToJpeg(ProvenanceChallenge): """PGMToPPM executes the 
netpbm pnmtojpeg tool on the input.""" def compute(self): cmd = ['pnmtojpeg'] i = self.get_input("input") cmd.append(i.name) o = self.interpreter.filePool.create_file(suffix='.jpg') cmd.append(' >') cmd.append(o.name) self.run(list2cmdline(cmd)) self.set_output('output', o) ################################################################################ def checkProgram(fileName): return os.path.isfile(fileName) and os.stat(fileName) & 005 def initialize(airpath=None, fslpath=None, netpbmpath=None, *args, **kwargs): print "Initializing Provenance Challenge Package" if not airpath: print "airpath not specified or incorrect: Will assume AIR tools are on the path" else: print "Will use AIR tools from ", airpath global global_airpath global_airpath = airpath + '/' if not fslpath: print "fslpath not specified or incorrect: Will assume fsl tools are on the path" else: print "Will use FSL tools from ", fslpath global global_fslpath global_fslpath = fslpath + '/' if not netpbmpath: print "netpbmpath not specified or incorrect: Will assume netpbm tools are on the path" else: print "Will use netpbm tools from ", netpbmpath global global_netpbmpath global_netpbmpath = netpbmpath + '/' reg = core.modules.module_registry basic = core.modules.basic_modules reg.add_module(ProvenanceChallenge) reg.add_module(AlignWarp) reg.add_input_port(AlignWarp, "image", (basic.File, 'the image file to be deformed')) reg.add_input_port(AlignWarp, "image_header", (basic.File, 'the header of the image to be deformed')) reg.add_input_port(AlignWarp, "reference", (basic.File, 'the reference image')) reg.add_input_port(AlignWarp, "reference_header", (basic.File, 'the header of the reference image')) reg.add_input_port(AlignWarp, "model", (basic.Integer, 'the deformation model')) reg.add_output_port(AlignWarp, "output", (basic.File, 'the output deformation')) reg.add_module(Reslice) reg.add_input_port(Reslice, "warp", (basic.File, 'the warping to be resliced')) reg.add_output_port(Reslice, "output", (basic.File, 'the new slice')) reg.add_module(SoftMean) reg.add_input_port(SoftMean, "imageList", (basic.List, 'image files')) reg.add_output_port(SoftMean, "output", (basic.File, 'average image')) reg.add_module(Slicer) reg.add_input_port(Slicer, "input", (basic.File, 'the input file to be sliced')) reg.add_input_port(Slicer, "slice_x", (basic.Float, 'sagittal slice with given value')) reg.add_input_port(Slicer, "slice_y", (basic.Float, 'coronal slice with given value')) reg.add_input_port(Slicer, "slice_z", (basic.Float, 'axial slice with given value')) reg.add_output_port(Slicer, "output", (basic.File, 'slice output')) reg.add_module(PGMToPPM) reg.add_input_port(PGMToPPM, "input", (basic.File, "input")) reg.add_output_port(PGMToPPM, "output", (basic.File, "output")) reg.add_module(PNMToJpeg) reg.add_input_port(PNMToJpeg, "input", (basic.File, "input")) reg.add_output_port(PNMToJpeg, "output", (basic.File, "output")) reg.add_module(AIRHeaderFile)
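# ---------------------------------------------------------------------------
# Note (illustrative, not part of the package above): checkProgram() applies
# the bitwise `&` to the whole os.stat() result instead of its st_mode field,
# which raises a TypeError. A standard-library sketch of the intended
# "file exists and is executable" test, with a hypothetical name:
import os
import stat

def _check_program_sketch(file_name):
    """Return True if file_name exists and any execute bit is set."""
    if not os.path.isfile(file_name):
        return False
    mode = os.stat(file_name).st_mode
    return bool(mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))

# os.access(file_name, os.X_OK) is an equivalent, more direct check.
# ---------------------------------------------------------------------------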
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Sampling functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.training import input as input_ops from tensorflow.python.training import queue_runner __all__ = ['rejection_sample', 'stratified_sample',] def rejection_sample(tensors, accept_prob_fn, batch_size, queue_threads=1, enqueue_many=False, prebatch_capacity=16, prebatch_threads=1, runtime_checks=False, name=None): """Stochastically creates batches by rejection sampling. Each list of non-batched tensors is evaluated by `accept_prob_fn`, to produce a scalar tensor between 0 and 1. This tensor corresponds to the probability of being accepted. When `batch_size` tensor groups have been accepted, the batch queue will return a mini-batch. Args: tensors: List of tensors for data. All tensors are either one item or a batch, according to enqueue_many. accept_prob_fn: A python lambda that takes a non-batch tensor from each item in `tensors`, and produces a scalar tensor. batch_size: Size of batch to be returned. queue_threads: The number of threads for the queue that will hold the final batch. enqueue_many: Bool. If true, interpret input tensors as having a batch dimension. prebatch_capacity: Capacity for the large queue that is used to convert batched tensors to single examples. prebatch_threads: Number of threads for the large queue that is used to convert batched tensors to single examples. runtime_checks: Bool. If true, insert runtime checks on the output of `accept_prob_fn`. Using `True` might have a performance impact. name: Optional prefix for ops created by this function. Raises: ValueError: enqueue_many is True and labels doesn't have a batch dimension, or if enqueue_many is False and labels isn't a scalar. ValueError: enqueue_many is True, and batch dimension on data and labels don't match. ValueError: if a zero initial probability class has a nonzero target probability. Returns: A list of tensors of the same length as `tensors`, with batch dimension `batch_size`. Example: # Get tensor for a single data and label example. data, label = data_provider.Get(['data', 'label']) # Get stratified batch according to data tensor. 
accept_prob_fn = lambda x: (tf.tanh(x[0]) + 1) / 2 data_batch = tf.contrib.training.rejection_sample( [data, label], accept_prob_fn, 16) # Run batch through network. ... """ with variable_scope.variable_scope(name, 'rejection_sample', tensors): tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensors) # Reduce the case of a batched example to that of a batch of a single # example by taking a batch of size one. if enqueue_many: # Validate that batch dimension of the input is consistent. tensor_list = _verify_data_inputs(tensor_list) # Make a single queue to hold input examples. Reshape output so examples # don't have singleton batch dimension. batched = input_ops.batch(tensor_list, batch_size=1, num_threads=prebatch_threads, capacity=prebatch_capacity, enqueue_many=True) tensor_list = [array_ops.squeeze(x, [0]) for x in batched] # Set up a queue containing batches that have the distribution. cur_prob = accept_prob_fn(tensor_list) if runtime_checks: cur_prob = array_ops.identity(control_flow_ops.with_dependencies( [check_ops.assert_less_equal(0.0, cur_prob), check_ops.assert_less_equal(cur_prob, 1.0)], cur_prob), name='prob_with_checks') keep_input = random_ops.random_uniform([]) < cur_prob return _conditional_batch( tensor_list, keep_input, batch_size, num_threads=queue_threads) def stratified_sample(tensors, labels, target_probs, batch_size, init_probs=None, enqueue_many=False, queue_capacity=16, threads_per_queue=1, name=None): """Stochastically creates batches based on per-class probabilities. This method discards examples. Internally, it creates one queue to amortize the cost of disk reads, and one queue to hold the properly-proportioned batch. Args: tensors: List of tensors for data. All tensors are either one item or a batch, according to enqueue_many. labels: Tensor for label of data. Label is a single integer or a batch, depending on enqueue_many. It is not a one-hot vector. target_probs: Target class proportions in batch. An object whose type has a registered Tensor conversion function. batch_size: Size of batch to be returned. init_probs: Class proportions in the data. An object whose type has a registered Tensor conversion function, or `None` for estimating the initial distribution. enqueue_many: Bool. If true, interpret input tensors as having a batch dimension. queue_capacity: Capacity of the large queue that holds input examples. threads_per_queue: Number of threads for the large queue that holds input examples and for the final queue with the proper class proportions. name: Optional prefix for ops created by this function. Raises: ValueError: enqueue_many is True and labels doesn't have a batch dimension, or if enqueue_many is False and labels isn't a scalar. ValueError: enqueue_many is True, and batch dimension on data and labels don't match. ValueError: if probs don't sum to one. ValueError: if a zero initial probability class has a nonzero target probability. TFAssertion: if labels aren't integers in [0, num classes). Returns: (data_batch, label_batch), where data_batch is a list of tensors of the same length as `tensors` Example: # Get tensor for a single data and label example. data, label = data_provider.Get(['data', 'label']) # Get stratified batch according to per-class probabilities. target_probs = [...distribution you want...] [data_batch], labels = tf.contrib.training.stratified_sample( [data], label, target_probs) # Run batch through network. ... 
""" with ops.name_scope(name, 'stratified_sample', tensors + [labels]): tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensors) labels = ops.convert_to_tensor(labels) target_probs = ops.convert_to_tensor(target_probs, dtype=dtypes.float32) # Reduce the case of a single example to that of a batch of size 1. if not enqueue_many: tensor_list = [array_ops.expand_dims(tensor, 0) for tensor in tensor_list] labels = array_ops.expand_dims(labels, 0) # If `init_probs` is `None`, set up online estimation of data distribution. if init_probs is None: # We use `target_probs` to get the number of classes, so its shape must be # fully defined at graph construction time. target_probs.get_shape().assert_is_fully_defined() init_probs = _estimate_data_distribution( labels, target_probs.get_shape().num_elements()) else: init_probs = ops.convert_to_tensor(init_probs, dtype=dtypes.float32) # Validate that input is consistent. tensor_list, labels, [init_probs, target_probs] = _verify_input( tensor_list, labels, [init_probs, target_probs]) # Check that all zero initial probabilities also have zero target # probabilities. assert_op = control_flow_ops.Assert( math_ops.reduce_all(math_ops.logical_or( math_ops.not_equal(init_probs, 0), math_ops.equal(target_probs, 0))), ['All classes with zero initial probability must also have zero target ' 'probability: ', init_probs, target_probs]) init_probs = control_flow_ops.with_dependencies([assert_op], init_probs) # Calculate acceptance sampling probabilities. accept_probs = _calculate_acceptance_probabilities(init_probs, target_probs) proportion_rejected = math_ops.reduce_sum((1 - accept_probs) * init_probs) accept_probs = control_flow_ops.cond( math_ops.less(proportion_rejected, .5), lambda: accept_probs, lambda: logging_ops.Print( # pylint: disable=g-long-lambda accept_probs, [accept_probs], message='Proportion of examples rejected by sampler is high.', first_n=10)) # Make a single queue to hold input examples. Reshape output so examples # don't have singleton batch dimension. batched = input_ops.batch(tensor_list + [labels], batch_size=1, num_threads=threads_per_queue, capacity=queue_capacity, enqueue_many=True) val_list = [array_ops.squeeze(x, [0]) for x in batched[:-1]] label = array_ops.squeeze(batched[-1], [0]) # Set up second queue containing batches that have the desired class # proportions. cur_prob = array_ops.gather(accept_probs, label) keep_input = random_ops.random_uniform([]) < cur_prob batched = _conditional_batch( val_list + [label], keep_input, batch_size, num_threads=threads_per_queue) return batched[:-1], batched[-1] def _estimate_data_distribution(labels, num_classes, smoothing_constant=10): """Estimate data distribution as labels are seen.""" # Variable to track running count of classes. Smooth by a nonzero value to # avoid division-by-zero. Higher values provide more stability at the cost of # slower convergence. if smoothing_constant <= 0: raise ValueError('smoothing_constant must be nonzero.') num_examples_per_class_seen = variables.Variable( initial_value=[smoothing_constant] * num_classes, trainable=False, name='class_count', dtype=dtypes.int64) # Update the class-count based on what labels are seen in batch. num_examples_per_class_seen = num_examples_per_class_seen.assign_add( math_ops.reduce_sum(array_ops.one_hot(labels, num_classes, dtype=dtypes.int64), 0)) # Normalize count into a probability. # NOTE: Without the `+= 0` line below, the test # `testMultiThreadedEstimateDataDistribution` fails. 
The reason is that # before this line, `num_examples_per_class_seen` is a Tensor that shares a # buffer with an underlying `ref` object. When the `ref` is changed by another # thread, `num_examples_per_class_seen` changes as well. Since this can happen # in the middle of the normalization computation, we get probabilities that # are very far from summing to one. Adding `+= 0` copies the contents of the # tensor to a new buffer, which will be consistent from the start to the end # of the normalization computation. num_examples_per_class_seen += 0 init_prob_estimate = math_ops.truediv( num_examples_per_class_seen, math_ops.reduce_sum(num_examples_per_class_seen)) # Must return float32 (not float64) to agree with downstream `_verify_input` # checks. return math_ops.cast(init_prob_estimate, dtypes.float32) def _verify_data_inputs(tensor_list): """Verify that batched data inputs are well-formed.""" for tensor in tensor_list: # Data tensor should have a batch dimension. tensor_shape = tensor.get_shape().with_rank_at_least(1) # Data batch dimensions must be compatible. tensor_shape[0].assert_is_compatible_with(tensor_list[0].get_shape()[0]) return tensor_list def _verify_input(tensor_list, labels, probs_list): """Verify that batched inputs are well-formed.""" checked_probs_list = [] for probs in probs_list: # Since number of classes shouldn't change at runtime, probalities shape # should be fully defined. probs.get_shape().assert_is_fully_defined() # Probabilities must be 1D. probs.get_shape().assert_has_rank(1) # Probabilities must be nonnegative and sum to one. tol = 1e-6 prob_sum = math_ops.reduce_sum(probs) checked_probs = control_flow_ops.with_dependencies( [check_ops.assert_non_negative(probs), check_ops.assert_less(prob_sum, 1.0 + tol), check_ops.assert_less(1.0 - tol, prob_sum)], probs) checked_probs_list.append(checked_probs) # All probabilities should be the same length. prob_length = checked_probs_list[0].get_shape().num_elements() for checked_prob in checked_probs_list: if checked_prob.get_shape().num_elements() != prob_length: raise ValueError('Probability parameters must have the same length.') # Labels tensor should only have batch dimension. labels.get_shape().assert_has_rank(1) for tensor in tensor_list: # Data tensor should have a batch dimension. tensor_shape = tensor.get_shape().with_rank_at_least(1) # Data and label batch dimensions must be compatible. tensor_shape[0].assert_is_compatible_with(labels.get_shape()[0]) # Data and labels must have the same, strictly positive batch size. Since we # can't assume we know the batch size at graph creation, add runtime checks. labels_batch_size = array_ops.shape(labels)[0] lbl_assert = check_ops.assert_positive(labels_batch_size) # Make each tensor depend on its own checks. labels = control_flow_ops.with_dependencies([lbl_assert], labels) tensor_list = [control_flow_ops.with_dependencies( [lbl_assert, check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)], x) for x in tensor_list] # Label's classes must be integers 0 <= x < num_classes. labels = control_flow_ops.with_dependencies( [check_ops.assert_integer(labels), check_ops.assert_non_negative(labels), check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))], labels) return tensor_list, labels, checked_probs_list def _calculate_acceptance_probabilities(init_probs, target_probs): """Calculate the per-class acceptance rates. Args: init_probs: The class probabilities of the data. target_probs: The desired class proportion in minibatches. 
Returns: A list of the per-class acceptance probabilities. This method is based on solving the following analysis: Let F be the probability of a rejection (on any example). Let p_i be the proportion of examples in the data in class i (init_probs) Let a_i is the rate the rejection sampler should *accept* class i Let t_i is the target proportion in the minibatches for class i (target_probs) ``` F = sum_i(p_i * (1-a_i)) = 1 - sum_i(p_i * a_i) using sum_i(p_i) = 1 ``` An example with class `i` will be accepted if `k` rejections occur, then an example with class `i` is seen by the rejector, and it is accepted. This can be written as follows: ``` t_i = sum_k=0^inf(F^k * p_i * a_i) = p_i * a_j / (1 - F) using geometric series identity, since 0 <= F < 1 = p_i * a_i / sum_j(p_j * a_j) using F from above ``` Note that the following constraints hold: ``` 0 <= p_i <= 1, sum_i(p_i) = 1 0 <= a_i <= 1 0 <= t_i <= 1, sum_i(t_i) = 1 ``` A solution for a_i in terms of the other variabes is the following: ```a_i = (t_i / p_i) / max_i[t_i / p_i]``` """ # Make list of t_i / p_i. ratio_l = target_probs / init_probs # Replace NaNs with 0s. ratio_l = math_ops.select(math_ops.is_nan(ratio_l), array_ops.zeros_like(ratio_l), ratio_l) # Calculate list of acceptance probabilities. max_ratio = math_ops.reduce_max(ratio_l) return ratio_l / max_ratio def _conditional_batch(tensors, keep_input, batch_size, num_threads=10): """Conditionally enqueue tensors based on accept_prob. Specifically, enqueue the element if accept_prob > rand_unif([0, 1]). Args: tensors: List of tensors to enqueue. keep_input: Bool. Whether to enqueue or not. batch_size: Size of batch. num_threads: Number of enqueueing threads. Returns: List of batched tensors. Raises: ValueError: `accept_prob` isn't 0D. """ keep_input.get_shape().assert_has_rank(0) # Determine shapes and types of to-be-enqueued-tensors. shapes_list = [] dtypes_list = [] for tensor in tensors: cur_shape = tensor.get_shape() cur_shape.assert_is_fully_defined() shapes_list.append(cur_shape) dtypes_list.append(tensor.dtype) final_q = data_flow_ops.FIFOQueue(capacity=batch_size, shapes=shapes_list, dtypes=dtypes_list, name='batched_queue') logging_ops.scalar_summary('queue/%s/size' % final_q.name, final_q.size()) # Conditionally enqueue. # Reshape enqueue op to match no_op's shape. conditional_enqueue = control_flow_ops.cond( keep_input, lambda: final_q.enqueue(tensors), control_flow_ops.no_op) queue_runner.add_queue_runner(queue_runner.QueueRunner( final_q, [conditional_enqueue] * num_threads)) out_tensor = final_q.dequeue_many(batch_size) # Queues return a single tensor if the list of enqued tensors is one. Since we # want the type to be the same in all cases, always return a list. if isinstance(out_tensor, ops.Tensor): out_tensor = [out_tensor] return out_tensor
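# ---------------------------------------------------------------------------
# Illustrative sketch (plain NumPy, independent of the ops above): the
# acceptance rates derived in _calculate_acceptance_probabilities,
# a_i = (t_i / p_i) / max_j(t_j / p_j), can be checked numerically with
# hypothetical inputs:
import numpy as np

def acceptance_probs_sketch(init_probs, target_probs):
    init_probs = np.asarray(init_probs, dtype=np.float64)
    target_probs = np.asarray(target_probs, dtype=np.float64)
    ratio = np.where(init_probs > 0,
                     target_probs / np.maximum(init_probs, 1e-12), 0.0)
    return ratio / ratio.max()

# With p = [0.9, 0.1] and t = [0.5, 0.5], the rare class is always accepted
# while the common class is accepted ~1/9 of the time, so the accepted
# stream is rebalanced to ~50/50:
#   a = acceptance_probs_sketch([0.9, 0.1], [0.5, 0.5])  # -> [0.111..., 1.0]
#   accepted = np.array([0.9, 0.1]) * a
#   accepted / accepted.sum()                            # -> [0.5, 0.5]
# ---------------------------------------------------------------------------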
# Copyright 2022 The DDSP Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for ddsp.training.eval_util.""" from unittest import mock from absl.testing import parameterized import ddsp from ddsp.test_util import gen_np_batched_sinusoids from ddsp.test_util import gen_np_sinusoid import ddsp.training.metrics as ddsp_metrics import numpy as np import tensorflow.compat.v2 as tf class ComputeAudioFeaturesTest(parameterized.TestCase, tf.test.TestCase): def setUp(self): """Create some common default values for the test sinusoid.""" super().setUp() self.amp = 0.75 self.frequency = 440.0 self.frame_rate = 250 self.sample_rate = 16000 def expected_length(self, audio): n_t = audio.shape[-1] frame_size = ddsp.spectral_ops.CREPE_FRAME_SIZE hop_size = int(self.sample_rate // self.frame_rate) expected_len, _ = ddsp.spectral_ops.get_framed_lengths( n_t, frame_size, hop_size) return expected_len def validate_output_shapes(self, audio_features, expected_feature_lengths): for feat, expected_len in expected_feature_lengths.items(): arr = audio_features[feat] try: self.assertLen(arr, expected_len) except AssertionError as e: raise AssertionError('%s feature: %s' % (e, feat)) from e self.assertTrue(np.all(np.isfinite(arr))) @parameterized.named_parameters( ('0.21secs', .21), ('0.4secs', .4), ) def test_correct_shape_compute_af_at_sample_rate(self, audio_len_sec): audio_sin = gen_np_sinusoid(self.frequency, self.amp, self.sample_rate, audio_len_sec) exp_length = self.expected_length(audio_sin) audio_features = ddsp_metrics.compute_audio_features( audio_sin, frame_rate=self.frame_rate) self.validate_output_shapes( audio_features, { 'audio': audio_len_sec * self.sample_rate, 'f0_hz': exp_length, 'f0_confidence': exp_length, 'loudness_db': exp_length, }) class MetricsObjectsTest(parameterized.TestCase, tf.test.TestCase): @classmethod def setUpClass(cls): """Create common default batch of noise and sinusoids.""" super().setUpClass() cls.frequency = 440 cls.sample_rate = 16000 cls.frame_rate = 250 cls.batch_size = 2 cls.audio_len_sec = 0.25 # To make tests with f0 CREPE run in shorter time cls.amp = 0.8 cls.batch_of_noise = cls.gen_batch_of_noise(cls.amp) cls.batch_of_noise_feats = cls.gen_batch_of_features(cls.batch_of_noise) cls.batch_of_sin = gen_np_batched_sinusoids(cls.frequency, cls.amp * 0.5, cls.sample_rate, cls.audio_len_sec, cls.batch_size) cls.batch_of_sin_feats = cls.gen_batch_of_features(cls.batch_of_sin) @classmethod def gen_batch_of_noise(cls, amp): noise_audio = np.random.uniform( low=-amp, high=amp, size=(1, int(cls.sample_rate * cls.audio_len_sec))) return np.tile(noise_audio, [cls.batch_size, 1]) @classmethod def gen_batch_of_features(cls, batch_of_audio): batch_size = batch_of_audio.shape[0] audio = batch_of_audio[0] feats = ddsp_metrics.compute_audio_features( audio, frame_rate=cls.frame_rate) for k, v in feats.items(): feats[k] = np.tile(v[np.newaxis, :], [batch_size, 1]) return feats def test_loudness_metrics_has_expected_values(self): loudness_metrics = 
ddsp_metrics.LoudnessMetrics(self.sample_rate, self.frame_rate) # Dummy batch 1: known noise features vs. known noise audio. # Since audio is the same, loudness distance should be 0. loudness_metrics.update_state(self.batch_of_noise_feats, self.batch_of_noise) self.assertAllClose(loudness_metrics.metrics['loudness_db'].result(), 0) # Dummy batch 2: known noise features vs. quiet batch of sin audio # Since audio is different, loudness distance should greater than 0. loudness_metrics.update_state(self.batch_of_noise_feats, self.batch_of_sin) self.assertGreater(loudness_metrics.metrics['loudness_db'].result(), 0) loudness_metrics.flush(step=1) @mock.patch('ddsp.spectral_ops.compute_f0') def test_f0_crepe_metrics_has_expected_values(self, mock_compute_f0): """Test F0CrepeMetrics. F0CrepeMetrics makes an expensive call to compute_f0 (which in turn calls CREPE) for every generated example during update_state. To avoid this, we mock out compute_f0 and replace the return values (via side_effect) with precomputed f0_hz and confidence values. Args: mock_compute_f0: The mock object for compute_f0, automatically injected by mock.patch. """ f0_crepe_metrics = ddsp_metrics.F0CrepeMetrics(self.sample_rate, self.frame_rate) # Batch 1: correct f0 crepe_f0 = self.batch_of_sin_feats['f0_hz'] crepe_conf = np.ones_like(crepe_f0) mock_compute_f0.side_effect = zip(crepe_f0, crepe_conf) f0_crepe_metrics.update_state(self.batch_of_sin_feats, self.batch_of_sin) self.assertAllClose(f0_crepe_metrics.metrics['f0_dist'].result(), 0) self.assertAllClose( f0_crepe_metrics.metrics['outlier_ratio'].result(), 0) # Batch 2: incorrect f0 crepe_f0 = self.batch_of_sin_feats['f0_hz'] * 2 crepe_conf = np.ones_like(crepe_f0) mock_compute_f0.side_effect = zip(crepe_f0, crepe_conf) f0_crepe_metrics.update_state(self.batch_of_sin_feats, self.batch_of_sin) self.assertGreater(f0_crepe_metrics.metrics['f0_dist'].result(), 0) self.assertAllClose( f0_crepe_metrics.metrics['outlier_ratio'].result(), 0) # Batch 3: low crepe confidence crepe_f0 = np.zeros_like(self.batch_of_sin_feats['f0_hz']) crepe_conf = np.ones_like(crepe_f0) mock_compute_f0.side_effect = zip(crepe_f0, crepe_conf) f0_crepe_metrics.update_state(self.batch_of_sin_feats, self.batch_of_noise) self.assertGreater(f0_crepe_metrics.metrics['f0_dist'].result(), 0) self.assertGreater( f0_crepe_metrics.metrics['outlier_ratio'].result(), 0) f0_crepe_metrics.flush(step=1) def test_f0_metrics_has_expected_values(self): f0_metrics = ddsp_metrics.F0Metrics(self.sample_rate, self.frame_rate) # Batch 1: known sin features vs. batch of known sin f0_hz f0_metrics.update_state(self.batch_of_sin_feats, self.batch_of_sin_feats['f0_hz']) self.assertAllClose(f0_metrics.metrics['f0_dist'].result(), 0) # Batch 2: known sin features vs. batch of f0_hz at different f0 f0_metrics.update_state(self.batch_of_sin_feats, self.batch_of_sin_feats['f0_hz'] * 3) self.assertGreater(f0_metrics.metrics['f0_dist'].result(), 0) f0_metrics.flush(step=1) def test_f0_metrics_resamples_f0_hz_predictions_to_expected_length(self): f0_metrics = ddsp_metrics.F0Metrics(self.sample_rate, self.frame_rate) expected_len = self.frame_rate * self.audio_len_sec shorter_len = int(expected_len * 0.8) # Batch 1: known sin features vs. 
batch of shorter f0_hz at different f0 f0_metrics.update_state( self.batch_of_sin_feats, 3 * self.batch_of_sin_feats['f0_hz'][:, :shorter_len]) self.assertGreater(f0_metrics.metrics['f0_dist'].result(), 0) f0_metrics.flush(step=1) def test_rpa_has_expected_values_exact_match(self): rpa = ddsp_metrics.F0Metrics(self.sample_rate, self.frame_rate) f0 = self.batch_of_sin_feats['f0_hz'] rpa.update_state(self.batch_of_sin_feats, f0) self.assertEqual(rpa.metrics['raw_pitch_accuracy'].result(), 1.0) self.assertEqual(rpa.metrics['raw_chroma_accuracy'].result(), 1.0) def test_rpa_has_expected_values_octave_error(self): rpa = ddsp_metrics.F0Metrics(self.sample_rate, self.frame_rate) f0 = self.batch_of_sin_feats['f0_hz'] rpa.update_state(self.batch_of_sin_feats, f0 * 2) self.assertEqual(rpa.metrics['raw_pitch_accuracy'].result(), 0.0) self.assertEqual(rpa.metrics['raw_chroma_accuracy'].result(), 1.0) def test_rpa_has_expected_values_error_within_threshold(self): rpa = ddsp_metrics.F0Metrics(self.sample_rate, self.frame_rate) f0 = self.batch_of_sin_feats['f0_hz'] rpa.update_state(self.batch_of_sin_feats, f0 + 10) self.assertEqual(rpa.metrics['raw_pitch_accuracy'].result(), 1.0) self.assertEqual(rpa.metrics['raw_chroma_accuracy'].result(), 1.0) def test_rpa_has_expected_values_error_outside_threshold(self): rpa = ddsp_metrics.F0Metrics(self.sample_rate, self.frame_rate) f0 = self.batch_of_sin_feats['f0_hz'] rpa.update_state(self.batch_of_sin_feats, f0 + 220) self.assertEqual(rpa.metrics['raw_pitch_accuracy'].result(), 0.0) self.assertEqual(rpa.metrics['raw_chroma_accuracy'].result(), 0.0) if __name__ == '__main__': tf.test.main()
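# ---------------------------------------------------------------------------
# Illustrative sketch (plain NumPy, hypothetical names and a commonly used
# 50-cent tolerance): the RPA/RCA cases above follow from measuring pitch
# error in cents. Raw pitch accuracy needs the error itself to fall inside
# the tolerance, while raw chroma accuracy first folds out whole octaves, so
# an exact octave error fails RPA but passes RCA.
import numpy as np

def rpa_rca_sketch(f0_ref, f0_est, tolerance_cents=50.0):
    err = 1200.0 * np.log2(np.asarray(f0_est, float) / np.asarray(f0_ref, float))
    rpa = np.mean(np.abs(err) <= tolerance_cents)
    chroma_err = np.abs(err) % 1200.0
    chroma_err = np.minimum(chroma_err, 1200.0 - chroma_err)
    rca = np.mean(chroma_err <= tolerance_cents)
    return float(rpa), float(rca)

#   rpa_rca_sketch([440.0], [880.0])  # (0.0, 1.0)  octave error
#   rpa_rca_sketch([440.0], [450.0])  # (1.0, 1.0)  ~39 cents, inside tolerance
#   rpa_rca_sketch([440.0], [660.0])  # (0.0, 0.0)  ~702 cents, ~498 after folding
# ---------------------------------------------------------------------------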
# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock from jacket.compute.conductor import manager as conductor_manager from jacket import context from jacket.db import compute from jacket.objects import compute from jacket.tests.compute.functional.api_sample_tests import test_servers from jacket.tests.compute.unit import fake_instance class ServerMigrationsSampleJsonTest(test_servers.ServersSampleBase): extension_name = 'server-migrations' scenarios = [('v2_22', {'api_major_version': 'v2.1'})] extra_extensions_to_load = ["os-migrate-server", "os-access-ips"] def setUp(self): """setUp method for server usage.""" super(ServerMigrationsSampleJsonTest, self).setUp() self.uuid = self._post_server() self.api.microversion = '2.22' @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate') @mock.patch.object(compute, 'service_get_by_compute_host') @mock.patch.object(compute.Migration, 'get_by_id_and_instance') @mock.patch('compute.compute.manager.ComputeManager.' 'live_migration_force_complete') def test_live_migrate_force_complete(self, live_migration_pause_instance, get_by_id_and_instance, service_get_by_compute_host, _live_migrate): migration = compute.Migration() migration.id = 1 migration.status = 'running' get_by_id_and_instance.return_value = migration self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server', {'hostname': self.compute.host}) response = self._do_post('servers/%s/migrations/%s/action' % (self.uuid, '3'), 'force_complete', {}) self.assertEqual(202, response.status_code) def test_get_migration(self): response = self._do_get('servers/fake_id/migrations/1234') self.assertEqual(404, response.status_code) def test_list_migrations(self): response = self._do_get('servers/fake_id/migrations') self.assertEqual(404, response.status_code) class ServerMigrationsSamplesJsonTestV2_23(test_servers.ServersSampleBase): ADMIN_API = True extension_name = "server-migrations" microversion = '2.23' scenarios = [('v2_23', {'api_major_version': 'v2.1'})] UUID_1 = '4cfba335-03d8-49b2-8c52-e69043d1e8fe' UUID_2 = '058fc419-a8a8-4e08-b62c-a9841ef9cd3f' fake_migrations = [ { 'source_node': 'node1', 'dest_node': 'node2', 'source_compute': 'compute1', 'dest_compute': 'compute2', 'dest_host': '1.2.3.4', 'status': 'running', 'instance_uuid': UUID_1, 'migration_type': 'live-migration', 'hidden': False, 'memory_total': 123456, 'memory_processed': 12345, 'memory_remaining': 120000, 'disk_total': 234567, 'disk_processed': 23456, 'disk_remaining': 230000, 'created_at': datetime.datetime(2016, 0o1, 29, 13, 42, 2), 'updated_at': datetime.datetime(2016, 0o1, 29, 13, 42, 2), 'deleted_at': None, 'deleted': False }, { 'source_node': 'node10', 'dest_node': 'node20', 'source_compute': 'compute10', 'dest_compute': 'compute20', 'dest_host': '5.6.7.8', 'status': 'migrating', 'instance_uuid': UUID_2, 'migration_type': 'resize', 'hidden': False, 'memory_total': 456789, 'memory_processed': 56789, 'memory_remaining': 45000, 
'disk_total': 96789, 'disk_processed': 6789, 'disk_remaining': 96000, 'created_at': datetime.datetime(2016, 0o1, 22, 13, 42, 2), 'updated_at': datetime.datetime(2016, 0o1, 22, 13, 42, 2), 'deleted_at': None, 'deleted': False } ] def setUp(self): super(ServerMigrationsSamplesJsonTestV2_23, self).setUp() fake_context = context.RequestContext('fake', 'fake') self.mig1 = compute.Migration( context=fake_context, **self.fake_migrations[0]) self.mig1.create() self.mig2 = compute.Migration( context=fake_context, **self.fake_migrations[1]) self.mig2.create() fake_ins = fake_instance.fake_db_instance(uuid=self.UUID_1) fake_ins.pop("pci_devices") fake_ins.pop("security_groups") fake_ins.pop("services") fake_ins.pop("tags") fake_ins.pop("info_cache") fake_ins.pop("id") self.instance = compute.Instance( context=fake_context, **fake_ins) self.instance.create() def test_get_migration(self): response = self._do_get('servers/%s/migrations/%s' % (self.fake_migrations[0]["instance_uuid"], self.mig1.id)) self.assertEqual(200, response.status_code) self._verify_response('migrations-get', {"server_uuid": self.UUID_1}, response, 200) def test_list_migrations(self): response = self._do_get('servers/%s/migrations' % self.fake_migrations[0]["instance_uuid"]) self.assertEqual(200, response.status_code) self._verify_response('migrations-index', {"server_uuid_1": self.UUID_1}, response, 200) class ServerMigrationsSampleJsonTestV2_24(test_servers.ServersSampleBase): ADMIN_API = True extension_name = "server-migrations" scenarios = [('v2_24', {'api_major_version': 'v2.1'})] extra_extensions_to_load = ["os-migrate-server", "os-access-ips"] def setUp(self): """setUp method for server usage.""" super(ServerMigrationsSampleJsonTestV2_24, self).setUp() self.api.microversion = '2.24' self.uuid = self._post_server() self.context = context.RequestContext('fake', 'fake') fake_migration = { 'source_node': self.compute.host, 'dest_node': 'node10', 'source_compute': 'compute1', 'dest_compute': 'compute12', 'migration_type': 'live-migration', 'instance_uuid': self.uuid, 'status': 'running'} self.migration = compute.Migration(context=self.context, **fake_migration) self.migration.create() @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate') def test_live_migrate_abort(self, _live_migrate): self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server', {'hostname': self.compute.host}) uri = 'servers/%s/migrations/%s' % (self.uuid, self.migration.id) response = self._do_delete(uri) self.assertEqual(202, response.status_code) @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate') def test_live_migrate_abort_migration_not_found(self, _live_migrate): self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server', {'hostname': self.compute.host}) uri = 'servers/%s/migrations/%s' % (self.uuid, '45') response = self._do_delete(uri) self.assertEqual(404, response.status_code) @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate') def test_live_migrate_abort_migration_not_running(self, _live_migrate): self.migration.status = 'completed' self.migration.save() self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server', {'hostname': self.compute.host}) uri = 'servers/%s/migrations/%s' % (self.uuid, self.migration.id) response = self._do_delete(uri) self.assertEqual(400, response.status_code)
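# ---------------------------------------------------------------------------
# Illustrative sketch (standard library only, hypothetical class): the tests
# above stack several @mock.patch.object decorators. The mocks are injected
# bottom-up, so the decorator closest to the test method supplies the first
# mock argument. A minimal standalone demonstration:
import unittest
from unittest import mock


class _Service(object):
    def fetch(self):
        raise RuntimeError('real call')

    def push(self):
        raise RuntimeError('real call')


class PatchOrderSketch(unittest.TestCase):

    @mock.patch.object(_Service, 'push')    # outermost: injected second
    @mock.patch.object(_Service, 'fetch')   # closest to the method: injected first
    def test_order(self, mock_fetch, mock_push):
        mock_fetch.return_value = 'fetched'
        svc = _Service()
        self.assertEqual('fetched', svc.fetch())
        svc.push()
        mock_push.assert_called_once_with()
# ---------------------------------------------------------------------------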
import sys import logging import os import time import numpy as np import theano.tensor as T from theano import config import theano from blocks.algorithms import (GradientDescent, Adam, CompositeRule, StepClipping) from blocks.extensions import FinishAfter, Printing, ProgressBar from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate from blocks.extensions.monitoring import (TrainingDataMonitoring, DataStreamMonitoring) from blocks.bricks import Rectifier, Softmax, MLP from blocks.main_loop import MainLoop from blocks.model import Model from utils import (SaveLog, SaveParams, Glorot, visualize_attention, LRDecay, ErrorPerVideo) from blocks.initialization import Constant from blocks.graph import ComputationGraph, apply_noise from LSTM_attention_model import LSTMAttention from blocks.monitoring import aggregation from blocks.filter import VariableFilter from blocks.roles import WEIGHT from visualize import analyze floatX = theano.config.floatX logger = logging.getLogger('main') def setup_model(configs): tensor5 = theano.tensor.TensorType(config.floatX, (False,) * 5) # shape: T x B x C x X x Y input_ = tensor5('features') # shape: B x Classes target = T.ivector('targets') # shape: B x Classes unites = T.ivector('unites') model = LSTMAttention( configs, weights_init=Glorot(), biases_init=Constant(0)) model.initialize() (h, c, location, scale, alpha, patch, downn_sampled_input, conved_part_1, conved_part_2, pre_lstm) = model.apply(input_) model.location = location model.scale = scale model.alpha = alpha model.patch = patch model.downn_sampled_input = downn_sampled_input classifier = MLP( [Rectifier(), Softmax()], configs['classifier_dims'], weights_init=Glorot(), biases_init=Constant(0)) classifier.initialize() probabilities = classifier.apply(h[-1]) cost = CategoricalCrossEntropy().apply(target, probabilities) cost.name = 'CE' error_rate = MisclassificationRate().apply(target, probabilities) error_rate.name = 'ER' model.cost = cost model.error_rate = error_rate model.probabilities = probabilities model.targets = target model.unites = unites if configs['load_pretrained']: blocks_model = Model(model.cost) all_params = blocks_model.parameters with open('VGG_CNN_params.npz') as f: loaded = np.load(f) all_conv_params = loaded.keys() for param in all_params: if param.name in loaded.keys(): assert param.get_value().shape == loaded[param.name].shape param.set_value(loaded[param.name]) all_conv_params.pop(all_conv_params.index(param.name)) print "the following parameters did not match: " + str(all_conv_params) if configs['test_model']: print "\nTESTING THE MODEL: CHECK THE INPUT SIZE!" cg = ComputationGraph(model.cost) f = theano.function(cg.inputs, [model.cost], on_unused_input='ignore', allow_input_downcast=True) data = configs['get_streams'](configs[ 'batch_size'])[0].get_epoch_iterator().next() f(data[1], data[0]) print "TEST PASSED! 
;)\n" model.monitorings = [cost, error_rate] return model def train(model, configs): get_streams = configs['get_streams'] save_path = configs['save_path'] num_epochs = configs['num_epochs'] batch_size = configs['batch_size'] lrs = configs['lrs'] until_which_epoch = configs['until_which_epoch'] grad_clipping = configs['grad_clipping'] monitorings = model.monitorings # Training if configs['weight_noise'] > 0: cg = ComputationGraph(model.cost) weights = VariableFilter(roles=[WEIGHT])(cg.variables) cg = apply_noise(cg, weights, configs['weight_noise']) model.cost = cg.outputs[0].copy(name='CE') if configs['l2_reg'] > 0: cg = ComputationGraph(model.cost) weights = VariableFilter(roles=[WEIGHT])(cg.variables) new_cost = model.cost + configs['l2_reg'] * sum([ (weight ** 2).sum() for weight in weights]) model.cost = new_cost.copy(name='CE') blocks_model = Model(model.cost) all_params = blocks_model.parameters print "Number of found parameters:" + str(len(all_params)) print all_params default_lr = np.float32(configs['lrs'][0]) lr_var = theano.shared(default_lr, name="learning_rate") clipping = StepClipping(threshold=np.cast[floatX](grad_clipping)) # sgd_momentum = Momentum( # learning_rate=0.0001, # momentum=0.95) # step_rule = CompositeRule([clipping, sgd_momentum]) adam = Adam(learning_rate=lr_var) step_rule = CompositeRule([clipping, adam]) training_algorithm = GradientDescent( cost=model.cost, parameters=all_params, step_rule=step_rule, on_unused_sources='warn') monitored_variables = [ lr_var, aggregation.mean(training_algorithm.total_gradient_norm)] + monitorings for param in all_params: name = param.tag.annotations[0].name + "." + param.name to_monitor = training_algorithm.gradients[param].norm(2) to_monitor.name = name + "_grad_norm" monitored_variables.append(to_monitor) to_monitor = param.norm(2) to_monitor.name = name + "_norm" monitored_variables.append(to_monitor) train_data_stream, valid_data_stream = get_streams(batch_size) train_monitoring = TrainingDataMonitoring( variables=monitored_variables, prefix="train", after_epoch=True) valid_monitoring = DataStreamMonitoring( variables=monitored_variables, data_stream=valid_data_stream, prefix="valid", after_epoch=True) main_loop = MainLoop( algorithm=training_algorithm, data_stream=train_data_stream, model=blocks_model, extensions=[ train_monitoring, valid_monitoring, FinishAfter(after_n_epochs=num_epochs), SaveParams('valid_CE', blocks_model, save_path, after_epoch=True), SaveLog(after_epoch=True), ProgressBar(), # ErrorPerVideo(model, after_epoch=True, on_interrupt=True), LRDecay(lr_var, lrs, until_which_epoch, after_epoch=True), Printing(after_epoch=True)]) main_loop.run() def evaluate(model, load_path, configs): print "FIX THIS : NOT BEST" with open(load_path + 'trained_params.npz') as f: loaded = np.load(f) blocks_model = Model(model.cost) params_dicts = blocks_model.get_parameter_dict() params_names = params_dicts.keys() for param_name in params_names: param = params_dicts[param_name] # '/f_6_.W' --> 'f_6_.W' slash_index = param_name.find('/') param_name = param_name[slash_index + 1:] # if param_name in ['initial_location', 'initial_scale', 'initial_alpha']: # param_name = 'lstmattention.' 
+ param_name if param.get_value().shape == loaded[param_name].shape: param.set_value(loaded[param_name]) else: print param_name inps = ComputationGraph(model.error_rate).inputs eval_function = theano.function( inps, [model.error_rate, model.probabilities]) # tds, vds = configs['get_streams'](100) # it = tds.get_epoch_iterator() # data = it.next() # print eval_function(data[0], data[1]) return eval_function train_probs = [] valid_probs = [] train_unites = [] valid_unites = [] train_labels = [] valid_labels = [] it = tds.get_epoch_iterator() for batch in range(6): print batch data = it.next() train_probs.append(eval_function(data[0], data[1])[1]) train_unites.append(data[2]) train_labels.append(data[1]) it = vds.get_epoch_iterator() for batch in range(2): print batch data = it.next() valid_probs.append(eval_function(data[0], data[1])[1]) valid_unites.append(data[2]) valid_labels.append(data[1]) train_probs = np.vstack(train_probs) valid_probs = np.vstack(valid_probs) train_labels = np.hstack(train_labels) valid_labels = np.hstack(valid_labels) train_unites = np.hstack(train_unites) valid_unites = np.hstack(valid_unites) # For training map_vid_to_onehot = {} for j in list(set(train_unites)): map_vid_to_onehot[j] = [] for i in train_unites: for j in list(set(train_unites)): if i == j: map_vid_to_onehot[j].append(1) else: map_vid_to_onehot[j].append(0) map_vid_to_class = {} for j in list(set(train_unites)): onehot = np.array(map_vid_to_onehot[j])[:, np.newaxis] masked = onehot * train_probs map_vid_to_class[j] = np.argmax(np.sum(masked, axis=0)) predicted_labels = [] for i in train_unites: predicted_labels.append(map_vid_to_class[i]) incorrect = 0 for label, predicted_label in zip(train_labels, predicted_labels): if label != predicted_label: incorrect = incorrect + 1 print float(incorrect) / train_unites.shape[0] map_vid_to_onehot = {} for j in list(set(train_unites)): map_vid_to_onehot[j] = [] for i in train_unites: for j in list(set(train_unites)): if i == j: map_vid_to_onehot[j].append(1) else: map_vid_to_onehot[j].append(0) # For validation map_vid_to_onehot = {} for j in list(set(valid_unites)): map_vid_to_onehot[j] = [] for i in valid_unites: for j in list(set(valid_unites)): if i == j: map_vid_to_onehot[j].append(1) else: map_vid_to_onehot[j].append(0) map_vid_to_class = {} for j in list(set(valid_unites)): onehot = np.array(map_vid_to_onehot[j])[:, np.newaxis] masked = onehot * valid_probs map_vid_to_class[j] = np.argmax(np.sum(masked, axis=0)) predicted_labels = [] for i in valid_unites: predicted_labels.append(map_vid_to_class[i]) incorrect = 0 for label, predicted_label in zip(valid_labels, predicted_labels): if label != predicted_label: incorrect = incorrect + 1 print float(incorrect) / valid_unites.shape[0] return eval_function if __name__ == "__main__": dataset = str(sys.argv[1]) logging.basicConfig(level=logging.INFO) configs = {} if dataset == 'bmnist': from datasets import get_bmnist_streams configs['get_streams'] = get_bmnist_streams configs['save_path'] = 'results/Test_' configs['num_epochs'] = 600 configs['batch_size'] = 100 configs['lrs'] = [1e-4, 1e-5, 1e-6] configs['until_which_epoch'] = [150, 400, configs['num_epochs']] configs['grad_clipping'] = 2 configs['weight_noise'] = 0.0 # configs['conv_layers'] = [] configs['conv_layers'] = [ # 1 x 28 x 28 ['conv_1', (20, 1, 5, 5), (2, 2), None], # 20 x 16 x 16 ['conv_2', (50, 20, 5, 5), (2, 2), None], # 50 x 10 x 10 ['conv_3', (80, 50, 3, 3), (2, 2), None]] # 80 x 6 x 6 configs['num_layers_first_half_of_conv'] = 0 
configs['fc_layers'] = [['fc', (784, 128), 'relu']] configs['lstm_dim'] = 128 configs['attention_mlp_hidden_dims'] = [128] configs['cropper_input_shape'] = (100, 100) configs['patch_shape'] = (28, 28) configs['num_channels'] = 1 configs['classifier_dims'] = [configs['lstm_dim'], 64, 10] configs['load_pretrained'] = False configs['test_model'] = True configs['l2_reg'] = 0.001 elif dataset == 'cooking': from datasets import get_cooking_streams configs['get_streams'] = get_cooking_streams configs['save_path'] = 'results/Cook_4' configs['num_epochs'] = 600 configs['batch_size'] = 100 configs['lrs'] = [1e-4, 1e-5, 1e-6] configs['until_which_epoch'] = [10, 400, configs['num_epochs']] configs['grad_clipping'] = 2 configs['weight_noise'] = 0.01 configs['conv_layers'] = [] configs['conv_layers'] = [ # 3 x 40 x 40 ['conv_1', (20, 3, 5, 5), (2, 2), None], # 20 x 19 x 19 ['conv_2', (50, 20, 5, 5), (2, 2), None], # 50 x 8 x 8 ['conv_3', (80, 50, 3, 3), (2, 2), None]] # 80 x 4 x 4 configs['num_layers_first_half_of_conv'] = 0 configs['fc_layers'] = [['fc', (720, 400), 'relu']] configs['lstm_dim'] = 64 configs['attention_mlp_hidden_dims'] = [100] configs['cropper_input_shape'] = (200, 320) configs['patch_shape'] = (32, 32) configs['num_channels'] = 3 configs['classifier_dims'] = [configs['lstm_dim'], 64, 31] configs['load_pretrained'] = False configs['test_model'] = True configs['l2_reg'] = 0.001 timestr = time.strftime("%Y_%m_%d_at_%H_%M") save_path = configs['save_path'] + timestr configs['save_path'] = save_path log_path = os.path.join(save_path, 'log.txt') os.makedirs(save_path) fh = logging.FileHandler(filename=log_path) fh.setLevel(logging.DEBUG) logger.addHandler(fh) for item in configs: logger.info(item + ': %s' % str(configs[item])) model = setup_model(configs) eval_ = False if eval_: eval_function = evaluate( model, 'results/Cook_32016_03_10_at_20_40/', configs) # analyze('results/Cook_2_CNN2016_03_06_at_23_56/') visualize_attention(model, configs, eval_function) else: # evaluate(model, 'results/Cook_n_2016_03_05_at_00_42/', configs) train(model, configs)
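# ---------------------------------------------------------------------------
# Illustrative sketch (plain NumPy, hypothetical names): the evaluation code
# above turns per-clip softmax outputs into per-video predictions by summing
# the probabilities of clips that share a video id ("unites") and taking the
# argmax. The same aggregation, written compactly:
import numpy as np

def video_level_error_sketch(probs, video_ids, labels):
    """probs: (N, C) clip probabilities; video_ids, labels: length-N arrays."""
    video_prediction = {}
    for vid in np.unique(video_ids):
        mask = video_ids == vid
        video_prediction[vid] = int(np.argmax(probs[mask].sum(axis=0)))
    predicted = np.array([video_prediction[vid] for vid in video_ids])
    return float(np.mean(predicted != np.asarray(labels)))

# Every clip inherits its video's prediction, so the returned value is the
# same clip-weighted error rate that the one-hot masking loop computes.
# ---------------------------------------------------------------------------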
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Microbenchmarks for Keras components in eager mode.""" import time import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.eager.context import get_executor from tensorflow.python.keras.utils import tf_inspect from tensorflow.python.platform import benchmark # pylint: disable=unused-import def _run_benchmark(func, num_iters, execution_mode=None): with context.execution_mode(execution_mode): # call func to warm up func() if execution_mode == context.ASYNC: get_executor().wait() start = time.time() for _ in range(num_iters): func() if execution_mode == context.ASYNC: get_executor().wait() end = time.time() return end - start class MicroBenchmarksBase(tf.test.Benchmark): """Run and report benchmark results.""" def run_report(self, run_benchmark, func, num_iters, execution_mode=None): """Run and report benchmark results.""" total_time = run_benchmark(func, num_iters, execution_mode) mean_us = total_time * 1e6 / num_iters metrics = [{ "name": "exp_per_sec", "value": float("{0:.3f}".format(num_iters / total_time)) }, { "name": "us_per_exp", "value": float("{0:.3f}".format(total_time * 1e6 / num_iters)) }] benchmark_name = self._get_benchmark_name() self.report_benchmark( iters=num_iters, wall_time=mean_us, metrics=metrics, name=benchmark_name) def _get_benchmark_name(self): """Mostly copied from benchmark.py _get_name().""" stack = tf_inspect.stack() name = None for frame in stack[::-1]: f_locals = frame[0].f_locals f_self = f_locals.get("self", None) if isinstance(f_self, tf.test.Benchmark): name = frame[3] # Get the method name # This is a hack to get around the fact that some methods might have a # disable_tfrt decorator around them. In that case a function called # 'decorated' wraps the real called function underneath and so we # peek one deeper into the stack to get the real name. 
if name == "decorated": continue else: break if name is None: raise ValueError("Unable to determine calling Benchmark function.") if context.is_tfrt_enabled(): name = name + "_tfrt" return name def _run(self, func, num_iters, execution_mode=None): self.run_report(_run_benchmark, func, num_iters, execution_mode) def benchmark_layers_call_overhead(self): class OnlyOverheadLayer(tf.keras.layers.Layer): def call(self, x): return x layer = OnlyOverheadLayer() x = tf.convert_to_tensor([[1.]]) def fn(): layer(x) # pylint: disable=not-callable self._run(fn, 10000) def benchmark_op_layer_call_overhead(self): model_input = tf.keras.Input(shape=(1,)) model_output = model_input x = tf.convert_to_tensor([[1.1]]) for _ in range(20): model_output = tf.multiply(model_output, x) model = tf.keras.Model(inputs=model_input, outputs=model_output) def fn(): model(x) # pylint: disable=not-callable fn() self._run(fn, 100) def benchmark_model_predict_tensorlike_overhead(self): class OnlyOverheadLayer(tf.keras.layers.Layer): def call(self, x): return x model = tf.keras.Sequential([OnlyOverheadLayer()]) x = tf.convert_to_tensor([[1.]]) def fn(): model.predict(x) self._run(fn, 20) def benchmark_layers_embeddings_embedding_overhead(self): layer = tf.keras.layers.Embedding(1, 1) x = tf.zeros((1, 1), dtype="int32") def fn(): layer(x) self._run(fn, 10000) class KerasLayerCallOverheadBenchmarks( # pylint: disable=undefined-variable MicroBenchmarksBase, metaclass=benchmark.ParameterizedBenchmark): # The set of layers for benchmarking. To add benchmarks for new layers, # please add the parameter configs to "_benchmark_paramters". # The parameter of each layer benchmark is a tuple contains: # 1) The benchmark name with convention "{module_name}_{layer_name}"; # 2) The layer instance; # 3) The shape of the input to the layer; # 4) The kwargs used in the benchmark. It can include the number of # iterations to run the benchmarks, and kwargs used in the layer call. # By default, # of iteratons is 10000. 
_benchmark_parameters = [ ("advanced_activations_leaky_relu", tf.keras.layers.LeakyReLU(), (1, 1)), ("advanced_activations_prelu", tf.keras.layers.PReLU(), (1, 1)), ("advanced_activations_elu", tf.keras.layers.ELU(), (1, 1)), ("advanced_activations_thresholded_relu", tf.keras.layers.ThresholdedReLU(), (1, 1)), ("advanced_activations_softmax", tf.keras.layers.Softmax(), (1, 1)), ("advanced_activations_relu", tf.keras.layers.ReLU(), (1, 1)), ("core_masking", tf.keras.layers.Masking(), (1, 1)), ("core_dropout", tf.keras.layers.Dropout(0.5), (1, 1), { "training": True }), ("core_flatten", tf.keras.layers.Flatten(), (1, 1, 1)), ("core_dense", tf.keras.layers.Dense(1), (1, 1)), ("convolutional_conv1d", tf.keras.layers.Conv1D(1, (1,)), (1, 1, 1)), ("convolutional_conv2d", tf.keras.layers.Conv2D(1, (1, 1)), (1, 1, 1, 1)), ("convolutional_conv3d", tf.keras.layers.Conv3D( 1, (1, 1, 1)), (1, 1, 1, 1, 1)), ("batch_norm_fused_inf", tf.keras.layers.BatchNormalization(fused=True), (1, 1, 1, 1)), ("batch_norm_fused_train", tf.keras.layers.BatchNormalization(fused=True), (1, 1, 1, 1), {"training": True}), ("batch_norm_nonfused_inf", tf.keras.layers.BatchNormalization(fused=False), (1, 1, 1, 1)), ("batch_norm_nonfused_train", tf.keras.layers.BatchNormalization(fused=False), (1, 1, 1, 1), {"training": True}), ("normalization_layer_normalization", tf.keras.layers.LayerNormalization(), (1, 1), {"iters": 100, "training": True}), ] def benchmark_layer(self, layer, input_shape, kwargs=None): x = tf.ones(input_shape) def fn(): layer(x, **(kwargs or {})) default_iters = 10000 iters = kwargs.pop("iters", default_iters) if kwargs else default_iters self._run(fn, iters) if __name__ == "__main__": assert tf.executing_eagerly() tf.test.main()
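# ---------------------------------------------------------------------------
# Illustrative sketch (pure Python, hypothetical names): the harness above
# always calls the function once before timing so that tracing and other
# one-off setup costs are excluded, then reports mean time per call. The
# same pattern with no TensorFlow dependencies:
import time

def time_call_overhead_sketch(func, num_iters=10000):
    func()                                  # warm-up run, not timed
    start = time.time()
    for _ in range(num_iters):
        func()
    total = time.time() - start
    return {'us_per_call': total * 1e6 / num_iters,
            'calls_per_sec': num_iters / total}

# e.g. time_call_overhead_sketch(lambda: None) measures raw dispatch overhead.
# ---------------------------------------------------------------------------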
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorNode for autograd tracing of computations with Tensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from autograd import core as ag_core from tensorflow.python.eager import context from tensorflow.python.eager import custom_gradient from tensorflow.python.eager import tape from tensorflow.python.eager import tensor from tensorflow.python.framework import common_shapes from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops @ag_core.primitive def _tensor_numpy(t): return t.numpy() @ag_core.primitive def _as_gpu_tensor(t, index=0): return t.as_gpu_tensor(gpu_index=index) _as_gpu_tensor.defvjp( lambda g, ans, vs, gvs, t, index: g.as_cpu_tensor(), argnum=0) @custom_gradient.custom_gradient def _tensor_copy(t, ctx=None, device_name=None): def grad(dresult): return dresult._copy(device_name=t.device) # pylint: disable=protected-access return t.value._copy(ctx=ctx, device_name=device_name), grad # pylint: disable=protected-access @ag_core.primitive def _as_cpu_tensor(t): return t.as_cpu_tensor() _as_cpu_tensor.defvjp(lambda g, ans, vs, gvs, t: g.as_gpu_tensor(), argnum=0) # TODO(apassos,ashankar): The operator overrides here need to be kept in sync # with the overrides for ops.Tensor and ops.EagerTensor. # # Note that we cannot use self.value.__op__() because that would result # in an ops.EagerTensor instead of a TensorNode being returned. # # We need to figure out a way to ensure that the two are in sync. class TensorNode(ag_core.Node): """A TensorFlow Tensor.""" __slots__ = [] def __getitem__(self, idx): return array_ops._SliceHelper(self, idx) # pylint: disable=protected-access shape = property(lambda self: self.value.shape) dtype = property(lambda self: self.value.dtype) device = property(lambda self: self.value.device) def get_shape(self): return self.shape def numpy(self): return _tensor_numpy(self) def _shape_tuple(self): return self.value._shape_tuple # pylint: disable=protected-access def as_cpu_tensor(self): return _as_cpu_tensor(self) def as_gpu_tensor(self, gpu_index=0): return _as_gpu_tensor(self, gpu_index) def _copy(self, ctx=None, device_name=None): return _tensor_copy(self, ctx, device_name) def __neg__(self): return math_ops.negative(self) def __abs__(self): return math_ops.abs(self) # pylint: disable=protected-access def __invert__(self): # ops.Tensor used math_ops.logical_not as of August 2017. # Now that bitwise_ops.invert exists, it might make sense # for both ops.Tensor and TensorNode to use that if the # type is compatible. 
return math_ops.logical_not(self) def __hash__(self): return id(self) def __add__(self, other): if isinstance(self.value, tensor.LazyZero): return other if isinstance(other, tensor.LazyZero): return self return math_ops.add(self, other) def __radd__(self, other): if isinstance(self.value, tensor.LazyZero): return other if isinstance(ag_core.getval(other), tensor.LazyZero): return self return math_ops.add(other, self) def __sub__(self, other): return math_ops.subtract(self, other) def __rsub__(self, other): return math_ops.subtract(other, self) def __mul__(self, other): return math_ops.multiply(self, other) def __rmul__(self, other): return math_ops.multiply(other, self) def __mod__(self, other): return math_ops.floormod(self, other) def __rmod__(self, other): return math_ops.floormod(other, self) def __pow__(self, other): return math_ops.pow(self, other) def __rpow__(self, other): return math_ops.pow(other, self) def __div__(self, other): return math_ops._div_python2(self, other) # pylint: disable=protected-access def __rdiv__(self, other): return math_ops._div_python2(other, self) # pylint: disable=protected-access def __truediv__(self, other): return math_ops._truediv_python3(self, other) # pylint: disable=protected-access def __rtruediv__(self, other): return math_ops._truediv_python3(other, self) # pylint: disable=protected-access def __floordiv__(self, other): return math_ops.floordiv(self, other) def __rfloordiv__(self, other): return math_ops.floordiv(other, self) def __eq__(self, other): # math_ops.equal raises an error if shapes are not compatible, so check that # explicitly first. if common_shapes.is_broadcast_compatible( self.shape, ops.convert_to_tensor(other).shape): return math_ops.equal(self, other) return False def __gt__(self, other): return math_ops.greater(self, other) def __ge__(self, other): return math_ops.greater_equal(self, other) def __lt__(self, other): return math_ops.less(self, other) def __le__(self, other): return math_ops.less_equal(self, other) ag_core.register_node(TensorNode, tensor.Tensor) ag_core.register_node(TensorNode, ops.Tensor) def _zeros(shape, dtype): with context.device("cpu:0"): shape = tensor.Tensor(shape, dtype=dtypes.int32) return array_ops.fill(shape, tensor.Tensor(0, dtype=dtype)) def _ones(shape, dtype): return array_ops.fill( tensor.Tensor(shape, dtype=dtypes.int32), tensor.Tensor(1, dtype=dtype)) def _lazy_zero_tensor(zero): return _zeros(zero.shape, zero.dtype) tensor.LazyZero.tensor = _lazy_zero_tensor def _lazy_zero_to_tensor(lazy_zero, dtype=None, name=None, as_ref=False): del as_ref, name, dtype return _zeros(lazy_zero.shape, lazy_zero.dtype) ops.register_tensor_conversion_function(tensor.LazyZero, _lazy_zero_to_tensor) def _indexed_slices_to_tensor(value): """Converts an IndexedSlices object `value` to a Tensor. Args: value: An ops.IndexedSlices object. Returns: A dense Tensor representing the values in the given IndexedSlices. Raises: ValueError: If the IndexedSlices does not have the same dtype. 
""" if value.dense_shape is None: raise ValueError( "Tensor conversion requested for IndexedSlices without dense_shape: %s" % str(value)) return math_ops.unsorted_segment_sum(value.values, value.indices, value.dense_shape[0]) class TensorVSpace(ag_core.VSpace): """VSpace for tf/tfe Tensors in autograd.""" def __init__(self, value): if isinstance(value, ops.IndexedSlices): self.shape = tensor_shape.TensorShape(value.dense_shape.numpy()) self.dtype = value.values.dtype self.size = self.shape.num_elements() else: self.shape = value._shape_tuple() # pylint: disable=protected-access if self.shape is None or None in self.shape: # TODO(apassos) we currently don't check the size so this is fine, but # presumably there should be a better way of doing this. self.size = 1 else: self.size = 1 for s in self.shape: self.size *= s self.dtype = value.dtype # TODO(apassos) put gradients on the same device as ops. def __eq__(self, other): if isinstance(other, tape.NoneVSpace): return True if self.dtype == dtypes.resource or other.dtype == dtypes.resource: return True return (type(self) == type(other) # pylint: disable=unidiomatic-typecheck and self.dtype == other.dtype) def __ne__(self, other): return not self.__eq__(other) def zeros(self): return tensor.LazyZero(self.shape, self.dtype) def ones(self): return _ones(self.shape, self.dtype) def standard_basis(self): raise NotImplementedError def flatten(self, value): return array_ops.reshape(value, tensor.Tensor(-1)) def unflatten(self, value): return array_ops.reshape(value, tensor.Tensor(self.shape)) def mut_add(self, x, y): """Add wrapper safe for IndexedSlices and LazyZero.""" if isinstance(ag_core.getval(x), tensor.LazyZero): return y if isinstance(ag_core.getval(y), tensor.LazyZero): return x if isinstance(x, ops.IndexedSlices): x = _indexed_slices_to_tensor(x) if isinstance(y, ops.IndexedSlices): y = _indexed_slices_to_tensor(y) if x is None: return y if y is None: return x return math_ops.add(x, y) ag_core.register_vspace(TensorVSpace, tensor.Tensor) ag_core.register_vspace(TensorVSpace, ops.Tensor) ag_core.register_vspace(TensorVSpace, ops.IndexedSlices) ag_core.register_vspace(TensorVSpace, tensor.LazyZero) ag_core.register_node(TensorNode, tensor.LazyZero)
"""Test check utilities.""" # Authors: MNE Developers # Stefan Appelhoff <stefan.appelhoff@mailbox.org> # # License: BSD-3-Clause import os import os.path as op import sys import numpy as np import pytest from pathlib import Path import mne from mne import read_vectorview_selection from mne.datasets import testing from mne.io.pick import pick_channels_cov, _picks_to_idx from mne.utils import (check_random_state, _check_fname, check_fname, _suggest, _check_subject, _check_info_inv, _check_option, check_version, _path_like, _validate_type, _on_missing, requires_nibabel, _safe_input, _check_ch_locs) data_path = testing.data_path(download=False) base_dir = op.join(data_path, 'MEG', 'sample') fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') fname_event = op.join(base_dir, 'sample_audvis_trunc_raw-eve.fif') fname_fwd = op.join(base_dir, 'sample_audvis_trunc-meg-vol-7-fwd.fif') fname_mgz = op.join(data_path, 'subjects', 'sample', 'mri', 'aseg.mgz') reject = dict(grad=4000e-13, mag=4e-12) @testing.requires_testing_data def test_check(tmp_path): """Test checking functions.""" pytest.raises(ValueError, check_random_state, 'foo') pytest.raises(TypeError, _check_fname, 1) _check_fname(Path('./foo')) fname = tmp_path / 'foo' with open(fname, 'wb'): pass assert op.isfile(fname) _check_fname(fname, overwrite='read', must_exist=True) orig_perms = os.stat(fname).st_mode os.chmod(fname, 0) if not sys.platform.startswith('win'): with pytest.raises(PermissionError, match='read permissions'): _check_fname(fname, overwrite='read', must_exist=True) os.chmod(fname, orig_perms) os.remove(fname) assert not op.isfile(fname) pytest.raises(IOError, check_fname, 'foo', 'tets-dip.x', (), ('.fif',)) pytest.raises(ValueError, _check_subject, None, None) pytest.raises(TypeError, _check_subject, None, 1) pytest.raises(TypeError, _check_subject, 1, None) # smoke tests for permitted types check_random_state(None).choice(1) check_random_state(0).choice(1) check_random_state(np.random.RandomState(0)).choice(1) if check_version('numpy', '1.17'): check_random_state(np.random.default_rng(0)).choice(1) @testing.requires_testing_data @pytest.mark.parametrize('suffix', ('_meg.fif', '_eeg.fif', '_ieeg.fif', '_meg.fif.gz', '_eeg.fif.gz', '_ieeg.fif.gz')) def test_check_fname_suffixes(suffix, tmp_path): """Test checking for valid filename suffixes.""" new_fname = tmp_path / op.basename(fname_raw).replace('_raw.fif', suffix) raw = mne.io.read_raw_fif(fname_raw).crop(0, 0.1) raw.save(new_fname) mne.io.read_raw_fif(new_fname) def _get_data(): """Read in data used in tests.""" # read forward model forward = mne.read_forward_solution(fname_fwd) # read data raw = mne.io.read_raw_fif(fname_raw, preload=True) events = mne.read_events(fname_event) event_id, tmin, tmax = 1, -0.1, 0.15 # decimate for speed left_temporal_channels = read_vectorview_selection('Left-temporal') picks = mne.pick_types(raw.info, meg=True, selection=left_temporal_channels) picks = picks[::2] raw.pick_channels([raw.ch_names[ii] for ii in picks]) del picks raw.info.normalize_proj() # avoid projection warnings epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, baseline=(None, 0.), preload=True, reject=reject) noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=0.) 
data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.15) return epochs, data_cov, noise_cov, forward @testing.requires_testing_data def test_check_info_inv(): """Test checks for common channels across fwd model and cov matrices.""" epochs, data_cov, noise_cov, forward = _get_data() # make sure same channel lists exist in data to make testing life easier assert epochs.info['ch_names'] == data_cov.ch_names assert epochs.info['ch_names'] == noise_cov.ch_names # check whether bad channels get excluded from the channel selection # info info_bads = epochs.info.copy() info_bads['bads'] = info_bads['ch_names'][1:3] # include two bad channels picks = _check_info_inv(info_bads, forward, noise_cov=noise_cov) assert [1, 2] not in picks # covariance matrix data_cov_bads = data_cov.copy() data_cov_bads['bads'] = data_cov_bads.ch_names[0] picks = _check_info_inv(epochs.info, forward, data_cov=data_cov_bads) assert 0 not in picks # noise covariance matrix noise_cov_bads = noise_cov.copy() noise_cov_bads['bads'] = noise_cov_bads.ch_names[1] picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov_bads) assert 1 not in picks # test whether reference channels get deleted info_ref = epochs.info.copy() info_ref['chs'][0]['kind'] = 301 # pretend to have a ref channel picks = _check_info_inv(info_ref, forward, noise_cov=noise_cov) assert 0 not in picks # pick channels in all inputs and make sure common set is returned epochs.pick_channels([epochs.ch_names[ii] for ii in range(10)]) data_cov = pick_channels_cov(data_cov, include=[data_cov.ch_names[ii] for ii in range(5, 20)]) noise_cov = pick_channels_cov(noise_cov, include=[noise_cov.ch_names[ii] for ii in range(7, 12)]) picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov, data_cov=data_cov) assert list(range(7, 10)) == picks def test_check_option(): """Test checking the value of a parameter against a list of options.""" allowed_values = ['valid', 'good', 'ok'] # Value is allowed assert _check_option('option', 'valid', allowed_values) assert _check_option('option', 'good', allowed_values) assert _check_option('option', 'ok', allowed_values) assert _check_option('option', 'valid', ['valid']) # Check error message for invalid value msg = ("Invalid value for the 'option' parameter. Allowed values are " "'valid', 'good', and 'ok', but got 'bad' instead.") with pytest.raises(ValueError, match=msg): assert _check_option('option', 'bad', allowed_values) # Special error message if only one value is allowed msg = ("Invalid value for the 'option' parameter. The only allowed value " "is 'valid', but got 'bad' instead.") with pytest.raises(ValueError, match=msg): assert _check_option('option', 'bad', ['valid']) def test_path_like(): """Test _path_like().""" str_path = str(base_dir) pathlib_path = Path(base_dir) no_path = dict(foo='bar') assert _path_like(str_path) is True assert _path_like(pathlib_path) is True assert _path_like(no_path) is False def test_validate_type(): """Test _validate_type.""" _validate_type(1, 'int-like') with pytest.raises(TypeError, match='int-like'): _validate_type(False, 'int-like') @requires_nibabel() @testing.requires_testing_data def test_suggest(): """Test suggestions.""" names = mne.get_volume_labels_from_aseg(fname_mgz) sug = _suggest('', names) assert sug == '' # nothing sug = _suggest('Left-cerebellum', names) assert sug == " Did you mean 'Left-Cerebellum-Cortex'?" 
sug = _suggest('Cerebellum-Cortex', names) assert sug == " Did you mean one of ['Left-Cerebellum-Cortex', 'Right-Cerebellum-Cortex', 'Left-Cerebral-Cortex']?" # noqa: E501 def test_on_missing(): """Test _on_missing.""" msg = 'test' with pytest.raises(ValueError, match=msg): _on_missing('raise', msg) with pytest.warns(RuntimeWarning, match=msg): _on_missing('warn', msg) _on_missing('ignore', msg) with pytest.raises(ValueError, match='Invalid value for the \'on_missing\' parameter'): _on_missing('foo', msg) def _matlab_input(msg): raise EOFError() def test_safe_input(monkeypatch): """Test _safe_input.""" monkeypatch.setattr(mne.utils.check, 'input', _matlab_input) with pytest.raises(RuntimeError, match='Could not use input'): _safe_input('whatever', alt='nothing') assert _safe_input('whatever', use='nothing') == 'nothing' @testing.requires_testing_data def test_check_ch_locs(): """Test _check_ch_locs behavior.""" info = mne.io.read_info(fname_raw) assert _check_ch_locs(info=info) for picks in ([0], [0, 1], None): assert _check_ch_locs(info=info, picks=picks) for ch_type in ('meg', 'mag', 'grad', 'eeg'): assert _check_ch_locs(info=info, ch_type=ch_type) # drop locations for EEG picks_eeg = _picks_to_idx(info=info, picks='eeg') for idx in picks_eeg: info['chs'][idx]['loc'][:3] = np.nan # EEG tests should fail now assert _check_ch_locs(info=info, picks=picks_eeg) is False assert _check_ch_locs(info=info, ch_type='eeg') is False # tests for other (and "all") channels should still pass assert _check_ch_locs(info=info) assert _check_ch_locs(info=info, ch_type='mag')
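# Hedged sketch (not MNE's actual implementation) of an option checker with the
# error-message behaviour exercised by test_check_option above: accept a value
# from a whitelist, or raise ValueError listing the allowed values, with
# special wording when only one value is permitted.
def check_option(param_name, value, allowed_values):
    if value in allowed_values:
        return True
    if len(allowed_values) == 1:
        msg = ("Invalid value for the '%s' parameter. The only allowed value "
               "is '%s', but got '%s' instead."
               % (param_name, allowed_values[0], value))
    else:
        head = ", ".join("'%s'" % v for v in allowed_values[:-1])
        msg = ("Invalid value for the '%s' parameter. Allowed values are "
               "%s, and '%s', but got '%s' instead."
               % (param_name, head, allowed_values[-1], value))
    raise ValueError(msg)

# check_option('option', 'bad', ['valid', 'good', 'ok']) raises:
# "Invalid value for the 'option' parameter. Allowed values are 'valid',
#  'good', and 'ok', but got 'bad' instead."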
#!/usr/bin/env python3 USAGE = ''' Usage: lgid [-v...] train --model=PATH [--vectors=DIR] CONFIG INFILE... lgid [-v...] test --model=PATH [--vectors=DIR] CONFIG INFILE... lgid [-v...] validate --model=PATH [--vectors=DIR] CONFIG INFILE... lgid [-v...] classify --model=PATH --out=PATH [--vectors=DIR] CONFIG INFILE... lgid [-v...] get-lg-recall CONFIG INFILE... lgid [-v...] list-model-weights --model=PATH CONFIG lgid [-v...] list-mentions CONFIG INFILE... lgid [-v...] count-mentions CONFIG INFILE... lgid [-v...] find-common-codes CONFIG INFILE... lgid [-v...] download-crubadan-data CONFIG lgid [-v...] build-odin-lm CONFIG Commands: train train a model from supervised data test test on new data using a saved model validate Perform n-fold cross validation on the data classify output predictions on new data using a saved model get-lg-recall find the language mention recall upper bound for a set of files list-model-weights show feature weights in a model and features not used list-mentions just print language mentions from input files count-mentions count the number of mentions of each language found in the input files find-common-codes build the text file at most-common-codes showing the most common code for each language download-crubadan-data fetch the Crubadan language model data from the web build-odin-lm produces language model files from ODIN data Arguments: CONFIG path to a config file INFILE a Freki-formatted text file Options: -h, --help print this usage and exit -v, --verbose increase logging verbosity --model PATH where to save/load a trained model --out PATH where to write freki files with added information --vectors DIR a directory to print feature vectors for inspection Examples: lgid -v train --model=model.gz config.ini 123.freki 456.freki lgid -v test --model=model.gz config.ini 789.freki lgid -v classify --model=model.gz config.ini 1000.freki lgid -v list-mentions config.ini 123.freki lgid -v download-crubadan-data config.ini ''' import time t0 = time.time() import os import errno from configparser import ConfigParser import logging import numpy as np import re import random random.seed(1) import codecs import docopt from freki.serialize import FrekiDoc from lgid.models import ( StringInstance, LogisticRegressionWrapper as Model, chi2 ) from lgid.util import ( read_language_table, encode_instance_id, decode_instance_id, read_crubadan_language_model, read_odin_language_model, spans, find_common_codes, read_language_mapping_table ) from lgid.analyzers import ( language_mentions, ) from lgid.features import ( gl_features, w_features, l_features, g_features, t_features, m_features, get_mention_by_lines ) def main(): args = docopt.docopt(USAGE) logging.basicConfig(level=50 - ((args['--verbose'] + 2) * 10)) config = ConfigParser() config.read(args['CONFIG']) modelpath = args['--model'] vector_dir = args['--vectors'] single_mention = config['parameters']['single-longest-mention'] == 'yes' if vector_dir != None: vector_dir = vector_dir.strip('/') infiles = args['INFILE'] if args['train']: train(infiles, modelpath, vector_dir, config, single_mention) elif args['classify']: output = args['--out'] predictions = classify(infiles, modelpath, config, vector_dir, single_mention) write_to_files(infiles, predictions, output) elif args['get-lg-recall']: calc_mention_recall(infiles, config, single_mention) elif args['test']: test(infiles, modelpath, vector_dir, config, single_mention) elif args['validate']: n_fold_validation(5, infiles, modelpath, vector_dir, config, single_mention) elif 
args['list-model-weights']: get_feature_weights(modelpath, config) elif args['list-mentions']: list_mentions(infiles, config, single_mention) elif args['count-mentions']: count_mentions(infiles, config, single_mention) elif args['find-common-codes']: find_common_codes(infiles, config) elif args['download-crubadan-data']: download_crubadan_data(config) elif args['build-odin-lm']: build_odin_lm(config) def calc_mention_recall(infiles, config, single_mention, instances=None): """ Calculate the upper bound for language mentions: the percentage of correct labels that are mentioned in the file :param infiles: a list of freki filepaths :param config: a config object :param single_mention: whether to use the longest mention or all mentions from each language mention span :return: none """ if not instances: instances = list(get_instances(infiles, config, None, single_mention)) lgtable = read_language_table(config['locations']['language-table']) caps = config['parameters'].get('mention-capitalization', 'default') lang_mapping_tables = read_language_mapping_table(config) positive = 0 length = 0 file_dict = {} for inst in instances: if inst.label: file_num = inst.id[0] lang = '-'.join(inst.id[-2:]) if file_num in file_dict: file_dict[file_num].append(lang) else: file_dict[file_num] = [lang] for file in infiles: doc = FrekiDoc.read(file) num = doc.get_line(1).block.doc_id if num in file_dict: mentions = list(language_mentions(doc, lgtable, lang_mapping_tables, caps, single_mention)) length += len(file_dict[num]) for label in file_dict[num]: n = label.split('-')[0] n = re.sub('_', ' ', n) c = label.split('-')[1] for mention in mentions: if n == mention.name and c == mention.code: positive += 1 break if length: recall = float(positive)/length else: recall = 1 print("Language mention recall: " + str(recall)) return recall mistake_counts = {} lang_accs = {} lm_sizes = {} file_accs = {} file_mentions = {} def n_fold_validation(n, infiles, modelpath, vector_dir, config, single_mention): instance_dict = {} accs_lang = [] accs_both = [] accs_code = [] recalls = [] groups = {} for file in infiles: choice = random.randint(1, n) if choice in groups: groups[choice].append(file) else: groups[choice] = [file] i = 1 for group in groups: logging.info("Cross validation group " + str(i) + '/' + str(n)) i += 1 training = [] testing = groups[group] for g2 in groups: if g2 != group: training.extend(groups[g2]) train_data = list(cached_get_instances(training, config, vector_dir, instance_dict, single_mention)) train(training, modelpath, vector_dir, config, single_mention, instances=train_data) test_inst = list(cached_get_instances(testing, config, vector_dir, instance_dict, single_mention)) recall = calc_mention_recall(testing, config, single_mention, instances=test_inst) acc_lang, acc_both, acc_code = test(testing, modelpath, vector_dir, config, single_mention, instances=test_inst) accs_lang.append(acc_lang) accs_both.append(acc_both) accs_code.append(acc_both) recalls.append(recall) print('Average and Std Dev of:') print('Language Only:\t' + str(np.average(accs_lang)) + '\t' + str(np.std(accs_lang))) print('Language and Code:\t' + str(np.average(accs_both)) + '\t' + str(np.std(accs_both))) print('Code Only:\t' + str(np.average(accs_code)) + '\t' + str(np.std(accs_code))) print('Language Mention Recall:\t' + str(np.average(recalls)) + '\t' + str(np.std(recalls))) x = [] y = [] for file in file_accs: file_acc = file_accs[file] if file in file_mentions: x.append(file_acc) y.append(file_mentions[file]) #pr.create_stats() 
#pr.print_stats('cumtime') #pr.disable() def get_time(t): m, s = divmod(time.time() - t, 60) h, m = divmod(m, 60) return "%d:%02d:%02d" % (h, m, s) def write_to_files(infiles, predictions, output): """ Modify freki files to include predicted language names and write to an output directory :param infiles: list of freki filepaths :param predictions: dictionary of instance-id to language name and code prediction :param output: filepath of the output directory :return: none """ os.makedirs(output, exist_ok=True) for file in infiles: doc = FrekiDoc.read(file) f_name = file.split('/')[-1] f_name = re.sub('.freki', '', f_name) for span in spans(doc): l_lines = [] for line in span: if 'L' in line.tag: l_lines.append(line) for l_line in l_lines: key = (str(f_name), l_line.span_id, l_line.lineno) pred = predictions[key].split('-') lang_name = pred[0].title() lang_code = pred[1] for line in span: if line.lineno >= l_line.lineno: line.attrs['lang_code'] = lang_code line.attrs['lang_name'] = lang_name doc.set_line(line.lineno, line) path = output + '/' + file.split('/')[-1] if not os.path.exists(os.path.dirname(path)): try: os.makedirs(os.path.dirname(path), exist_ok=True) except OSError as exc: # Guard against race condition if exc.errno != errno.EEXIST: raise codecs.open(path, 'w', encoding='utf8').write(str(doc)) def train(infiles, modelpath, vector_dir, config, single_mention, instances=None): """ Train a language-identification model from training data Args: infiles: iterable of Freki file paths modelpath: the path where the model will be written vector_dir: directory where feature vectors will be written config: model parameters """ if not instances: logging.info('Getting instances') instances = list(get_instances(infiles, config, vector_dir, single_mention)) model = Model() model.feat_selector = chi2 logging.info('Training') model.train(instances) logging.info('Saving model') model.save(modelpath) def find_best_and_normalize(instances, dists): """ Normalize probabilities of languages and return the highest Args: instances: a list of instances relevant to a single sample of text dists: a list of Distributions corresponding to the instances """ labels = [] probs = [] for i in range(len(instances)): lang = '-'.join(decode_instance_id(instances[i])[-2:]) labels.append(lang) assigned = bool(dists[i].best_class) prob = dists[i].best_prob if assigned: probs.append(prob) else: probs.append(-prob) probs = np.asarray(probs) probs = (probs - np.amin(probs)) / (np.amax(probs) - np.amin(probs)) highest = np.argmax(probs) return labels[highest] def classify(infiles, modelpath, config, vector_dir, single_mention, instances=None): """ Classify instances found in the given files Args: infiles: iterable of Freki file paths modelpath: the path where the model will be loaded config: model parameters instances: a list of instances passed by test() to streamline """ if not instances: instances = list(get_instances(infiles, config, vector_dir, single_mention)) inst_dict = {} prediction_dict = {} for inst in instances: num = tuple(decode_instance_id(inst)[:-2]) if num in inst_dict: inst_dict[num].append(inst) else: inst_dict[num] = [inst] model = Model() model = model.load(modelpath) for inst_id in inst_dict: results = model.test(inst_dict[inst_id]) top = find_best_and_normalize(inst_dict[inst_id], results) prediction_dict[inst_id] = top return prediction_dict def test(infiles, modelpath, vector_dir, config, single_mention, instances=None): """ Test a language-identification model Args: infiles: iterable of Freki file 
paths modelpath: the path where the model will be loaded vector_dir: directory where feature vectors will be written config: model parameters """ real_classes = {} if not instances: instances = list(get_instances(infiles, config, vector_dir, single_mention)) for inst in instances: if bool(inst.label): num = tuple(decode_instance_id(inst)[:-2]) real_classes[num] = '-'.join(decode_instance_id(inst)[-2:]) predicted_classes = classify(infiles, modelpath, config, vector_dir, instances) right = 0 right_dialect = 0 right_code = 0 file_counts = {} for key in real_classes: key2 = key[0] if key2 not in file_counts: file_counts[key2] = [0, 0] file_counts[key2][1] += 1 if key in predicted_classes: if real_classes[key].split('-')[1] == predicted_classes[key].split('-')[1]: right_code += 1 if real_classes[key].split('-')[0] == predicted_classes[key].split('-')[0]: right += 1 if real_classes[key] == predicted_classes[key]: right_dialect += 1 file_counts[key2][0] += 1 if real_classes[key] != predicted_classes[key]: print(key) if real_classes[key] in lang_accs: lang_accs[real_classes[key]][1] += 1 else: lang_accs[real_classes[key]] = [0, 1] mistake_key = (real_classes[key], predicted_classes[key]) if mistake_key in mistake_counts: mistake_counts[mistake_key] += 1 else: mistake_counts[mistake_key] = 1 else: if real_classes[key] in lang_accs: lang_accs[real_classes[key]][0] += 1 lang_accs[real_classes[key]][1] += 1 else: lang_accs[real_classes[key]] = [1, 1] for key2 in file_counts: file_acc = float(file_counts[key2][0]) / file_counts[key2][1] logging.info("Accuracy on file " + str(key2) + ':\t' + str(file_acc)) file_accs[key2] = file_acc mistakes = open(config['locations']['classify-error-file'], 'w') mistakes.write('(real, predicted)\tcount\n') for mistake_key in sorted(mistake_counts, key=lambda x: mistake_counts[x], reverse=True): mistakes.write(str(mistake_key) + '\t' + str(mistake_counts[mistake_key]) + '\n') print('Samples:\t' + str(len(real_classes))) acc_lang = right / len(real_classes) acc_both = right_dialect / len(real_classes) acc_code = right_code / len(real_classes) print('Accuracy on Language (Name only):\t' + str(acc_lang)) print('Accuracy on Dialects (Name + Code):\t' + str(acc_both)) print('Accuracy on Code Only:\t' + str(acc_code)) return acc_lang, acc_both, acc_code def get_feature_weights(modelpath, config): """ print config features not used and weights of individual features :param modelpath: path to model :param config: config object :return: none, prints to console """ model = Model() model = model.load(modelpath) print("Features not used:") lower_feats = [] for a_feat in model.feat_names(): lower_feats.append(a_feat.lower()) for feat in config['features']: if config['features'][feat] == 'yes': if str(feat) not in lower_feats: print('\t' + feat) print("Feature weights:") for i in range(len(model.feat_names())): print('\t' + model.feat_names()[i] + ": " + str(model.learner.coef_[0][i])) def list_mentions(infiles, config, single_mention): """ List all language mentions found in the given files Args: infiles: iterable of Freki file paths config: model parameters """ lgtable = read_language_table(config['locations']['language-table']) lang_mapping_tables = read_language_mapping_table(config) caps = config['parameters'].get('mention-capitalization', 'default') for infile in infiles: doc = FrekiDoc.read(infile) lgmentions = list(language_mentions(doc, lgtable, lang_mapping_tables, caps, single_mention)) for m in lgmentions: print('\t'.join(map(str, m))) def count_mentions(infiles, config, 
single_mention): """ List all languages mentioned found in the given files with the count of how many times each was mentioned Args: infiles: iterable of Freki file paths config: model parameters """ lgtable = read_language_table(config['locations']['language-table']) lang_mapping_tables = read_language_mapping_table(config) caps = config['parameters'].get('mention-capitalization', 'default') mentions = {} for infile in infiles: doc = FrekiDoc.read(infile) lgmentions = list(language_mentions(doc, lgtable, lang_mapping_tables, caps, single_mention)) for m in lgmentions: if m[4] in mentions: mentions[m[4]] += 1 else: mentions[m[4]] = 1 for m in mentions: mentions[m] = int(mentions[m] / len(lgtable[m])) ordered = sorted(mentions, key=lambda x: mentions[x], reverse=True) for m in ordered: print('{}: {}'.format(m, mentions[m])) def print_feature_vector(_id, feats, file): """ print the feature values of a given vector to a file :param _id: instance id :param feats: feature dictionary :param file: file to write to :return: none, writes to file """ file.write('{}: {}\n'.format(_id, ", ".join(feats))) def cached_get_instances(infiles, config, vector_dir, instance_dict, single_mention): locs = config['locations'] lgtable = {} if locs['language-table']: lgtable = read_language_table(locs['language-table']) common_table = {} if locs['most-common-codes']: common_table = read_language_table(locs['most-common-codes']) eng_words = open(locs['english-word-names'], 'r').read().split('\n') insts = [] index = 1 for file in infiles: logging.info("Instances from file " + str(index) + '/' + str(len(infiles))) index += 1 if file not in instance_dict: instance_dict[file] = list(get_instances([file], config, vector_dir, single_mention, lgtable, common_table, eng_words)) insts.extend(instance_dict[file]) return insts def get_instances(infiles, config, vector_dir, single_mention, lgtable=None, common_table=None, eng_words=None): vector_file = None """ Read Freki documents from *infiles* and return training instances Args: infiles: iterable of Freki file paths config: model parameters Yields: training/test instances from Freki documents """ if not lgtable: locs = config['locations'] lgtable = {} if locs['language-table']: lgtable = read_language_table(locs['language-table']) common_table = {} if locs['most-common-codes']: common_table = read_language_table(locs['most-common-codes']) if locs['english-word-names']: eng_words = open(locs['english-word-names'], 'r').read().split('\n') lang_mapping_tables = read_language_mapping_table(config) global t1 i = 1 for infile in infiles: logging.info('File ' + str(i) + '/' + str(len(infiles))) if vector_dir != None: os.makedirs(vector_dir, exist_ok=True) vector_file = open(vector_dir + '/' + os.path.basename(infile) + '.vector', 'w') i += 1 doc = FrekiDoc.read(infile) context = {} context['last-lineno'] = max(x.lineno for x in doc.lines()) caps = config['parameters'].get('mention-capitalization', 'default') lgmentions = list(language_mentions(doc, lgtable, lang_mapping_tables, caps, single_mention)) if not lgmentions: lgmentions = [] mention_dict = get_mention_by_lines(lgmentions) features_template = dict(((m.name, m.code), {}) for m in lgmentions) lang_names = set(m.name for m in lgmentions) file_mentions[doc.blocks[0].doc_id] = len(lang_names) name_code_pairs = list(features_template.keys()) word_clm = read_crubadan_language_model(name_code_pairs, config, 'word') char_clm = read_crubadan_language_model(name_code_pairs, config, 'character') word_olm = 
read_odin_language_model(name_code_pairs, config, 'word') char_olm = read_odin_language_model(name_code_pairs, config, 'character') morph_olm = read_odin_language_model(name_code_pairs, config, 'morpheme') lms = (word_clm, char_clm, word_olm, char_olm, morph_olm) for pair in name_code_pairs: name = pair[0].replace(' ', '_') + '-' + pair[1] if pair in word_olm and pair in char_olm: lm_sizes[name] = len(word_olm[pair]) + len(char_olm[pair]) for span in spans(doc): if not span: continue context['span-top'] = span[0].lineno context['span-bottom'] = span[-1].lineno features = dict(((m.name, m.code), {}) for m in lgmentions) l_lines = [] for line in span: context['line'] = line if 'L' in line.tag: lgname = line.attrs.get('lang_name', '???').lower() lgcode = line.attrs.get('lang_code', 'und') l_feats = dict(((m.name, m.code), {}) for m in lgmentions) l_features(l_feats, mention_dict, context, lms, config) t1 = time.time() l_lines.append((line, l_feats, lgname, lgcode)) # if L and some other tag co-occur, only record local feats if 'G' in line.tag: g_features(features, mention_dict, context, config) if 'T' in line.tag: t_features(features, mention_dict, context, config) if 'M' in line.tag: m_features(features, mention_dict, context, config) else: # if G or M occur without L, record globally if 'G' in line.tag: g_features(features, mention_dict, context, config) if 'T' in line.tag: t_features(features, mention_dict, context, config) if 'M' in line.tag: m_features(features, mention_dict, context, config) gl_features(features, mention_dict, context, config, common_table, eng_words, len(lang_names)) w_features(features, mention_dict, context, config, len(lang_names), len(list(doc.lines()))) for l_line, l_feats, lgname, lgcode in l_lines: goldpair = (lgname, lgcode) for pair, feats in l_feats.items(): # print(pair, goldpair, pair == goldpair) id_ = encode_instance_id( os.path.splitext(os.path.basename(infile))[0], l_line.span_id, l_line.lineno, pair[0].replace(' ', '_'), pair[1] ) label = True if pair == goldpair else False instfeats = dict(feats) instfeats.update(features[pair]) if vector_dir != None: print_feature_vector(id_, instfeats, vector_file) yield StringInstance(id_, label, instfeats) if vector_file: vector_file.close() def download_crubadan_data(config): """ Download and extract Crubadan language data """ from io import BytesIO from zipfile import ZipFile import csv import requests logging.info('Downloading Crubadan data') index = config['locations']['crubadan-index'] baseuri = config['locations']['crubadan-base-uri'] output_dir = config['locations']['crubadan-language-model'] os.makedirs(output_dir, exist_ok=True) table = open(index, 'r', encoding='utf8') reader = csv.reader(table) i = j = 0 header = next(reader) # discard header row sess = requests.Session() for row in reader: code = row[0] iso_code = row[8].strip() url = requests.compat.urljoin(baseuri, code + '.zip') combined_code = "{}_{}".format(iso_code, code) dest = os.path.join(output_dir, combined_code) logging.debug( 'Downloading Crubadan data for {} from {}'.format(combined_code, url) ) try: response = sess.get(url, timeout=30) except requests.exceptions.Timeout: logging.error( 'Request timed out while trying to download data for {} from {}. Skipping...' .format(combined_code, url) ) continue if response.status_code != requests.codes.ok: logging.error( 'Failed to download the data for {} from {}. Response code {}. Skipping...' 
.format(combined_code, url, response.status_code) ) continue file = ZipFile(BytesIO(response.content)) i += 1 # basic validation (won't cover every scenario!) if any(os.path.exists(os.path.join(dest, p)) for p in file.namelist()): logging.error( 'Unzipping the archive for {} will overwrite data! Skipping...' .format(combined_code) ) continue file.extractall(dest) j += 1 logging.debug('Successfully extracted data for {}'.format(combined_code)) logging.info('Successfully downloaded {} files from Crubadan'.format(i)) logging.info('Successfully extracted {} files from Crubadan'.format(j)) def build_odin_lm(config): """ Build the LMs from the odin data """ from lgid.buildlms import build_from_odin logging.info('Building ODIN LMs') indirec = config['locations']['odin-source'] outdirec = config['locations']['odin-language-model'] nc = 3 nw = 2 morph_split = config['parameters']['morpheme-delimiter'] build_from_odin(indirec, outdirec, nc, nw, morph_split=morph_split) logging.info('Successfully built ODIN LMs') if __name__ == '__main__': main()
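# Hedged standalone illustration of the scoring step in find_best_and_normalize
# above: probabilities of unassigned candidate languages are negated, all
# scores are min-max scaled, and the label with the highest scaled score wins.
# The labels and numbers below are made up.
import numpy as np

labels = ['english-eng', 'french-fra', 'german-deu']
probs = np.asarray([0.9, -0.4, 0.7])  # -0.4: classifier did not assign this label
scaled = (probs - np.amin(probs)) / (np.amax(probs) - np.amin(probs))
best = labels[int(np.argmax(scaled))]
print(best)  # -> english-eng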
"""Test sorting of entity objects. """ import pytest import icat import icat.config from conftest import getConfig @pytest.fixture(scope="module") def client(): client, _ = getConfig(needlogin=False) return client def test_sort_users(client): """Sort Users. This is the most simple sorting example: Users are sorted by the name attribute. """ u1 = client.new("user", id=728, name="u_a") u2 = client.new("user", id=949, name="u_c") u3 = client.new("user", id=429, name="u_b") users = [ u1, u2, u3 ] users.sort(key=icat.entity.Entity.__sortkey__) assert users == [ u1, u3, u2 ] def test_sort_datafile_only(client): """Sort Datafiles with no Datasets. Datafiles having no relation to a Dataset are sorted by the name attribute. """ df1 = client.new("datafile", id=937, name="df_b") df2 = client.new("datafile", id=391, name="df_d") df3 = client.new("datafile", id=819, name="df_e") df4 = client.new("datafile", id=805, name="df_b") df5 = client.new("datafile", id=579, name="df_d") df6 = client.new("datafile", id=652, name="df_a") df7 = client.new("datafile", id=694, name="df_c") datafiles = [ df1, df2, df3, df4, df5, df6, df7 ] datafiles.sort(key=icat.entity.Entity.__sortkey__) # Note: this relies on the fact that list.sort() is guaranteed to # be stable (in Python 2.3 and newer), e.g. that df1 will be # before df4 and df2 before df5 in the result. assert datafiles == [ df6, df1, df4, df7, df2, df5, df3 ] def test_sort_datafile_dataset(client): """Sort Datafiles with Datasets. Datafiles having a relation to a Dataset (not related to an investigation in turn) are sorted by (dataset.name, name). """ ds1 = client.new("dataset", id=592, name="ds_a") ds2 = client.new("dataset", id=341, name="ds_b") df1 = client.new("datafile", id=429, name="df_b", dataset=ds1) df2 = client.new("datafile", id=229, name="df_d", dataset=ds2) df3 = client.new("datafile", id=286, name="df_e", dataset=ds1) df4 = client.new("datafile", id=584, name="df_b", dataset=ds2) df5 = client.new("datafile", id=432, name="df_d", dataset=ds1) df6 = client.new("datafile", id=477, name="df_a", dataset=ds2) df7 = client.new("datafile", id=404, name="df_c", dataset=ds1) datafiles = [ df1, df2, df3, df4, df5, df6, df7 ] datafiles.sort(key=icat.entity.Entity.__sortkey__) assert datafiles == [ df1, df7, df5, df3, df6, df4, df2 ] # Now lets reverse the order of the datasets and try again. ds1.name = "ds_x" datafiles.sort(key=icat.entity.Entity.__sortkey__) assert datafiles == [ df6, df4, df2, df1, df7, df5, df3 ] def test_sort_datafile_mix(client): """Sort Datafiles with and without Datasets. A mixture of Datafiles, some with and some without a Dataset. This sorts the ones without Dataset first. There used to be a bug in __sortkey__() that triggered a TypeError in Python 3 in this case, fixed in 8f33ae1. """ ds1 = client.new("dataset", id=550, name="ds_a") ds2 = client.new("dataset", id=301, name="ds_b") df1 = client.new("datafile", id=978, name="df_b", dataset=ds1) df2 = client.new("datafile", id=736, name="df_d", dataset=ds2) df3 = client.new("datafile", id=969, name="df_e", dataset=ds1) df4 = client.new("datafile", id=127, name="df_b") df5 = client.new("datafile", id=702, name="df_d") df6 = client.new("datafile", id=631, name="df_a", dataset=ds2) df7 = client.new("datafile", id=765, name="df_c") datafiles = [ df1, df2, df3, df4, df5, df6, df7 ] datafiles.sort(key=icat.entity.Entity.__sortkey__) assert datafiles == [ df4, df7, df5, df1, df3, df6, df2 ] # Now lets reverse the order of the datasets and try again. 
ds1.name = "ds_x" datafiles.sort(key=icat.entity.Entity.__sortkey__) assert datafiles == [ df4, df7, df5, df6, df2, df1, df3 ] def test_sort_mixed_objects(client): """Sort some objects of various different entity types. Objects of different types are sorted by type and then according to the type specific order within a given type. """ u1 = client.new("user", id=79, name="a") u2 = client.new("user", id=711, name="b") u3 = client.new("user", id=554, name="c") inv1 = client.new("investigation", id=14, name="a") ds1 = client.new("dataset", id=982, name="a") ds2 = client.new("dataset", id=652, name="c") ds3 = client.new("dataset", id=150, name="b", investigation=inv1) df1 = client.new("datafile", id=809, name="b") df2 = client.new("datafile", id=161, name="c") df3 = client.new("datafile", id=634, name="d") df4 = client.new("datafile", id=98, name="b", dataset=ds1) df5 = client.new("datafile", id=935, name="e", dataset=ds1) df6 = client.new("datafile", id=226, name="a", dataset=ds2) df7 = client.new("datafile", id=988, name="d", dataset=ds2) objects = [ df3, u2, u3, ds2, ds1, inv1, df5, df4, df6, df7, ds3, df2, u1, df1 ] objects.sort(key=icat.entity.Entity.__sortkey__) assert objects == [ df1, df2, df3, df4, df5, df6, df7, ds1, ds2, ds3, inv1, u1, u2, u3 ] def test_sort_datacollection_datafile(client): """Sort DataCollections with Datafiles. DataCollection does not have any attributes or many to one relationships. The only criterion that could be used for sorting are one to many relationships. DataCollections are sorted by Datasets first and then by Datafiles. There used to be a bug in the code such that __sortkey__() was thoroughly broken in this case, fixed in 0df5832. """ # First test with only Datafiles. df1 = client.new("datafile", id=143, name="df_a") df2 = client.new("datafile", id=306, name="df_b") df3 = client.new("datafile", id=765, name="df_c") df4 = client.new("datafile", id=871, name="df_d") dcdf1 = client.new("dataCollectionDatafile", id=790, datafile=df1) dcdf2 = client.new("dataCollectionDatafile", id=895, datafile=df2) dcdf3 = client.new("dataCollectionDatafile", id=611, datafile=df3) dcdf4 = client.new("dataCollectionDatafile", id=28, datafile=df4) dc1 = client.new("dataCollection", id=658) dc1.dataCollectionDatafiles=[ dcdf3 ] dc2 = client.new("dataCollection", id=424) dc2.dataCollectionDatafiles=[ dcdf2, dcdf4 ] dc3 = client.new("dataCollection", id=172) dc3.dataCollectionDatafiles=[ dcdf1 ] dc4 = client.new("dataCollection", id=796) dc4.dataCollectionDatafiles=[ dcdf4 ] dc5 = client.new("dataCollection", id=797) dc5.dataCollectionDatafiles=[ dcdf2 ] dc6 = client.new("dataCollection", id=607) dc6.dataCollectionDatafiles=[] dc7 = client.new("dataCollection", id=485) dc7.dataCollectionDatafiles=[ dcdf2, dcdf3, dcdf4 ] dataCollections = [ dc1, dc2, dc3, dc4, dc5, dc6, dc7 ] dataCollections.sort(key=icat.entity.Entity.__sortkey__) assert dataCollections == [ dc6, dc3, dc5, dc7, dc2, dc1, dc4 ] # Now, add a few Datasets. 
ds1 = client.new("dataset", id=508, name="ds_a") ds2 = client.new("dataset", id=673, name="ds_b") dcds1 = client.new("dataCollectionDataset", id=184, dataset=ds1) dcds2 = client.new("dataCollectionDataset", id=361, dataset=ds2) dc4.dataCollectionDatasets=[ dcds1, dcds2 ] dc5.dataCollectionDatasets=[ dcds1 ] dc6.dataCollectionDatasets=[ dcds1, dcds2 ] dc7.dataCollectionDatasets=[ dcds1 ] dataCollections.sort(key=icat.entity.Entity.__sortkey__) assert dataCollections == [ dc3, dc2, dc1, dc5, dc7, dc6, dc4 ] def test_sort_datacollection_datafile_order_mrel(client): """Sort DataCollections with Datafiles. The order of the DataCollectionDatafiles in DataCollection should not matter for the sort key. This used to be broken, fixed in baac4b2. """ df1 = client.new("datafile", id=62, name="df_a") df2 = client.new("datafile", id=471, name="df_b") df3 = client.new("datafile", id=113, name="df_c") df4 = client.new("datafile", id=810, name="df_d") dcdf1 = client.new("dataCollectionDatafile", id=850, datafile=df1) dcdf2 = client.new("dataCollectionDatafile", id=741, datafile=df2) dcdf3 = client.new("dataCollectionDatafile", id=18, datafile=df3) dcdf4 = client.new("dataCollectionDatafile", id=888, datafile=df4) dc1 = client.new("dataCollection", id=861) dc1.dataCollectionDatafiles=[ dcdf3 ] dc2 = client.new("dataCollection", id=859) dc2.dataCollectionDatafiles=[ dcdf2, dcdf4 ] dc3 = client.new("dataCollection", id=402) dc3.dataCollectionDatafiles=[ dcdf1 ] dc4 = client.new("dataCollection", id=190) dc4.dataCollectionDatafiles=[ dcdf4 ] dc5 = client.new("dataCollection", id=687) dc5.dataCollectionDatafiles=[ dcdf2 ] dc6 = client.new("dataCollection", id=230) dc6.dataCollectionDatafiles=[] dc7 = client.new("dataCollection", id=701) dc7.dataCollectionDatafiles=[ dcdf3, dcdf4, dcdf2 ] dc8 = client.new("dataCollection", id=747) dc8.dataCollectionDatafiles=[ dcdf4, dcdf2, dcdf3 ] dc9 = client.new("dataCollection", id=501) dc9.dataCollectionDatafiles=[ dcdf2, dcdf4, dcdf3 ] dataCollections = [ dc1, dc2, dc3, dc4, dc5, dc6, dc7, dc8, dc9 ] # Note that dc7, dc8, and dc9 are all equal and sort before dc2. dataCollections.sort(key=icat.entity.Entity.__sortkey__) assert dataCollections == [ dc6, dc3, dc5, dc7, dc8, dc9, dc2, dc1, dc4 ] def test_datacollection_sortkey_max_recursion(client): """Entity.__sortkey__() may enter in an infinite recursion. Issue #14. """ df1 = client.new("datafile", name="df_a") dc1 = client.new("dataCollection") dcdf1 = client.new("dataCollectionDatafile", datafile=df1, dataCollection=dc1) dc1.dataCollectionDatafiles.append(dcdf1) df1.dataCollectionDatafiles.append(dcdf1) print(dc1.__sortkey__()) def test_sortattrs_dependencies(client): """Check that there are no circular dependencies for sort attributes. The cause for Bug #14 was that DataCollections were sorted by Datasets and Datafiles via DataCollectionDatafile and DataCollectionDataset respectively and that sorting of the latter was by DataCollection. The fix was to break this circular dependency. This test verifies that there are no further circular dependencies for sort attributes in the entity object classes. 
""" def checkSortDependency(cls, recursionList=()): """Helper function.""" if cls.BeanName in recursionList: raise RuntimeError("Circular sorting dependency detected: %s" % " -> ".join(recursionList)) rl = list(recursionList) rl.append(cls.BeanName) deplist = [] sortAttrs = cls.SortAttrs or cls.Constraint for a in sortAttrs: if a in cls.InstRel or a in cls.InstMRel: rname = cls.getAttrInfo(client, a).type deplist.append(rname) rcls = client.getEntityClass(rname) deplist.extend(checkSortDependency(rcls, rl)) return deplist for cls in client.typemap.values(): if cls.BeanName is None: continue deplist = checkSortDependency(cls) print("%s: %s" % (cls.BeanName, ", ".join(deplist)))
# Copyright (c) 2010-2014 Turbulenz Limited """ Controller class for deploying a game """ ################################################################################ # pylint:disable=W0212 import sys if "darwin" == sys.platform: # and 0 == sys.version.find("2.7.2"): # Monkey path socket.sendall to handle EAGAIN (Errno 35) on mac. # Ideally, httplib.send would handle EAGAIN, but it just calls # sendall. The code below this patches httplib, but relies on # accessing internal variables. OTOH, socket.sendall can be # implemented using only calls to public methods, so should be # safer to override. import socket import time def socket_socket_sendall(self, data): while len(data) > 0: try: bytes_sent = self.send(data) data = data[bytes_sent:] except socket.error, e: if str(e) == "[Errno 35] Resource temporarily unavailable": time.sleep(0.1) else: raise e socket._socketobject.sendall = socket_socket_sendall # Monkey patch httplib to handle EAGAIN socket errors on maxosx. # send() is the original function from httplib with # socket.sendall() replaced by self._dosendall(). _dosendall() calls # socket.send() handling Errno 35 by retrying. # import httplib # import array # def httplib_httpconnection__dosendall(self, data): # while len(data) > 0: # try: # bytes_sent = self.sock.send(data) # data = data[bytes_sent:] # except socket.error, e: # if str(e) == "[Errno 35] Resource temporarily unavailable": # time.sleep(0.1) # else: # raise e # def httplib_httpconnection_send(self, data): # """Send `data' to the server.""" # if self.sock is None: # if self.auto_open: # self.connect() # else: # raise httplib.NotConnected() # # if self.debuglevel > 0: # print "send:", repr(data) # blocksize = 8192 # if hasattr(data,'read') and not isinstance(data, array.array): # if self.debuglevel > 0: print "sendIng a read()able" # datablock = data.read(blocksize) # while datablock: # self._dosendall(datablock) # datablock = data.read(blocksize) # else: # self._dosendall(data) # httplib.HTTPConnection._dosendall = httplib_httpconnection__dosendall # httplib.HTTPConnection.send = httplib_httpconnection_send # pylint:enable=W0212 ################################################################################ from urllib3 import connection_from_url from urllib3.exceptions import HTTPError, SSLError from threading import Thread from logging import getLogger from simplejson import loads as json_loads from pylons import request, response, config from turbulenz_local.decorators import jsonify from turbulenz_local.controllers import BaseController from turbulenz_local.models.gamelist import get_game_by_slug, GameError from turbulenz_local.lib.deploy import Deployment LOG = getLogger(__name__) class DeployController(BaseController): """ Controller class for the 'deploy' branch of the URL tree. 
""" _deploying = {} base_url = config.get('deploy.base_url', None) hub_pool = None cookie_name = config.get('deploy.cookie_name', None) cache_dir = config.get('deploy.cache_dir', None) @classmethod def _create_deploy_info(cls, game, hub_project, hub_version, hub_versiontitle, hub_cookie): deploy_info = Deployment(game, cls.hub_pool, hub_project, hub_version, hub_versiontitle, hub_cookie, cls.cache_dir) thread = Thread(target=deploy_info.deploy, args=[]) thread.daemon = True thread.start() deploy_key = hub_project + hub_version cls._deploying[deploy_key] = deploy_info @classmethod def _get_projects_for_upload(cls, hub_headers, username, rememberme=False): try: r = cls.hub_pool.request('POST', '/dynamic/upload/projects', headers=hub_headers, redirect=False) except (HTTPError, SSLError) as e: LOG.error(e) response.status_int = 500 return {'ok': False, 'msg': str(e)} if r.status != 200: if r.status == 503: response.status_int = 503 # pylint: disable=E1103 return {'ok': False, 'msg': json_loads(r.data).get('msg', 'Service currently unavailable.')} # pylint: enable=E1103 response.status_int = 500 return {'ok': False, 'msg': 'Wrong Hub answer.'} response.headers['Cache-Control'] = 'no-store, no-cache, max-age=0' # pylint: disable=E1103 return { 'ok': True, 'cookie': hub_headers.get('Cookie') if rememberme else None, 'user': username, 'projects': json_loads(r.data).get('projects', []) } # pylint: enable=E1103 # pylint: disable=R0911 @classmethod @jsonify def login(cls): """ Start deploying the game. """ response.headers['Cache-Control'] = 'no-store, no-cache, max-age=0' hub_pool = connection_from_url(cls.base_url, maxsize=8, timeout=8.0) if not hub_pool or not cls.cookie_name: response.status_int = 500 return {'ok': False, 'msg': 'Wrong deployment configuration.'} cls.hub_pool = hub_pool form = request.params try: login_name = form['login'] credentials = { 'login': login_name, 'password': form['password'], 'source': '/local' } except KeyError: response.status_int = 400 return {'ok': False, 'msg': 'Missing user login information.'} try: r = hub_pool.request('POST', '/dynamic/login', fields=credentials, retries=1, redirect=False) except (HTTPError, SSLError) as e: LOG.error(e) response.status_int = 500 return {'ok': False, 'msg': str(e)} if r.status != 200: response.status_int = 400 return {'ok': False, 'msg': 'Wrong user login information.'} cookie = r.headers.get('set-cookie', None) login_info = json_loads(r.data) # pylint: disable=E1103 if not cookie or cls.cookie_name not in cookie or login_info.get('source') != credentials['source']: response.status_int = 400 return {'ok': False, 'msg': 'Wrong user login information.'} # pylint: enable=E1103 hub_headers = {'Cookie': cookie} return cls._get_projects_for_upload(hub_headers, login_name, form.get('rememberme')) # pylint: enable=R0911 # pylint: disable=R0911 @classmethod @jsonify def try_login(cls): """ Try to login automatically and return deployable projects. 
""" response.headers['Cache-Control'] = 'no-store, no-cache, max-age=0' hub_pool = connection_from_url(cls.base_url, maxsize=8, timeout=8.0) if not hub_pool or not cls.cookie_name: response.status_int = 500 return {'ok': False, 'msg': 'Wrong deployment configuration.'} cls.hub_pool = hub_pool try: hub_headers = {'Cookie': request.params['cookie']} r = hub_pool.request('POST', '/dynamic/user', headers=hub_headers, retries=1, redirect=False) # pylint: disable=E1103 username = json_loads(r.data).get('username') # pylint: enable=E1103 status = r.status except (HTTPError, SSLError) as e: LOG.error(e) response.status_int = 500 return {'ok': False, 'msg': str(e)} except KeyError: status = 400 if status != 200: response.status_int = 401 return {'ok': False, 'msg': 'Wrong user login information.'} return cls._get_projects_for_upload(hub_headers, username, True) # pylint: enable=R0911 @classmethod @jsonify def start(cls): """ Start deploying the game. """ response.headers['Cache-Control'] = 'no-store, no-cache, max-age=0' hub_pool = cls.hub_pool if not hub_pool or not cls.cookie_name: response.status_int = 500 return {'ok': False, 'msg': 'Wrong deployment configuration.'} form = request.params try: cookie_value = form[cls.cookie_name] game = form['local'] hub_project = form['project'] hub_version = form['version'] hub_versiontitle = form.get('versiontitle', '') except KeyError: response.status_int = 400 return {'ok': False, 'msg': 'Wrong project information.'} game = get_game_by_slug(game) if not game or not game.path.is_set() or not game.path.is_correct(): response.status_int = 400 return {'ok': False, 'msg': 'Wrong game to upload.'} hub_cookie = '%s=%s' % (cls.cookie_name, cookie_value) cls._create_deploy_info(game, hub_project, hub_version, hub_versiontitle, hub_cookie) return { 'ok': True, 'data': 'local=%s&project=%s&version=%s' % (game.slug, hub_project, hub_version) } @classmethod @jsonify def progress(cls): response.headers['Cache-Control'] = 'no-store, no-cache, max-age=0' form = request.params try: hub_project = form['project'] hub_version = form['version'] except KeyError: response.status_int = 400 return {'ok': False, 'msg': 'Wrong project information.'} deploy_key = hub_project + hub_version deploy_info = cls._deploying.get(deploy_key, None) if not deploy_info: response.status_int = 404 return {'ok': False, 'msg': 'Unknown deploy session.'} if deploy_info.error: LOG.error(deploy_info.error) response.status_int = 400 return {'ok': False, 'msg': deploy_info.error} num_files = deploy_info.num_files if deploy_info.done: if not num_files: return { 'ok': True, 'data': { 'total_files': 1, 'num_files': 1, 'num_bytes': 1, 'uploaded_files': 1, 'uploaded_bytes': 1 } } return { 'ok': True, 'data': { 'total_files': deploy_info.total_files, 'num_files': deploy_info.num_files, 'num_bytes': deploy_info.num_bytes, 'uploaded_files': deploy_info.uploaded_files, 'uploaded_bytes': deploy_info.uploaded_bytes } } # pylint: disable=R0911 @classmethod @jsonify def postupload_progress(cls): response.headers['Cache-Control'] = 'no-store, no-cache, max-age=0' form = request.params try: hub_project = form['project'] hub_version = form['version'] except KeyError: response.status_int = 400 return {'ok': False, 'msg': 'Wrong project information.'} deploy_key = hub_project + hub_version deploy_info = cls._deploying.get(deploy_key, None) if not deploy_info: response.status_int = 404 return {'ok': False, 'msg': 'Unknown deploy session.'} if deploy_info.error: LOG.error(deploy_info.error) response.status_int = 400 return 
{'ok': False, 'msg': deploy_info.error} if not deploy_info.done: return { 'ok': True, 'data': { 'total': 1, 'processed': 0 } } if not deploy_info.hub_session: response.status_int = 404 return {'ok': False, 'msg': 'No deploy session found.'} try: r = cls.hub_pool.request('POST', '/dynamic/upload/progress/%s' % deploy_info.hub_session, headers={'Cookie': deploy_info.hub_cookie}, redirect=False) except (HTTPError, SSLError) as e: LOG.error(e) response.status_int = 500 return {'ok': False, 'msg': 'Post-upload progress check failed.'} if r.status != 200: response.status_int = 500 return {'ok': False, 'msg': 'Wrong Hub answer.'} r_data = json_loads(r.data) # pylint: disable=E1103 progress = int(r_data.get('progress', -1)) upload_info = str(r_data.get('info', '')) failed = r_data.get('failed', False) # pylint: enable=E1103 if failed: response.status_int = 500 return {'ok': False, 'msg': 'Post-upload processing failed: %s' % upload_info} if -1 == progress: response.status_int = 500 return {'ok': False, 'msg': 'Invalid post-upload progress.'} if 100 <= progress: del cls._deploying[deploy_key] try: cls.hub_pool.request('POST', '/dynamic/logout', headers={'Cookie': deploy_info.hub_cookie}, redirect=False) except (HTTPError, SSLError) as e: LOG.error(e) try: game = form['local'] except KeyError: response.status_int = 400 return {'ok': False, 'msg': 'Wrong request.'} game = get_game_by_slug(game) if game: game.set_deployed() return { 'ok': True, 'data': { 'total': 100, 'processed': progress, 'msg': upload_info } } # pylint: enable=R0911 @classmethod @jsonify def cancel(cls): response.headers['Cache-Control'] = 'no-store, no-cache, max-age=0' form = request.params try: hub_project = form['project'] hub_version = form['version'] except KeyError: response.status_int = 400 return {'ok': False, 'msg': 'Missing deploy information.'} deploy_key = hub_project + hub_version deploy_info = cls._deploying.get(deploy_key, None) if not deploy_info: response.status_int = 404 return {'ok': False, 'msg': 'Unknown deploy session.'} deploy_info.cancel() del cls._deploying[deploy_key] try: cls.hub_pool.request('POST', '/dynamic/logout', headers={'Cookie': deploy_info.hub_cookie}, redirect=False) except (HTTPError, SSLError) as e: LOG.error(e) return {'ok':True, 'data':''} @classmethod @jsonify def check(cls, slug): # get game game = get_game_by_slug(slug) if game is None: response.status_int = 404 return {'ok': False, 'msg': 'No game with that slug.'} try: game.load() except GameError: response.status_int = 405 return {'ok': False, 'msg': 'Can\'t deploy a temporary game.'} # check if game is deployable complete, issues = game.check_completeness() if not complete: response.status_int = 400 return {'ok': False, 'msg': issues} issues, critical = game.validate_yaml() if not issues: return {'ok': True, 'msg': ''} elif critical: response.status_int = 400 return {'ok': False, 'msg': issues}
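# Hedged client-side sketch of how the progress payload returned above might be
# turned into a percentage for a progress bar. The field names mirror the JSON
# built by progress(); the values here are made up and the helper is not part
# of the Turbulenz API.
def upload_percentage(data):
    total = data.get('num_bytes') or 1  # guard against divide-by-zero
    return 100.0 * data.get('uploaded_bytes', 0) / total

print(upload_percentage({'total_files': 3, 'num_files': 3, 'num_bytes': 2048,
                         'uploaded_files': 1, 'uploaded_bytes': 512}))  # 25.0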
""" :codeauthor: Rupesh Tare <rupesht@saltstack.com> """ import logging import os import shutil import sys import textwrap import pytest import salt.modules.mount as mount import salt.utils.files import salt.utils.path from salt.exceptions import CommandExecutionError from tests.support.mock import MagicMock, mock_open, patch log = logging.getLogger(__name__) @pytest.fixture def mock_shell_file(): return "A B C D F G\n" @pytest.fixture def config_initial_file(): inital_fsystem = [ "/:\n", "\tdev\t\t= /dev/hd4\n", "\tvfs\t\t= jfs2\n", "\tlog\t\t= /dev/hd8\n", "\tmount \t\t= automatic\n", "\tcheck\t\t= false\n", "\ttype\t\t= bootfs\n", "\tvol\t\t= root\n", "\tfree\t\t= true\n", "\n", "/home:\n", "\tdev\t\t= /dev/hd1\n", "\tvfs\t\t= jfs2\n", "\tlog\t\t= /dev/hd8\n", "\tmount\t\t= true\n", "\tcheck\t\t= true\n", "\tvol\t\t= /home\n", "\tfree\t\t= false\n", "\n", ] return inital_fsystem @pytest.fixture def configure_loader_modules(): return {mount: {}} @pytest.fixture def tmp_sub_dir(tmp_path): directory = tmp_path / "filesystems-dir" directory.mkdir() yield directory shutil.rmtree(str(directory)) @pytest.fixture def config_file(tmp_sub_dir, config_initial_file): filename = str(tmp_sub_dir / "filesystems") with salt.utils.files.fopen(filename, "wb") as fp: fp.writelines(salt.utils.data.encode(config_initial_file)) yield filename os.remove(filename) def test_active(): """ List the active mounts. """ with patch.dict(mount.__grains__, {"os": "FreeBSD", "kernel": "FreeBSD"}): # uid=user1 tests the improbable case where a OS returns a name # instead of a numeric id, for #25293 mock = MagicMock(return_value="A B C D,E,F,uid=user1,gid=grp1") mock_user = MagicMock(return_value={"uid": "100"}) mock_group = MagicMock(return_value={"gid": "100"}) with patch.dict( mount.__salt__, { "cmd.run_stdout": mock, "user.info": mock_user, "group.info": mock_group, }, ): assert mount.active() == { "B": { "device": "A", "opts": ["D", "E", "F", "uid=100", "gid=100"], "fstype": "C", } } with patch.dict(mount.__grains__, {"os": "Solaris", "kernel": "SunOS"}): mock = MagicMock(return_value="A * B * C D/E/F") with patch.dict(mount.__salt__, {"cmd.run_stdout": mock}): assert mount.active() == { "B": {"device": "A", "opts": ["D", "E", "F"], "fstype": "C"} } with patch.dict(mount.__grains__, {"os": "AIX", "kernel": "AIX"}): mock = MagicMock(return_value="A * B * C D/E/F") with patch.dict(mount.__salt__, {"cmd.run_stdout": mock}): assert mount.active() == {"B": {"node": "A", "device": "*", "fstype": "*"}} with patch.dict(mount.__grains__, {"os": "OpenBSD", "kernel": "OpenBSD"}): mock = MagicMock(return_value={}) with patch.object(mount, "_active_mounts_openbsd", mock): assert mount.active() == {} with patch.dict(mount.__grains__, {"os": "MacOS", "kernel": "Darwin"}): mock = MagicMock(return_value={}) with patch.object(mount, "_active_mounts_darwin", mock): assert mount.active() == {} with patch.dict(mount.__grains__, {"os": "MacOS", "kernel": "Darwin"}): mock = MagicMock(return_value={}) with patch.object(mount, "_active_mountinfo", mock): with patch.object(mount, "_active_mounts_darwin", mock): assert mount.active(extended=True) == {} with patch.dict(mount.__grains__, {"os": "AIX", "kernel": "AIX"}): mock = MagicMock(return_value={}) with patch.object(mount, "_active_mounts_aix", mock): assert mount.active() == {} def test_fstab(): """ List the content of the fstab """ mock = MagicMock(return_value=False) with patch.object(os.path, "isfile", mock): assert mount.fstab() == {} file_data = "\n".join(["#", "A B C D,E,F G H"]) 
mock = MagicMock(return_value=True) with patch.dict(mount.__grains__, {"kernel": ""}), patch.object( os.path, "isfile", mock ), patch("salt.utils.files.fopen", mock_open(read_data=file_data)): fstab = mount.fstab() assert fstab == { "B": { "device": "A", "dump": "G", "fstype": "C", "opts": ["D", "E", "F"], "pass": "H", } }, fstab def test_vfstab(): """ List the content of the vfstab """ mock = MagicMock(return_value=False) with patch.object(os.path, "isfile", mock): assert mount.vfstab() == {} file_data = textwrap.dedent( """\ # swap - /tmp tmpfs - yes size=2048m """ ) mock = MagicMock(return_value=True) with patch.dict(mount.__grains__, {"kernel": "SunOS"}), patch.object( os.path, "isfile", mock ), patch("salt.utils.files.fopen", mock_open(read_data=file_data)): vfstab = mount.vfstab() assert vfstab == { "/tmp": { "device": "swap", "device_fsck": "-", "fstype": "tmpfs", "mount_at_boot": "yes", "opts": ["size=2048m"], "pass_fsck": "-", } }, vfstab def test_filesystems(): """ List the content of the filesystems """ file_data = textwrap.dedent( """\ # """ ) mock = MagicMock(return_value=True) with patch.dict(mount.__grains__, {"os": "AIX", "kernel": "AIX"}), patch.object( os.path, "isfile", mock ), patch("salt.utils.files.fopen", mock_open(read_data=file_data)): assert mount.filesystems() == {} file_data = textwrap.dedent( """\ # /home: dev = /dev/hd1 vfs = jfs2 log = /dev/hd8 mount = true check = true vol = /home free = false quota = no """ ) mock = MagicMock(return_value=True) with patch.dict(mount.__grains__, {"os": "AIX", "kernel": "AIX"}), patch.object( os.path, "isfile", mock ), patch("salt.utils.files.fopen", mock_open(read_data=file_data)): fsyst = mount.filesystems() test_fsyst = { "/home": { "dev": "/dev/hd1", "vfs": "jfs2", "log": "/dev/hd8", "mount": "true", "check": "true", "vol": "/home", "free": "false", "quota": "no", } } assert test_fsyst == fsyst def test_rm_fstab(): """ Remove the mount point from the fstab """ mock_fstab = MagicMock(return_value={}) with patch.dict(mount.__grains__, {"kernel": ""}): with patch.object(mount, "fstab", mock_fstab): with patch("salt.utils.files.fopen", mock_open()): assert mount.rm_fstab("name", "device") def test_set_fstab(mock_shell_file): """ Tests to verify that this mount is represented in the fstab, change the mount to match the data passed, or add the mount if it is not present. 
""" mock = MagicMock(return_value=False) with patch.object(os.path, "isfile", mock): pytest.raises(CommandExecutionError, mount.set_fstab, "A", "B", "C") mock = MagicMock(return_value=True) mock_read = MagicMock(side_effect=OSError) with patch.object(os.path, "isfile", mock): with patch.object(salt.utils.files, "fopen", mock_read): pytest.raises(CommandExecutionError, mount.set_fstab, "A", "B", "C") mock = MagicMock(return_value=True) with patch.object(os.path, "isfile", mock): with patch("salt.utils.files.fopen", mock_open(read_data=mock_shell_file)): assert mount.set_fstab("A", "B", "C") == "new" mock = MagicMock(return_value=True) with patch.object(os.path, "isfile", mock): with patch("salt.utils.files.fopen", mock_open(read_data=mock_shell_file)): assert mount.set_fstab("B", "A", "C", "D", "F", "G") == "present" mock = MagicMock(return_value=True) with patch.object(os.path, "isfile", mock): with patch("salt.utils.files.fopen", mock_open(read_data=mock_shell_file)): assert mount.set_fstab("B", "A", "C", not_change=True) == "present" def test_rm_automaster(): """ Remove the mount point from the auto_master """ mock = MagicMock(return_value={}) with patch.object(mount, "automaster", mock): assert mount.rm_automaster("name", "device") mock = MagicMock(return_value={"name": "name"}) with patch.object(mount, "fstab", mock): assert mount.rm_automaster("name", "device") def test_set_automaster(mock_shell_file): """ Verify that this mount is represented in the auto_salt, change the mount to match the data passed, or add the mount if it is not present. """ mock = MagicMock(return_value=True) with patch.object(os.path, "isfile", mock): pytest.raises(CommandExecutionError, mount.set_automaster, "A", "B", "C") mock = MagicMock(return_value=True) mock_read = MagicMock(side_effect=OSError) with patch.object(os.path, "isfile", mock): with patch.object(salt.utils.files, "fopen", mock_read): pytest.raises(CommandExecutionError, mount.set_automaster, "A", "B", "C") mock = MagicMock(return_value=True) with patch.object(os.path, "isfile", mock): with patch("salt.utils.files.fopen", mock_open(read_data=mock_shell_file)): assert mount.set_automaster("A", "B", "C") == "new" mock = MagicMock(return_value=True) with patch.object(os.path, "isfile", mock): with patch( "salt.utils.files.fopen", mock_open(read_data="/..A -fstype=C,D C:B") ): assert mount.set_automaster("A", "B", "C", "D") == "present" mock = MagicMock(return_value=True) with patch.object(os.path, "isfile", mock): with patch( "salt.utils.files.fopen", mock_open(read_data="/..A -fstype=XX C:B") ): assert ( mount.set_automaster("A", "B", "C", "D", not_change=True) == "present" ) def test_automaster(): """ Test the list the contents of the fstab """ assert mount.automaster() == {} def test_rm_filesystems(): """ Remove the mount point from the filesystems """ file_data = textwrap.dedent( """\ # """ ) mock = MagicMock(return_value=True) with patch.dict(mount.__grains__, {"os": "AIX", "kernel": "AIX"}), patch.object( os.path, "isfile", mock ), patch("salt.utils.files.fopen", mock_open(read_data=file_data)): assert not mount.rm_filesystems("name", "device") file_data = textwrap.dedent( """\ # /name: dev = device vol = /name """ ) mock = MagicMock(return_value=True) mock_fsyst = MagicMock(return_value=True) with patch.dict(mount.__grains__, {"os": "AIX", "kernel": "AIX"}), patch.object( os.path, "isfile", mock ), patch("salt.utils.files.fopen", mock_open(read_data=file_data)): assert mount.rm_filesystems("/name", "device") def test_set_filesystems(): """ 
Tests to verify that this mount is represented in the filesystems, change the mount to match the data passed, or add the mount if it is not present. """ mock = MagicMock(return_value=False) with patch.dict(mount.__grains__, {"os": "AIX", "kernel": "AIX"}): with patch.object(os.path, "isfile", mock): pytest.raises(CommandExecutionError, mount.set_filesystems, "A", "B", "C") mock_read = MagicMock(side_effect=OSError) with patch.object(os.path, "isfile", mock): with patch.object(salt.utils.files, "fopen", mock_read): pytest.raises( CommandExecutionError, mount.set_filesystems, "A", "B", "C" ) @pytest.mark.skipif( sys.version_info[0] == 3 and sys.version_info[1] <= 5, reason="run on Python 3.6 or greater where OrderedDict is default", ) @pytest.mark.skip_on_windows( reason="Not supported on Windows, does not handle tabs well" ) def test_set_filesystems_with_data(tmp_sub_dir, config_file): """ Tests to verify set_filesystems reads and adjusts file /etc/filesystems correctly """ # Note AIX uses tabs in filesystems files, hence disable warings and errors for tabs and spaces # pylint: disable=W8191 # pylint: disable=E8101 config_filepath = str(tmp_sub_dir / "filesystems") with patch.dict(mount.__grains__, {"os": "AIX", "kernel": "AIX"}): mount.set_filesystems( "/test_mount", "/dev/hd3", "jsf2", "-", "true", config_filepath ) with salt.utils.files.fopen(config_filepath, "r") as fp: fsys_content = fp.read() test_fsyst = """/: dev = /dev/hd4 vfs = jfs2 log = /dev/hd8 mount = automatic check = false type = bootfs vol = root free = true /home: dev = /dev/hd1 vfs = jfs2 log = /dev/hd8 mount = true check = true vol = /home free = false /test_mount: dev = /dev/hd3 vfstype = jsf2 opts = - mount = true """ assert test_fsyst == fsys_content def test_mount(): """ Mount a device """ with patch.dict(mount.__grains__, {"os": "MacOS"}): mock = MagicMock(return_value=True) with patch.object(os.path, "exists", mock): mock = MagicMock(return_value=None) with patch.dict(mount.__salt__, {"file.mkdir": None}): mock = MagicMock(return_value={"retcode": True, "stderr": True}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.mount("name", "device") mock.assert_called_with( "mount device name ", python_shell=False, runas=None ) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.mount("name", "device", fstype="fstype") mock.assert_called_with( "mount -t fstype device name ", python_shell=False, runas=None, ) mock = MagicMock(return_value={"retcode": False, "stderr": False}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.mount("name", "device") with patch.dict(mount.__grains__, {"os": "AIX"}): mock = MagicMock(return_value=True) with patch.object(os.path, "exists", mock): mock = MagicMock(return_value=None) with patch.dict(mount.__salt__, {"file.mkdir": None}): mock = MagicMock(return_value={"retcode": True, "stderr": True}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.mount("name", "device") mock.assert_called_with( "mount device name ", python_shell=False, runas=None ) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.mount("name", "device", fstype="fstype") mock.assert_called_with( "mount -v fstype device name ", python_shell=False, runas=None, ) mock = MagicMock(return_value={"retcode": False, "stderr": False}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.mount("name", "device") with patch.dict(mount.__grains__, {"os": "Linux"}): mock = MagicMock(return_value=True) with patch.object(os.path, 
"exists", mock): mock = MagicMock(return_value=None) with patch.dict(mount.__salt__, {"file.mkdir": None}): mock = MagicMock(return_value={"retcode": True, "stderr": True}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.mount("name", "device") mock.assert_called_with( "mount -o defaults device name ", python_shell=False, runas=None, ) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.mount("name", "device", fstype="fstype") mock.assert_called_with( "mount -o defaults -t fstype device name ", python_shell=False, runas=None, ) mock = MagicMock(return_value={"retcode": False, "stderr": False}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.mount("name", "device") def test_remount_non_mounted(): """ Attempt to remount a device, if the device is not already mounted, mount is called """ with patch.dict(mount.__grains__, {"os": "MacOS"}): mock = MagicMock(return_value=[]) with patch.object(mount, "active", mock): mock = MagicMock(return_value=True) with patch.object(mount, "mount", mock): assert mount.remount("name", "device") with patch.dict(mount.__grains__, {"os": "AIX"}): mock = MagicMock(return_value=[]) with patch.object(mount, "active", mock): mock = MagicMock(return_value=True) with patch.object(mount, "mount", mock): assert mount.remount("name", "device") with patch.dict(mount.__grains__, {"os": "Linux"}): mock = MagicMock(return_value=[]) with patch.object(mount, "active", mock): mock = MagicMock(return_value=True) with patch.object(mount, "mount", mock): assert mount.remount("name", "device") def test_remount_already_mounted_no_fstype(): """ Attempt to remount a device already mounted that do not provides fstype """ with patch.dict(mount.__grains__, {"os": "MacOS"}): mock = MagicMock(return_value=["name"]) with patch.object(mount, "active", mock): mock = MagicMock(return_value={"retcode": 0}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.remount("name", "device") mock.assert_called_with( "mount -u -o noowners device name ", python_shell=False, runas=None, ) with patch.dict(mount.__grains__, {"os": "AIX"}): mock = MagicMock(return_value=["name"]) with patch.object(mount, "active", mock): mock = MagicMock(return_value={"retcode": 0}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.remount("name", "device") mock.assert_called_with( "mount -o remount device name ", python_shell=False, runas=None ) with patch.dict(mount.__grains__, {"os": "Linux"}): mock = MagicMock(return_value=["name"]) with patch.object(mount, "active", mock): mock = MagicMock(return_value={"retcode": 0}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.remount("name", "device") mock.assert_called_with( "mount -o defaults,remount device name ", python_shell=False, runas=None, ) def test_remount_already_mounted_with_fstype(): """ Attempt to remount a device already mounted that do not provides fstype """ with patch.dict(mount.__grains__, {"os": "MacOS"}): mock = MagicMock(return_value=["name"]) with patch.object(mount, "active", mock): mock = MagicMock(return_value={"retcode": 0}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.remount("name", "device", fstype="type") mock.assert_called_with( "mount -u -o noowners -t type device name ", python_shell=False, runas=None, ) with patch.dict(mount.__grains__, {"os": "AIX"}): mock = MagicMock(return_value=["name"]) with patch.object(mount, "active", mock): mock = MagicMock(return_value={"retcode": 0}) with patch.dict(mount.__salt__, 
{"cmd.run_all": mock}): assert mount.remount("name", "device", fstype="type") mock.assert_called_with( "mount -o remount -v type device name ", python_shell=False, runas=None, ) with patch.dict(mount.__grains__, {"os": "Linux"}): mock = MagicMock(return_value=["name"]) with patch.object(mount, "active", mock): mock = MagicMock(return_value={"retcode": 0}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.remount("name", "device", fstype="type") mock.assert_called_with( "mount -o defaults,remount -t type device name ", python_shell=False, runas=None, ) def test_umount(): """ Attempt to unmount a device by specifying the directory it is mounted on """ mock = MagicMock(return_value={}) with patch.object(mount, "active", mock): assert mount.umount("name") == "name does not have anything mounted" mock = MagicMock(return_value={"name": "name"}) with patch.object(mount, "active", mock): mock = MagicMock(return_value={"retcode": True, "stderr": True}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.umount("name") mock = MagicMock(return_value={"retcode": False}) with patch.dict(mount.__salt__, {"cmd.run_all": mock}): assert mount.umount("name") # Test unmounting with guestfs util mock = MagicMock() with patch.dict(mount.__salt__, {"guestfs.umount": mock}): mount.umount("/mountpoint", device="/path/to/my.qcow", util="guestfs") mock.assert_called_once_with("/mountpoint", disk="/path/to/my.qcow") def test_is_fuse_exec(): """ Returns true if the command passed is a fuse mountable application """ with patch.object(salt.utils.path, "which", return_value=None): assert not mount.is_fuse_exec("cmd") def _ldd_side_effect(cmd, *args, **kwargs): """ Neither of these are full ldd output, but what is_fuse_exec is looking for is 'libfuse' in the ldd output, so these examples should be sufficient enough to test both the True and False cases. 
""" return { "ldd cmd1": textwrap.dedent( """\ linux-vdso.so.1 (0x00007ffeaf5fb000) libfuse3.so.3 => /usr/lib/libfuse3.so.3 (0x00007f91e66ac000) """ ), "ldd cmd2": textwrap.dedent( """\ linux-vdso.so.1 (0x00007ffeaf5fb000) """ ), }[cmd] which_mock = MagicMock(side_effect=lambda x: x) ldd_mock = MagicMock(side_effect=_ldd_side_effect) with patch.object(salt.utils.path, "which", which_mock): with patch.dict(mount.__salt__, {"cmd.run": _ldd_side_effect}): assert mount.is_fuse_exec("cmd1") assert not mount.is_fuse_exec("cmd2") def test_swaps(): """ Return a dict containing information on active swap """ file_data = textwrap.dedent( """\ Filename Type Size Used Priority /dev/sda1 partition 31249404 4100 -1 """ ) with patch.dict(mount.__grains__, {"os": "", "kernel": ""}): with patch("salt.utils.files.fopen", mock_open(read_data=file_data)): swaps = mount.swaps() assert swaps == { "/dev/sda1": { "priority": "-1", "size": "31249404", "type": "partition", "used": "4100", } }, swaps file_data = textwrap.dedent( """\ Device Size Used Unknown Unknown Priority /dev/sda1 31249404 4100 unknown unknown -1 """ ) mock = MagicMock(return_value=file_data) with patch.dict( mount.__grains__, {"os": "OpenBSD", "kernel": "OpenBSD"} ), patch.dict(mount.__salt__, {"cmd.run_stdout": mock}): swaps = mount.swaps() assert swaps == { "/dev/sda1": { "priority": "-1", "size": "31249404", "type": "partition", "used": "4100", } }, swaps file_data = textwrap.dedent( """\ device maj,min total free /dev/hd6 10, 2 11776MB 11765MB """ ) mock = MagicMock(return_value=file_data) with patch.dict(mount.__grains__, {"os": "AIX", "kernel": "AIX"}), patch.dict( mount.__salt__, {"cmd.run_stdout": mock} ): swaps = mount.swaps() assert swaps == { "/dev/hd6": { "priority": "-", "size": 12058624, "type": "device", "used": 11264, } }, swaps def test_swapon(): """ Activate a swap disk """ mock = MagicMock(return_value={"name": "name"}) with patch.dict(mount.__grains__, {"kernel": ""}): with patch.object(mount, "swaps", mock): assert mount.swapon("name") == {"stats": "name", "new": False} mock = MagicMock(return_value={}) with patch.dict(mount.__grains__, {"kernel": ""}): with patch.object(mount, "swaps", mock): mock = MagicMock(return_value=None) with patch.dict(mount.__salt__, {"cmd.run": mock}): assert mount.swapon("name", False) == {} mock = MagicMock(side_effect=[{}, {"name": "name"}]) with patch.dict(mount.__grains__, {"kernel": ""}): with patch.object(mount, "swaps", mock): mock = MagicMock(return_value=None) with patch.dict(mount.__salt__, {"cmd.run": mock}): assert mount.swapon("name") == {"stats": "name", "new": True} ## effects of AIX mock = MagicMock(return_value={"name": "name"}) with patch.dict(mount.__grains__, {"kernel": "AIX"}): with patch.object(mount, "swaps", mock): assert mount.swapon("name") == {"stats": "name", "new": False} mock = MagicMock(return_value={}) with patch.dict(mount.__grains__, {"kernel": "AIX"}): with patch.object(mount, "swaps", mock): mock = MagicMock(return_value=None) with patch.dict(mount.__salt__, {"cmd.run": mock}): assert mount.swapon("name", False) == {} mock = MagicMock(side_effect=[{}, {"name": "name"}]) with patch.dict(mount.__grains__, {"kernel": "AIX"}): with patch.object(mount, "swaps", mock): mock = MagicMock(return_value=None) with patch.dict(mount.__salt__, {"cmd.run": mock}): assert mount.swapon("name") == {"stats": "name", "new": True} def test_swapoff(): """ Deactivate a named swap mount """ mock = MagicMock(return_value={}) with patch.dict(mount.__grains__, {"kernel": ""}): with 
patch.object(mount, "swaps", mock): assert mount.swapoff("name") is None mock = MagicMock(return_value={"name": "name"}) with patch.dict(mount.__grains__, {"kernel": ""}): with patch.object(mount, "swaps", mock): with patch.dict(mount.__grains__, {"os": "test"}): mock = MagicMock(return_value=None) with patch.dict(mount.__salt__, {"cmd.run": mock}): assert not mount.swapoff("name") mock = MagicMock(side_effect=[{"name": "name"}, {}]) with patch.dict(mount.__grains__, {"kernel": ""}): with patch.object(mount, "swaps", mock): with patch.dict(mount.__grains__, {"os": "test"}): mock = MagicMock(return_value=None) with patch.dict(mount.__salt__, {"cmd.run": mock}): assert mount.swapoff("name") # check on AIX mock = MagicMock(return_value={}) with patch.dict(mount.__grains__, {"kernel": "AIX"}): with patch.object(mount, "swaps", mock): assert mount.swapoff("name") is None mock = MagicMock(return_value={"name": "name"}) with patch.dict(mount.__grains__, {"kernel": "AIX"}): with patch.object(mount, "swaps", mock): with patch.dict(mount.__grains__, {"os": "test"}): mock = MagicMock(return_value=None) with patch.dict(mount.__salt__, {"cmd.run": mock}): assert not mount.swapoff("name") mock = MagicMock(side_effect=[{"name": "name"}, {}]) with patch.dict(mount.__grains__, {"kernel": "AIX"}): with patch.object(mount, "swaps", mock): with patch.dict(mount.__grains__, {"os": "test"}): mock = MagicMock(return_value=None) with patch.dict(mount.__salt__, {"cmd.run": mock}): assert mount.swapoff("name") def test_is_mounted(): """ Provide information if the path is mounted """ mock = MagicMock(return_value={}) with patch.object(mount, "active", mock), patch.dict( mount.__grains__, {"kernel": ""} ): assert not mount.is_mounted("name") mock = MagicMock(return_value={"name": "name"}) with patch.object(mount, "active", mock), patch.dict( mount.__grains__, {"kernel": ""} ): assert mount.is_mounted("name")
#!/usr/bin/python3 from curwmysqladapter import MySQLAdapter, Station import sys, traceback, csv, json, datetime, getopt, glob, os, copy import numpy as np from LIBFLO2DWATERLEVELGRID import getGridBoudary from LIBFLO2DWATERLEVELGRID import getCellGrid def usage() : usageText = """ Usage: ./STORE_MYSQL.py [-d YYYY-MM-DD] [-h] -h --help Show usage -d --date Date in YYYY-MM-DD. Default is current date. -t --time Time which need to run the forecast in HH:MM:SS format. -f --force Force insert timeseries. If timeseries exists, delete existing data and replace with new data. -r --rainfall Store rainfall specifically. Ignore others if not mentioned. -e --discharge Store discharge(emission) specifically. Ignore others if not mentioned. -w --waterlevel Store waterlevel specifically. Ignore others if not mentioned. -g --waterlevelgrid Store waterlevel grid specifically. Ignore others if not mentioned. --flo2d-stations Store FLO2D model stations --wl-out-suffix Suffix for 'water_level-<SUFFIX>' output directories. Default is 'water_level-<YYYY-MM-DD>' same as -d option value. --rainfall-path Directory path which contains the Rainfall timeseries. --discharge-path Directory path which contains the Discharge timeseries. --waterlevel-path Directory path which contains the WaterLevel timeseries directories. E.g: '<waterlevel-path>/water_level-2017-05-27'. --waterlevelgrid-path Directory path which contains the WaterLevel timeseries directories. E.g: '<waterlevelgrid-path>/water_level_grid-2017-05-27'. -n New Line character -> None, '', '\\n', '\\r', and '\\r\\n'. Default is '\\n'. """ print(usageText) try : # print('Config :: ', CONFIG) INIT_DIR = os.getcwd() ROOT_DIR = os.path.dirname(os.path.realpath(__file__)) os.chdir(ROOT_DIR) CONFIG = json.loads(open(os.path.join(ROOT_DIR, 'CONFIG.json')).read()) NEW_LINE = '\n' DISCHARGE_NUM_METADATA_LINES = 2 DISCHARGE_CSV_FILE = 'DailyDischarge.csv' RAIN_CSV_FILE = 'DailyRain.csv' WATER_LEVEL_DIR_NAME = 'water_level' WATER_LEVEL_GRID_DIR_NAME = 'water_level_grid' CADPTS_DAT_FILE = './META_FLO2D/CADPTS.DAT' OUTPUT_DIR = './OUTPUT' RF_DIR_PATH = '/mnt/disks/wrf-mod/OUTPUT/' DIS_OUTPUT_DIR = OUTPUT_DIR WL_OUTPUT_DIR = OUTPUT_DIR WL_GRID_OUTPUT_DIR = OUTPUT_DIR DIS_RESOLUTION = 24 # In 1 hours RF_RESOLUTION = 24 # In 1 hours WL_RESOLUTION = 24 * 4 # In 15 mins WL_GRID_RESOLUTION = 24 * 4 # In 15 mins WL_GRID_MISSING_VALUE = -9 MYSQL_HOST="localhost" MYSQL_USER="root" MYSQL_DB="curw" MYSQL_PASSWORD="" if 'DISCHARGE_CSV_FILE' in CONFIG : DISCHARGE_CSV_FILE = CONFIG['DISCHARGE_CSV_FILE'] if 'RAIN_CSV_FILE' in CONFIG : RAIN_CSV_FILE = CONFIG['RAIN_CSV_FILE'] if 'RF_DIR_PATH' in CONFIG : RF_DIR_PATH = CONFIG['RF_DIR_PATH'] if 'OUTPUT_DIR' in CONFIG : OUTPUT_DIR = CONFIG['OUTPUT_DIR'] DIS_OUTPUT_DIR = OUTPUT_DIR WL_OUTPUT_DIR = OUTPUT_DIR WL_GRID_OUTPUT_DIR = OUTPUT_DIR if 'MYSQL_HOST' in CONFIG : MYSQL_HOST = CONFIG['MYSQL_HOST'] if 'MYSQL_USER' in CONFIG : MYSQL_USER = CONFIG['MYSQL_USER'] if 'MYSQL_DB' in CONFIG : MYSQL_DB = CONFIG['MYSQL_DB'] if 'MYSQL_PASSWORD' in CONFIG : MYSQL_PASSWORD = CONFIG['MYSQL_PASSWORD'] date = '' time = '' forceInsert = False allInsert = True rainfallInsert = False dischargeInsert = False waterlevelInsert = False waterlevelGridInsert = False flo2dStationsInsert = False waterlevelOutSuffix = '' try: opts, args = getopt.getopt(sys.argv[1:], "hd:t:frewgn:", [ "help", "date=", "time=", "force", "rainfall", "discharge", "waterlevel", "waterlevelgrid", "flo2d-stations", "wl-out-suffix=", "rainfall-path=", "discharge-path=", "waterlevel-path=", 
"waterlevelgrid-path=" ]) except getopt.GetoptError: usage() sys.exit(2) for opt, arg in opts: if opt in ("-h", "--help"): usage() sys.exit() elif opt in ("-d", "--date"): date = arg elif opt in ("-t", "--time"): time = arg elif opt in ("-f", "--force"): forceInsert = True elif opt in ("-r", "--rainfall"): rainfallInsert = True elif opt in ("-e", "--discharge"): dischargeInsert = True elif opt in ("-w", "--waterlevel"): waterlevelInsert = True elif opt in ("-g", "--waterlevelgrid"): waterlevelGridInsert = True elif opt in ("--flo2d-stations"): flo2dStationsInsert = True elif opt in ("--wl-out-suffix"): waterlevelOutSuffix = arg elif opt in ("--rainfall-path"): RF_DIR_PATH = arg print('WARN: Using custom Rainfall Path :', RF_DIR_PATH) elif opt in ("--discharge-path"): DIS_OUTPUT_DIR = arg print('WARN: Using custom Discharge Path :', DIS_OUTPUT_DIR) elif opt in ("--waterlevel-path"): WL_OUTPUT_DIR = arg print('WARN: Using custom WaterLevel Path :', WL_OUTPUT_DIR) elif opt in ("--waterlevelgrid-path"): WL_GRID_OUTPUT_DIR = arg print('WARN: Using custom WaterLevel Grid Path :', WL_GRID_OUTPUT_DIR) elif opt in ("-n"): NEW_LINE = arg if rainfallInsert or dischargeInsert or waterlevelInsert or waterlevelGridInsert or flo2dStationsInsert : allInsert = False # Default run for current day now = datetime.datetime.now() if date : now = datetime.datetime.strptime(date, '%Y-%m-%d') date = now.strftime("%Y-%m-%d") if not waterlevelOutSuffix : waterlevelOutSuffix = date print('STORE_MYSQL startTime:', datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'on', ROOT_DIR) if forceInsert : print('WARNING: Force Insert enabled') except Exception as e : traceback.print_exc() def storeDischarge(adapter): stations = ['Hanwella'] types = [ 'Forecast-0-d', 'Forecast-1-d-after', 'Forecast-2-d-after', 'Forecast-3-d-after', 'Forecast-4-d-after', 'Forecast-5-d-after' ] metaData = { 'station': stations[0], 'variable': 'Discharge', 'unit': 'm3/s', 'type': types[0], 'source': 'HEC-HMS', 'name': 'Cloud Continuous', } fileName = DISCHARGE_CSV_FILE.rsplit('.', 1) fileName = "%s-%s.%s" % (fileName[0], date, fileName[1]) # DISCHARGE_CSV_FILE_PATH = "%s/%s" % (DIS_OUTPUT_DIR, fileName) DISCHARGE_CSV_FILE_PATH = os.path.join(DIS_OUTPUT_DIR, fileName) if not os.path.exists(DISCHARGE_CSV_FILE_PATH): print('Discharge > Unable to find file : ', DISCHARGE_CSV_FILE_PATH) return None print('Discharge > store %s on startTime: %s' % (DISCHARGE_CSV_FILE_PATH, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) csvReader = csv.reader(open(DISCHARGE_CSV_FILE_PATH, 'r'), delimiter=',', quotechar='|') timeseries = list(csvReader)[DISCHARGE_NUM_METADATA_LINES:] print('Start Date :', timeseries[0][0]) print('End Date :', timeseries[-1][0]) startDateTime = datetime.datetime.strptime(timeseries[0][0], '%Y:%m:%d %H:%M:%S') endDateTime = datetime.datetime.strptime(timeseries[-1][0], '%Y:%m:%d %H:%M:%S') dischargeMeta = copy.deepcopy(metaData) dischargeMeta['start_date'] = startDateTime.strftime("%Y-%m-%d %H:%M:%S") dischargeMeta['end_date'] = endDateTime.strftime("%Y-%m-%d %H:%M:%S") for i in range(0, 6) : dischargeMeta['type'] = types[i] eventId = adapter.get_event_id(dischargeMeta) if eventId is None : eventId = adapter.create_event_id(dischargeMeta) print('HASH SHA256 created: ', eventId) else : print('HASH SHA256 exists: ', eventId) if not forceInsert : print('Timeseries already exists. 
Use --force to update the existing.\n') continue # for l in timeseries[:3] + timeseries[-2:] : # print(l) rowCount = adapter.insert_timeseries(eventId, timeseries[i*DIS_RESOLUTION:(i+1)*DIS_RESOLUTION], forceInsert) print('%s rows inserted.\n' % rowCount) def storeRainfall(adapter): stations = ['Attanagalla', 'Colombo', 'Daraniyagala', 'Glencourse', 'Hanwella', 'Holombuwa', 'Kitulgala', 'Norwood'] types = ['Forecast-0-d', 'Forecast-1-d-after', 'Forecast-2-d-after'] metaData = { 'station': stations[0], 'variable': 'Precipitation', 'unit': 'mm', 'type': types[0], 'source': 'WRF', 'name': 'Cloud-1', } for station in stations : for filename in glob.glob(os.path.join(RF_DIR_PATH, '%s-%s*.txt' % (station, date))): if not os.path.exists(filename): print('Rainfall > Unable to find file : ', filename) break print('Rainfall > store %s on startTime: %s' % (filename, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) csvGauge = csv.reader(open(filename, 'r'), delimiter=' ', skipinitialspace=True) timeseries = list(csvGauge) print('Start Date :', timeseries[0][0]) print('End Date :', timeseries[-1][0]) startDateTime = datetime.datetime.strptime(timeseries[0][0], '%Y-%m-%d_%H:%M:%S') endDateTime = datetime.datetime.strptime(timeseries[-1][0], '%Y-%m-%d_%H:%M:%S') rainfallMeta = copy.deepcopy(metaData) rainfallMeta['station'] = station rainfallMeta['start_date'] = startDateTime.strftime("%Y-%m-%d %H:%M:%S") rainfallMeta['end_date'] = endDateTime.strftime("%Y-%m-%d %H:%M:%S") for i in range(0, 3) : rainfallMeta['type'] = types[i] eventId = adapter.get_event_id(rainfallMeta) if eventId is None : eventId = adapter.create_event_id(rainfallMeta) print('HASH SHA256 created: ', eventId) else : print('HASH SHA256 exists: ', eventId) if not forceInsert : print('Timeseries already exists. 
User --force to update the existing.\n') continue # for l in timeseries[:3] + timeseries[-2:] : # print(l) rowCount = adapter.insert_timeseries(eventId, timeseries[i*RF_RESOLUTION:(i+1)*RF_RESOLUTION], forceInsert) print('%s rows inserted.\n' % rowCount) def storeWaterlevel(adapter): print('\nStoring Waterlevels :::') stations = [ "N'Street-River", "N'Street-Canal", "Wellawatta", "Dematagoda-Canal", "Dehiwala", "Parliment Lake Bridge-Kotte Canal", "Parliment Lake-Out", "Madiwela-US", "Ambathale", "Madiwela-Out", "Salalihini-River", "Salalihini-Canal", "Kittampahuwa-River", "Kittampahuwa-Out", "Kolonnawa-Canal", "Heen Ela", "Torington", "Parliment Lake", ] types = [ 'Forecast-0-d', 'Forecast-1-d-after', 'Forecast-2-d-after', 'Forecast-3-d-after', 'Forecast-4-d-after', 'Forecast-5-d-after' ] metaData = { 'station': stations[0], 'variable': 'Waterlevel', 'unit': 'm', 'type': types[0], 'source': 'FLO2D', 'name': 'Cloud-1', } WATER_LEVEL_DIR_PATH = os.path.join(WL_OUTPUT_DIR, '%s-%s' % (WATER_LEVEL_DIR_NAME, waterlevelOutSuffix)) if not os.path.exists(WATER_LEVEL_DIR_PATH): print('Discharge > Unable to find dir : ', WATER_LEVEL_DIR_PATH) return for station in stations : for filename in glob.glob(os.path.join(WATER_LEVEL_DIR_PATH, '%s-%s-*.txt' % (WATER_LEVEL_DIR_NAME, station.replace(' ', '_')))): if not os.path.exists(filename): print('Discharge > Unable to find file : ', filename) break print('Waterlevel > store %s on startTime: %s' % (filename, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) csvReader = csv.reader(open(filename, 'r', newline=NEW_LINE), delimiter=',', quotechar='|') timeseries = list(csvReader) print('Start Date :', timeseries[0][0]) print('End Date :', timeseries[-1][0]) startDateTime = datetime.datetime.strptime(timeseries[0][0], '%Y-%m-%d %H:%M:%S') endDateTime = datetime.datetime.strptime(timeseries[-1][0], '%Y-%m-%d %H:%M:%S') waterlevelMeta = copy.deepcopy(metaData) waterlevelMeta['station'] = station waterlevelMeta['start_date'] = startDateTime.strftime("%Y-%m-%d %H:%M:%S") waterlevelMeta['end_date'] = endDateTime.strftime("%Y-%m-%d %H:%M:%S") for i in range(0, 6) : waterlevelMeta['type'] = types[i] eventId = adapter.get_event_id(waterlevelMeta) if eventId is None : eventId = adapter.create_event_id(waterlevelMeta) print('HASH SHA256 created: ', eventId) else : print('HASH SHA256 exists: ', eventId) waterlevelMetaQuery = copy.deepcopy(metaData) waterlevelMetaQuery['station'] = station waterlevelMetaQuery['type'] = types[i] dailyTimeseries = timeseries[i*WL_RESOLUTION:(i+1)*WL_RESOLUTION] dailyStartDateTime = datetime.datetime.strptime(dailyTimeseries[0][0], '%Y-%m-%d %H:%M:%S') dailyEndDateTime = datetime.datetime.strptime(dailyTimeseries[-1][0], '%Y-%m-%d %H:%M:%S') opts = { 'from': dailyStartDateTime.strftime("%Y-%m-%d %H:%M:%S"), 'to': dailyEndDateTime.strftime("%Y-%m-%d %H:%M:%S") } existingTimeseries = adapter.retrieve_timeseries(waterlevelMetaQuery, opts) if len(existingTimeseries[0]['timeseries']) > 0 and not forceInsert: print('Timeseries already exists. 
User --force to update the existing.\n') continue # for l in timeseries[:3] + timeseries[-2:] : # print(l) rowCount = adapter.insert_timeseries(eventId, timeseries[i*WL_RESOLUTION:(i+1)*WL_RESOLUTION], forceInsert) print('%s rows inserted.\n' % rowCount) def storeFLO2DStations(adapter): print('\nStoring FLO2D Stations :::') CADPTS_DAT_FILE_PATH = os.path.join(ROOT_DIR, CADPTS_DAT_FILE) bufsize = 65536 stationIDOffset = 1000 with open(CADPTS_DAT_FILE_PATH) as infile: num_stations = 0 while True: lines = infile.readlines(bufsize) if not lines: break for line in lines: s = line.split() if len(s) > 0 : cellId = int(s[0]) station = [Station.FLO2D, 'flo2d_%s' % (stationIDOffset + cellId), 'FLO2D %s' % cellId, s[1], s[2], 0, 'FLO2D Virtual Station'] print('FLO2D Cell:', cellId, 'with latitude: %s, longitude: %s -> inserted as `FLO2D %s`' % (s[1], s[2], cellId)) is_station_exists = adapter.get_station({'name': 'flo2d_%s' % (stationIDOffset + cellId)}) if is_station_exists is None: adapter.create_station(station) num_stations += 1 print('%s stations inserted.\n' % num_stations) def storeWaterlevelGrid(adapter): print('\nStoring Waterlevel Grid :::') CADPTS_DAT_FILE_PATH = os.path.join(ROOT_DIR, CADPTS_DAT_FILE) bufsize = 65536 stationIDOffset = 1000 CELLS = [] with open(CADPTS_DAT_FILE_PATH) as infile: while True: lines = infile.readlines(bufsize) if not lines: break for line in lines: s = line.split() if len(s) > 0 : cellId = int(s[0]) CELLS.append(cellId) types = [ 'Forecast-0-d', 'Forecast-1-d-after', 'Forecast-2-d-after', 'Forecast-3-d-after', 'Forecast-4-d-after', 'Forecast-5-d-after' ] metaData = { 'station': 'FLO2D %s' % CELLS[0], 'variable': 'Waterlevel', 'unit': 'm', 'type': types[0], 'source': 'FLO2D', 'name': 'Cloud-WL-Grid', } WATER_LEVEL_GRID_DIR_PATH = os.path.join(WL_GRID_OUTPUT_DIR, '%s-%s' % (WATER_LEVEL_GRID_DIR_NAME, waterlevelOutSuffix)) if not os.path.exists(WATER_LEVEL_GRID_DIR_PATH): print('Discharge > Unable to find dir : ', WATER_LEVEL_GRID_DIR_PATH) return boundary = getGridBoudary() CellGrid = getCellGrid(boundary) waterLevelGridSeriesDict = dict.fromkeys(CELLS, []) for fileName in sorted(glob.glob(os.path.join(WATER_LEVEL_GRID_DIR_PATH, '%s-*.asc' % (WATER_LEVEL_GRID_DIR_NAME)))) : if not os.path.exists(fileName): print('Discharge > Unable to find file : ', fileName) break # Extract time from fileName ascFileName = fileName.rsplit('/', 1)[-1] dateTimeStr = ascFileName[len(WATER_LEVEL_GRID_DIR_NAME)+1:-4] dateTime = datetime.datetime.strptime(dateTimeStr, '%Y-%m-%d_%H-%M-%S') ascii_grid = np.loadtxt(fileName, skiprows=6) for cellNo in CELLS : i, j = CellGrid[cellNo] tmpTS = waterLevelGridSeriesDict[cellNo][:] tmpTS.append([dateTime.strftime("%Y-%m-%d %H:%M:%S"), ascii_grid[j][i] ]) waterLevelGridSeriesDict[cellNo] = tmpTS print('Scanned Waterlevel Grid file :', ascFileName) for station in CELLS : timeseries = waterLevelGridSeriesDict[station] startDateTime = datetime.datetime.strptime(timeseries[0][0], '%Y-%m-%d %H:%M:%S') baseTime = datetime.datetime.strptime(date, '%Y-%m-%d') if(startDateTime > baseTime) : print('Adding base time into the top of timeseries') timeseries = [[baseTime.strftime("%Y-%m-%d %H:%M:%S"), WL_GRID_MISSING_VALUE]] + timeseries[:] print('Start Date :', timeseries[0][0]) print('End Date :', timeseries[-1][0]) startDateTime = datetime.datetime.strptime(timeseries[0][0], '%Y-%m-%d %H:%M:%S') endDateTime = datetime.datetime.strptime(timeseries[-1][0], '%Y-%m-%d %H:%M:%S') waterlevelGridMeta = copy.deepcopy(metaData) waterlevelGridMeta['station'] = 
'FLO2D %s' % station waterlevelGridMeta['start_date'] = startDateTime.strftime("%Y-%m-%d %H:%M:%S") waterlevelGridMeta['end_date'] = endDateTime.strftime("%Y-%m-%d %H:%M:%S") for i in range(0, 6) : waterlevelGridMeta['type'] = types[i] eventId = adapter.get_event_id(waterlevelGridMeta) if eventId is None : eventId = adapter.create_event_id(waterlevelGridMeta) print('HASH SHA256 created: ', eventId) else : print('HASH SHA256 exists: ', eventId) waterlevelGridMetaQuery = copy.deepcopy(metaData) waterlevelGridMetaQuery['station'] = 'FLO2D %s' % station waterlevelGridMetaQuery['type'] = types[i] dailyTimeseries = timeseries[i*WL_GRID_RESOLUTION:(i+1)*WL_GRID_RESOLUTION] dailyStartDateTime = datetime.datetime.strptime(dailyTimeseries[0][0], '%Y-%m-%d %H:%M:%S') dailyEndDateTime = datetime.datetime.strptime(dailyTimeseries[-1][0], '%Y-%m-%d %H:%M:%S') opts = { 'from': dailyStartDateTime.strftime("%Y-%m-%d %H:%M:%S"), 'to': dailyEndDateTime.strftime("%Y-%m-%d %H:%M:%S") } existingTimeseries = adapter.retrieve_timeseries(waterlevelGridMetaQuery, opts) if len(existingTimeseries[0]['timeseries']) > 0 and not forceInsert: print('Timeseries already exists. User --force to update the existing.\n') continue # for l in timeseries[:3] + timeseries[-2:] : # print(l) rowCount = adapter.insert_timeseries(eventId, timeseries[i*WL_GRID_RESOLUTION:(i+1)*WL_GRID_RESOLUTION], forceInsert) print('%s rows inserted.\n' % rowCount) adapter = MySQLAdapter(host=MYSQL_HOST, user=MYSQL_USER, password=MYSQL_PASSWORD, db=MYSQL_DB) if rainfallInsert or allInsert : storeRainfall(adapter) if dischargeInsert or allInsert : storeDischarge(adapter) if waterlevelInsert or allInsert : storeWaterlevel(adapter) if waterlevelGridInsert or allInsert : storeWaterlevelGrid(adapter) if flo2dStationsInsert : storeFLO2DStations(adapter)
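Each store* function above follows the same adapter workflow: build a metadata dict for one station and forecast type, look up (or create) its SHA256 event id, then bulk-insert [timestamp, value] rows. The sketch below isolates that workflow for a single series; the connection details and the sample rows are placeholders, only the adapter calls are taken from the script above.

# Minimal sketch of the adapter workflow used by the store* functions above.
# Connection details and the two sample rows are placeholders.
from curwmysqladapter import MySQLAdapter

adapter = MySQLAdapter(host='localhost', user='root', password='', db='curw')

meta = {
    'station': 'Hanwella',
    'variable': 'Discharge',
    'unit': 'm3/s',
    'type': 'Forecast-0-d',
    'source': 'HEC-HMS',
    'name': 'Cloud Continuous',
    'start_date': '2017-05-27 00:00:00',
    'end_date': '2017-05-27 01:00:00',
}

event_id = adapter.get_event_id(meta)
if event_id is None:
    # A new SHA256 hash is generated for this metadata combination.
    event_id = adapter.create_event_id(meta)

rows = [
    ['2017-05-27 00:00:00', 12.5],
    ['2017-05-27 01:00:00', 13.1],
]
# Third argument mirrors the script's forceInsert flag (overwrite if present).
adapter.insert_timeseries(event_id, rows, True)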
# Copyright 2014-2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import copy import pytest from distutils.version import LooseVersion from f5.bigip.tm.gtm.server import Server from f5.bigip.tm.gtm.server import Virtual_Server from requests.exceptions import HTTPError from six import iteritems def delete_server(mgmt_root, name): try: foo = mgmt_root.tm.gtm.servers.server.load(name=name) except HTTPError as err: if err.response.status_code != 404: raise return foo.delete() def delete_dc(mgmt_root, name, partition): try: delete_server(mgmt_root, 'fake_serv1') foo = mgmt_root.tm.gtm.datacenters.datacenter.load( name=name, partition=partition ) except HTTPError as err: if err.response.status_code != 404: raise return foo.delete() def create_dc(request, mgmt_root, name, partition): def teardown(): delete_dc(mgmt_root, name, partition) # this line is to clean up any object that might have been left by # previous test delete_dc(mgmt_root, name, partition) dc = mgmt_root.tm.gtm.datacenters.datacenter.create( name=name, partition=partition) request.addfinalizer(teardown) return dc def setup_create_test(request, mgmt_root, name): def teardown(): delete_server(mgmt_root, name) request.addfinalizer(teardown) def setup_basic_test(request, mgmt_root, name, partition): def teardown(): delete_server(mgmt_root, name) # this line is to clean up any object that might have been left by # previous test delete_dc(mgmt_root, 'dc1', partition) dc = create_dc(request, mgmt_root, 'dc1', partition) serv1 = mgmt_root.tm.gtm.servers.server.create( name=name, datacenter=dc.name, addresses=[{'name': '1.1.1.1'}]) request.addfinalizer(teardown) return serv1 def delete_vs(mgmt_root, name): s1 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') try: foo = s1.virtual_servers_s.virtual_server.load( name=name) except HTTPError as err: if err.response.status_code != 404: raise return foo.delete() def setup_vs_basic_test(request, mgmt_root, name, destination): def teardown(): delete_vs(mgmt_root, name) s1 = setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') vs = s1.virtual_servers_s.virtual_server.create( name=name, destination=destination) request.addfinalizer(teardown) return vs def setup_create_vs_test(request, mgmt_root, name): def teardown(): delete_server(mgmt_root, name) request.addfinalizer(teardown) class TestCreate(object): def test_create_req_arg(self, request, mgmt_root): setup_create_test(request, mgmt_root, 'fake_serv1') dc = create_dc(request, mgmt_root, 'dc1', 'Common') serv1 = mgmt_root.tm.gtm.servers.server.create( name='fake_serv1', datacenter=dc.name, addresses=[{'name': '1.1.1.1'}]) if LooseVersion(pytest.config.getoption('--release')) >= \ LooseVersion('12.1.0'): link = 'https://localhost/mgmt/tm/gtm/server/~Common' \ '~fake_serv1' else: link = 'https://localhost/mgmt/tm/gtm/server/fake_serv1' assert serv1.name == 'fake_serv1' assert serv1.generation and isinstance(serv1.generation, int) assert serv1.kind == 'tm:gtm:server:serverstate' assert serv1.selfLink.startswith(link) def 
test_create_optional_args(self, request, mgmt_root): setup_create_test(request, mgmt_root, 'fake_serv1') dc = create_dc(request, mgmt_root, 'dc1', 'Common') serv1 = mgmt_root.tm.gtm.servers.server.create( name='fake_serv1', datacenter=dc.name, addresses=[{'name': '1.1.1.1'}], iqAllowPath='no', enabled=False, disabled=True) assert serv1.disabled is True assert not hasattr(serv1, 'enabled') assert serv1.iqAllowPath == 'no' def test_create_duplicate(self, request, mgmt_root): setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') with pytest.raises(HTTPError) as err: mgmt_root.tm.gtm.servers.server.create( name='fake_serv1', datacenter='dc1', addresses=[{'name': '1.1.1.1'}]) assert err.value.response.status_code == 409 class TestRefresh(object): def test_refresh(self, request, mgmt_root): setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') s1 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') s2 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') assert s1.iqAllowPath == 'yes' assert s2.iqAllowPath == 'yes' s2.update(iqAllowPath='no') assert s1.iqAllowPath == 'yes' assert s2.iqAllowPath == 'no' s1.refresh() assert s1.iqAllowPath == 'no' class TestLoad(object): def test_load_no_object(self, mgmt_root): with pytest.raises(HTTPError) as err: mgmt_root.tm.gtm.servers.server.load( name='fake_serv1') assert err.value.response.status_code == 404 @pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) == '11.5.4', reason='Needs > v11.5.4 TMOS to pass' ) def test_load(self, request, mgmt_root): setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') s1 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') assert s1.enabled is True s1.enabled = False s1.disabled = True s1.update() s2 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') assert not hasattr(s1, 'enabled') assert hasattr(s2, 'disabled') assert s2.disabled is True @pytest.mark.skipif( LooseVersion(pytest.config.getoption('--release')) >= LooseVersion('11.6.0'), reason='This test is for 11.5.4 or less.' 
) def test_load_11_5_4_and_less(self, request, mgmt_root): setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') s1 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') assert s1.enabled is True s1.enabled = False s1.update() s2 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') assert hasattr(s2, 'enabled') assert s2.enabled is True class TestUpdateModify(object): def test_update(self, request, mgmt_root): s1 = setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') assert s1.iqAllowPath == 'yes' s1.update(iqAllowPath='no') assert s1.iqAllowPath == 'no' def test_modify(self, request, mgmt_root): s1 = setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') original_dict = copy.copy(s1.__dict__) iqpath = 'iqAllowPath' s1.modify(iqAllowPath='no') for k, v in iteritems(original_dict): if k != iqpath: original_dict[k] = s1.__dict__[k] elif k == iqpath: assert s1.__dict__[k] == 'no' class TestDelete(object): def test_delete(self, request, mgmt_root): s1 = setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') s1.delete() with pytest.raises(HTTPError) as err: mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') assert err.value.response.status_code == 404 class TestServerCollection(object): def test_server_collection(self, request, mgmt_root): s1 = setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') if LooseVersion(pytest.config.getoption('--release')) >= LooseVersion('12.1.0'): link = 'https://localhost/mgmt/tm/gtm/server/~Common~fake_serv1' else: link = 'https://localhost/mgmt/tm/gtm/server/fake_serv1' assert s1.name == 'fake_serv1' assert s1.generation and isinstance(s1.generation, int) assert s1.kind == 'tm:gtm:server:serverstate' assert s1.selfLink.startswith(link) sc = mgmt_root.tm.gtm.servers.get_collection() assert isinstance(sc, list) assert len(sc) assert isinstance(sc[0], Server) class TestVirtualServerSubCollection(object): def test_create_req_arg(self, request, mgmt_root): setup_create_vs_test(request, mgmt_root, 'vs1') s1 = setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') vs = s1.virtual_servers_s vs1 = vs.virtual_server.create(name='vs1', destination='5.5.5.5:80') if LooseVersion(pytest.config.getoption('--release')) >= LooseVersion('12.1.0'): link = 'https://localhost/mgmt/tm/gtm/server/~Common~fake_serv1' \ '/virtual-servers/vs' else: link = 'https://localhost/mgmt/tm/gtm/server/fake_serv1' \ '/virtual-servers/vs1' assert vs1.name == 'vs1' assert vs1.generation and isinstance(vs1.generation, int) assert vs1.kind == 'tm:gtm:server:virtual-servers:virtual-serversstate' assert vs1.selfLink.startswith(link) @pytest.mark.skipif(pytest.config.getoption('--release') < '12.0.0', reason='11.x was buggy. 
Only test 12.x') def test_create_req_arg_remote_like_name(self, request, mgmt_root): setup_create_vs_test(request, mgmt_root, 'fake_serv1') s1 = setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') vs = s1.virtual_servers_s vs1 = vs.virtual_server.create(name='/mouse/vs1', destination='5.5.5.5:80') link = 'https://localhost/mgmt/tm/gtm/server/~Common~fake_serv1/virtual-servers/~mouse~vs1' assert vs1.name == '/mouse/vs1' assert vs1.kind == 'tm:gtm:server:virtual-servers:virtual-serversstate' assert vs1.selfLink.startswith(link) vs1.update(destination='5.5.5.5:8000') vs1.delete() # Create so that pytest cleanup will succeed vs.virtual_server.create(name='/mouse/vs1', destination='5.5.5.5:80') def test_create_optional_args(self, request, mgmt_root): setup_create_vs_test(request, mgmt_root, 'vs1') s1 = setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') vs = s1.virtual_servers_s vs1 = vs.virtual_server.create(name='vs1', destination='5.5.5.5:80', description='FancyFakeVS', limitMaxBpsStatus='enabled', limitMaxBps=1337) assert vs1.name == 'vs1' assert vs1.description == 'FancyFakeVS' assert vs1.limitMaxBpsStatus == 'enabled' assert vs1.limitMaxBps == 1337 def test_create_duplicate(self, request, mgmt_root): setup_vs_basic_test(request, mgmt_root, 'vs1', '5.5.5.5:80') s1 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') try: s1.virtual_servers_s.virtual_server.create( name='vs1', destination='5.5.5.5:80') except HTTPError as err: assert err.response.status_code == 409 def test_refresh(self, request, mgmt_root): setup_vs_basic_test(request, mgmt_root, 'vs1', '5.5.5.5:80') s1 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') vs1 = s1.virtual_servers_s.virtual_server.load(name='vs1') vs2 = s1.virtual_servers_s.virtual_server.load(name='vs1') assert vs1.limitMaxBpsStatus == 'disabled' assert vs2.limitMaxBpsStatus == 'disabled' vs2.update(limitMaxBpsStatus='enabled') assert vs1.limitMaxBpsStatus == 'disabled' assert vs2.limitMaxBpsStatus == 'enabled' vs1.refresh() assert vs2.limitMaxBpsStatus == 'enabled' def test_load_no_object(self, request, mgmt_root): s1 = setup_basic_test(request, mgmt_root, 'fake_serv1', 'Common') try: s1.virtual_servers_s.virtual_server.load(name='vs1') except HTTPError as err: assert err.response.status_code == 404 def test_load(self, request, mgmt_root): setup_vs_basic_test(request, mgmt_root, 'vs1', '5.5.5.5:80') s1 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') vs1 = s1.virtual_servers_s.virtual_server.load(name='vs1') assert vs1.name == 'vs1' assert vs1.limitMaxBpsStatus == 'disabled' vs1.limitMaxBpsStatus = 'enabled' vs1.update() vs2 = s1.virtual_servers_s.virtual_server.load(name='vs1') assert vs2.name == 'vs1' assert vs2.limitMaxBpsStatus == 'enabled' def test_update(self, request, mgmt_root): vs1 = setup_vs_basic_test(request, mgmt_root, 'vs1', '5.5.5.5:80') assert vs1.limitMaxBpsStatus == 'disabled' vs1.update(limitMaxBpsStatus='enabled') assert vs1.limitMaxBpsStatus == 'enabled' def test_modify(self, request, mgmt_root): vs1 = setup_vs_basic_test(request, mgmt_root, 'vs1', '5.5.5.5:80') original_dict = copy.copy(vs1.__dict__) limit = 'limitMaxBpsStatus' vs1.modify(limitMaxBpsStatus='enabled') for k, v in iteritems(original_dict): if k != limit: original_dict[k] = vs1.__dict__[k] elif k == limit: assert vs1.__dict__[k] == 'enabled' @pytest.mark.skipif( pytest.config.getoption('--release') == '11.6.0', reason='Due to a bug in 11.6.0 Final this test fails' ) def test_delete(self, request, mgmt_root): vs1 = 
setup_vs_basic_test(request, mgmt_root, 'vs2', '5.5.5.5:80') vs1.delete() s1 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') try: s1.virtual_servers_s.virtual_server.load(name='vs2') except HTTPError as err: assert err.response.status_code == 404 def test_virtual_server_collection(self, request, mgmt_root): vs1 = setup_vs_basic_test(request, mgmt_root, 'vs1', '5.5.5.5:80') if LooseVersion(pytest.config.getoption('--release')) >= LooseVersion('12.1.0'): link = 'https://localhost/mgmt/tm/gtm/server/~Common~fake_serv1' \ '/virtual-servers/vs' else: link = 'https://localhost/mgmt/tm/gtm/server/fake_serv1/virtual' \ '-servers/vs1' assert vs1.name == 'vs1' assert vs1.generation and isinstance(vs1.generation, int) assert vs1.kind == 'tm:gtm:server:virtual-servers:virtual-serversstate' assert vs1.selfLink.startswith(link) s1 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1') vsc = s1.virtual_servers_s.get_collection() assert isinstance(vsc, list) assert len(vsc) assert isinstance(vsc[0], Virtual_Server)
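Outside the test harness, the same GTM objects are reached through an f5-common-python ManagementRoot session. The sketch below shows the datacenter/server/virtual-server lifecycle that these tests exercise; the management address and credentials are placeholders, while the resource paths and method calls mirror the fixtures above.

# Sketch of the GTM server lifecycle exercised by the tests above.
# Management address and credentials are placeholders.
from f5.bigip import ManagementRoot

mgmt_root = ManagementRoot('192.0.2.10', 'admin', 'admin')

# A server must reference an existing datacenter.
dc = mgmt_root.tm.gtm.datacenters.datacenter.create(name='dc1', partition='Common')
serv = mgmt_root.tm.gtm.servers.server.create(
    name='fake_serv1', datacenter=dc.name, addresses=[{'name': '1.1.1.1'}])

# Virtual servers live in a subcollection of the server resource.
vs = serv.virtual_servers_s.virtual_server.create(
    name='vs1', destination='5.5.5.5:80')

# Load an independent copy, change an attribute, then clean up.
serv2 = mgmt_root.tm.gtm.servers.server.load(name='fake_serv1')
serv2.update(iqAllowPath='no')

vs.delete()
serv2.delete()
dc.delete()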
import sys messages = [] # C++ typesMap = { "i8": "uint8_t", "i32": "uint32_t", "i64": "uint64_t", "bool": "bool", "ip": "IPAddress", "mac": "MACAddress", "string": "string", "match": "Match&", "match[]": "std::vector<Match>", "action": "Action&", "action[]": "std::vector<Action>", "option": "Option&", "option[]": "std::vector<Option>", } defaultValues = { "i8": "0", "i32": "0", "i64": "0", "bool": "false", "ip": "IPAddress(IPV4)", "mac": "MACAddress()", "string": "\"\"", "match[]": "std::vector<Match>()", "action[]": "std::vector<Action>()", "option[]": "std::vector<Option>()", } exportType = { # Cast prevents C++ stringstreams from interpreting uint8_t as char "i8": "to_string<uint16_t>({0})", "i32": "to_string<uint32_t>({0})", "i64": "to_string<uint64_t>({0})", "bool": "{0}", "ip": "{0}.toString()", "mac": "{0}.toString()", "string": "{0}", "match[]": "MatchList::to_BSON({0})", "action[]": "ActionList::to_BSON({0})", "option[]": "OptionList::to_BSON({0})", } importType = { "i8": "string_to<uint8_t>({0}.String())", "i32": "string_to<uint32_t>({0}.String())", "i64": "string_to<uint64_t>({0}.String())", "bool": "{0}.Bool()", "ip": "IPAddress(IPV4, {0}.String())", "mac": "MACAddress({0}.String())", "string": "{0}.String()", "match[]": "MatchList::to_vector({0}.Array())", "action[]": "ActionList::to_vector({0}.Array())", "option[]": "OptionList::to_vector({0}.Array())", } # Python pyTypesMap = { "match" : "Match", "action" : "Action", "option" : "Option", } pyDefaultValues = { "i8": "0", "i32": "0", "i64": "0", "bool": "False", "ip": "\"\"", "mac": "\"\"", "string": "\"\"", "match[]": "list()", "action[]": "list()", "option[]": "list()", } pyExportType = { "i8": "str({0})", "i32": "str({0})", "i64": "str({0})", "bool": "bool({0})", "ip": "str({0})", "mac": "str({0})", "string": "{0}", "match[]": "{0}", "action[]": "{0}", "option[]": "{0}", } pyImportType = { "i8": "int({0})", "i32": "int({0})", "i64": "int({0})", "bool": "bool({0})", "ip": "str({0})", "mac": "str({0})", "string": "str({0})", "match[]": "list({0})", "action[]": "list({0})", "option[]": "list({0})", } def convmsgtype(string): result = "" i = 0 for char in string: if char.isupper(): result += char i += 1 else: break if i > 1: result = result[:-1] i -= 1 for char in string[i:]: if char.isupper(): result += " " + char.lower() else: result += char.lower() return result.replace(" ", "_").upper() class CodeGenerator: def __init__(self): self.code = [] self.indentLevel = 0 def addLine(self, line): indent = self.indentLevel * " " self.code.append(indent + line) def increaseIndent(self): self.indentLevel += 1 def decreaseIndent(self): self.indentLevel -= 1 def blankLine(self): self.code.append("") def __str__(self): return "\n".join(self.code) def genH(messages, fname): g = CodeGenerator() g.addLine("#ifndef __" + fname.upper() + "_H__") g.addLine("#define __" + fname.upper() + "_H__") g.blankLine(); g.addLine("#include <stdint.h>") g.blankLine(); g.addLine("#include \"converter.h\"") g.addLine("#include \"ipc/IPC.h\"") g.addLine("#include \"types/IPAddress.h\"") g.addLine("#include \"types/MACAddress.h\"") g.addLine("#include \"types/Action.hh\"") g.addLine("#include \"types/Match.hh\"") g.addLine("#include \"types/Option.hh\"") g.blankLine(); enum = "enum {\n\t" enum += ",\n\t".join([convmsgtype(name) for name, msg in messages]) enum += "\n};" g.addLine(enum); g.blankLine(); for (name, msg) in messages: g.addLine("class " + name + " : public IPCMessage {") g.increaseIndent() g.addLine("public:") g.increaseIndent() # Default 
constructor g.addLine(name + "();") # Constructor with parameters g.addLine("{0}({1});".format(name, ", ".join([typesMap[t] + " " + f for t, f in msg]))) g.blankLine() for t, f in msg: g.addLine("{0} get_{1}();".format(typesMap[t], f)) g.addLine("void set_{0}({1} {2});".format(f, typesMap[t], f)) if t[-2:] == "[]": t2 = t[0:-2] g.addLine("void add_{0}(const {1} {0});".format(t2, typesMap[t2])) g.blankLine() g.addLine("virtual int get_type();") g.addLine("virtual void from_BSON(const char* data);") g.addLine("virtual const char* to_BSON();") g.addLine("virtual string str();") g.decreaseIndent(); g.blankLine() g.addLine("private:") g.increaseIndent() for t, f in msg: g.addLine("{0} {1};".format(typesMap[t], f)) g.decreaseIndent(); g.decreaseIndent(); g.addLine("};") g.blankLine(); g.addLine("#endif /* __" + fname.upper() + "_H__ */") return str(g) def genCPP(messages, fname): g = CodeGenerator() g.addLine("#include \"{0}.h\"".format(fname)) g.blankLine() g.addLine("#include <mongo/bson/bson.h>") g.blankLine() for name, msg in messages: g.addLine("{0}::{0}() {{".format(name)) g.increaseIndent(); for t, f in msg: g.addLine("set_{0}({1});".format(f, defaultValues[t])) g.decreaseIndent() g.addLine("}") g.blankLine(); g.addLine("{0}::{0}({1}) {{".format(name, ", ".join([typesMap[t] + " " + f for t, f in msg]))) g.increaseIndent(); for t, f in msg: g.addLine("set_{0}({1});".format(f, f)) g.decreaseIndent() g.addLine("}") g.blankLine(); g.addLine("int {0}::get_type() {{".format(name)) g.increaseIndent(); g.addLine("return {0};".format(convmsgtype(name))) g.decreaseIndent() g.addLine("}") g.blankLine(); for t, f in msg: g.addLine("{0} {1}::get_{2}() {{".format(typesMap[t], name, f)) g.increaseIndent(); g.addLine("return this->{0};".format(f)) g.decreaseIndent() g.addLine("}") g.blankLine(); g.addLine("void {0}::set_{1}({2} {3}) {{".format(name, f, typesMap[t], f)) g.increaseIndent(); g.addLine("this->{0} = {1};".format(f, f)) g.decreaseIndent() g.addLine("}") g.blankLine(); if t[-2:] == "[]": t2 = t[0:-2] g.addLine("void {0}::add_{1}(const {2} {1}) {{".format(name, t2, typesMap[t2])) g.increaseIndent() g.addLine("this->{0}.push_back({1});".format(f, t2)) g.decreaseIndent() g.addLine("}") g.blankLine(); g.addLine("void {0}::from_BSON(const char* data) {{".format(name)) g.increaseIndent(); g.addLine("mongo::BSONObj obj(data);") for t, f in msg: value = "obj[\"{0}\"]".format(f) g.addLine("set_{0}({1});".format(f, importType[t].format(value))) g.decreaseIndent() g.addLine("}") g.blankLine(); g.addLine("const char* {0}::to_BSON() {{".format(name)) g.increaseIndent(); g.addLine("mongo::BSONObjBuilder _b;") for t, f in msg: value = "get_{0}()".format(f) if t[-2:] == "[]": g.addLine("_b.appendArray(\"{0}\", {1});".format(f, exportType[t].format(value))) else: g.addLine("_b.append(\"{0}\", {1});".format(f, exportType[t].format(value))) g.addLine("mongo::BSONObj o = _b.obj();") g.addLine("char* data = new char[o.objsize()];") g.addLine("memcpy(data, o.objdata(), o.objsize());") g.addLine("return data;"); g.decreaseIndent() g.addLine("}") g.blankLine(); g.addLine("string {0}::str() {{".format(name)) g.increaseIndent(); g.addLine("stringstream ss;") g.addLine("ss << \"{0}\" << endl;".format(name)) for t, f in msg: value = "get_{0}()".format(f) g.addLine("ss << \" {0}: \" << {1} << endl;".format(f, exportType[t].format(value))) g.addLine("return ss.str();") g.decreaseIndent() g.addLine("}") g.blankLine(); return str(g) def genHFactory(messages, fname): g = CodeGenerator() g.addLine("#ifndef 
__{0}FACTORY_H__".format(fname.upper())) g.addLine("#define __{0}FACTORY_H__".format(fname.upper())) g.blankLine() g.addLine("#include \"IPC.h\"") g.addLine("#include \"{0}.h\"".format(fname)) g.blankLine() g.addLine("class {0}Factory : public IPCMessageFactory {1}".format(fname, "{")) g.increaseIndent() g.addLine("protected:") g.increaseIndent() g.addLine("IPCMessage* buildForType(int type);") g.decreaseIndent() g.decreaseIndent() g.addLine("};"); g.blankLine() g.addLine("#endif /* __{0}FACTORY_H__ */".format(fname.upper())) g.blankLine() return str(g) def genCPPFactory(messages, fname): g = CodeGenerator() g.addLine("#include \"{0}Factory.h\"".format(fname)) g.blankLine() g.addLine("IPCMessage* {0}Factory::buildForType(int type) {1}".format(fname, "{")) g.increaseIndent() g.addLine("switch (type) {0}".format("{")) g.increaseIndent() for name, msg in messages: g.addLine("case {0}:".format(convmsgtype(name))) g.increaseIndent() g.addLine("return new {0}();".format(name)) g.decreaseIndent() g.addLine("default:") g.increaseIndent() g.addLine("return NULL;") g.decreaseIndent() g.decreaseIndent() g.addLine("}") g.decreaseIndent() g.addLine("}") g.blankLine() return str(g) def genPy(messages, fname): g = CodeGenerator() g.addLine("import bson") g.blankLine() for tlv in ["Match","Action","Option"]: g.addLine("from rflib.types.{0} import {0}".format(tlv)) g.addLine("from IPC import IPCMessage") g.blankLine() g.addLine("format_id = lambda dp_id: hex(dp_id).rstrip('L')") g.blankLine() v = 0 for name, msg in messages: g.addLine("{0} = {1}".format(convmsgtype(name), v)) v += 1 g.blankLine() for name, msg in messages: g.addLine("class {0}(IPCMessage):".format(name)) g.increaseIndent() g.addLine("def __init__(self, {0}):".format(", ".join([f + "=None" for t, f in msg]))) g.increaseIndent() for t, f in msg: g.addLine("self.set_{0}({0})".format(f)) g.decreaseIndent() g.blankLine(); g.addLine("def get_type(self):") g.increaseIndent(); g.addLine("return {0}".format(convmsgtype(name))) g.decreaseIndent() g.blankLine(); for t, f in msg: g.addLine("def get_{0}(self):".format(f)) g.increaseIndent(); g.addLine("return self.{0}".format(f)) g.decreaseIndent() g.blankLine(); g.addLine("def set_{0}(self, {0}):".format(f)) g.increaseIndent(); g.addLine("{0} = {1} if {0} is None else {0}".format(f, pyDefaultValues[t])) g.addLine("try:") g.increaseIndent() g.addLine("self.{0} = {1}".format(f, pyImportType[t].format(f))) g.decreaseIndent() g.addLine("except:") g.increaseIndent() g.addLine("self.{0} = {1}".format(f, pyDefaultValues[t])) g.decreaseIndent() g.decreaseIndent() g.blankLine(); if t[-2:] == "[]": t2 = t[0:-2] g.addLine("def add_{0}(self, {0}):".format(t2)) g.increaseIndent() g.addLine("self.{0}.append({1}.to_dict())".format(f, t2)) g.decreaseIndent() g.blankLine(); g.addLine("def from_dict(self, data):") g.increaseIndent(); for t, f in msg: g.addLine("self.set_{0}(data[\"{0}\"])".format(f)) g.decreaseIndent() g.blankLine(); g.addLine("def to_dict(self):") g.increaseIndent(); g.addLine("data = {}") for t, f in msg: value = pyExportType[t].format("self.get_{0}()".format(f)) g.addLine("data[\"{0}\"] = {1}".format(f, value)) g.addLine("return data") g.decreaseIndent() g.blankLine(); g.addLine("def __str__(self):") g.increaseIndent(); g.addLine("s = \"{0}\\n\"".format(name)) for t, f in msg: value = "self.get_{0}()".format(f) if t[-2:] == "[]": g.addLine("s += \" {0}:\\n\"".format(f)) g.addLine("for {0} in {1}:".format(t[:-2], value)) g.increaseIndent() g.addLine("s += \" \" + str({0}.from_dict({1})) + 
\"\\n\"".format(pyTypesMap[t[:-2]], t[:-2])) g.decreaseIndent() elif t == "i64": g.addLine("s += \" {0}: \" + format_id({1}) + \"\\n\"".format(f, value)) else: g.addLine("s += \" {0}: \" + str({1}) + \"\\n\"".format(f, value)) g.addLine("return s") g.decreaseIndent() g.decreaseIndent() g.blankLine(); return str(g) def genPyFactory(messages, fname): g = CodeGenerator() g.addLine("import rflib.ipc.IPC as IPC") g.addLine("from rflib.ipc.{0} import *".format(fname)) g.blankLine() g.addLine("class {0}Factory(IPC.IPCMessageFactory):".format(fname)) g.increaseIndent() g.addLine("def build_for_type(self, type_):") g.increaseIndent() for name, msg in messages: g.addLine("if type_ == {0}:".format(convmsgtype(name))) g.increaseIndent() g.addLine("return {0}()".format(name)) g.decreaseIndent() g.decreaseIndent() g.decreaseIndent() g.blankLine() return str(g) # Text processing fname = sys.argv[1] f = open(fname, "r") currentMessage = None lines = [line.rstrip("\n") for line in f.readlines()] f.close() i = 0 for line in lines: parts = line.split() if len(parts) == 0: continue elif len(parts) == 1: currentMessage = parts[0] messages.append((currentMessage, [])) elif len(parts) == 2: if currentMessage is None: print "Error: message not declared" messages[-1][1].append((parts[0], parts[1])) else: print "Error: invalid line" f = open(fname + ".h", "w") f.write(genH(messages, fname)) f.close() f = open(fname + ".cc", "w") f.write(genCPP(messages, fname)) f.close() f = open(fname + "Factory.h", "w") f.write(genHFactory(messages, fname)) f.close() f = open(fname + "Factory.cc", "w") f.write(genCPPFactory(messages, fname)) f.close() f = open(fname + ".py", "w") f.write(genPy(messages, fname)) f.close() f = open(fname + "Factory.py", "w") f.write(genPyFactory(messages, fname)) f.close()
# -*- coding: utf-8 -*- # pylint: disable=C0103 """ Linear regression (ordinary least squares, ridge). """ # Author: bertrand-l # License: BSD from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np from .features import Standard from .mixin import LeastSquaresErrorMixin from .optimize.objective import BaseObjective from .optimize.regularize import (BaseRegularizer, LassoRegularizer, TikhonovRegularizer) from .optimize.optimize import GradientDescent from .predictor import BaseRegressionPredictor from .stats import confint, f_test, t_test_1samp from .util import assert_Xy, strfmt, strtab __all__ = ('LassoRegress', 'LinearRegress', 'RegularizedLinearRegress', 'RidgeRegress') def h_linear(X, theta): r""" Linear hypothesis :math:`y = \theta_0 + \sum_{i=i}^n theta_i x_i` Parameters ---------- X : array_like input features, shape=(n_samples, n_features). Returns ------- y : array Raises ------ ValueError """ X = np.asarray(X) theta = np.asarray(theta) if not theta.shape[0] == X.shape[1] + 1: raise ValueError("Mismatch between 'n_features' and 'n_parameters'.") return theta[0] + np.dot(X, theta[1:]) class LeastSquares(BaseObjective): """ Least-squares error (or cost) function for linear regression ``J(theta) = (1/2n) * sum_i (theta0 + theta^T x^(i) - y^(i))^2`` where y is assumed to be a scalar. Subclass of optimize.objective.BaseObjective. Parameters ---------- regularizer : instance of a BaseRegularizer subclass Regularizing penalty such as Tikhonov (ridge) or Lasso. """ def __init__(self, regularizer=None): if not (regularizer is None or isinstance(regularizer, BaseRegularizer)): raise TypeError("'regularizer' is not a BaseRegularizer object.") self.regularizer = regularizer def J(self, theta, X, y): """ Sum of squared residuals (error or cost function). Parameters ---------- theta : array_like parameters X : array_like input features, shape=(n_samples, n_features). y : array_like targets, shape=(n_samples,). Returns ------- J : float Raises ------ TypeError, ValueError """ X, y = assert_Xy(X, y) theta = np.asarray(theta) residuals = h_linear(X, theta) - y J = 0.5 * np.dot(residuals, residuals) / len(y) if self.regularizer is not None: J += self.regularizer.penalty(theta) return J def gradient_J(self, theta, X, y): """ Derivative of the error or cost function with respect to theta. Parameters ---------- X : array_like input features, shape=(n_samples, n_features). y : array_like targets, shape=(n_samples,). theta : array_like parameters Returns ------- gradJ : float Raises ------ TypeError, ValueError """ X, y = assert_Xy(X, y) theta = np.asarray(theta) residuals = h_linear(X, theta) - y gradJ = np.append(np.mean(residuals), np.dot(residuals, X) / len(y)) if self.regularizer is not None: gradJ += self.regularizer.gradient_penalty(theta) return gradJ def hessian_J(self, theta, X, y): """ Hessian of the objective function with respect to theta. Parameters ---------- X : array_like input features, shape=(n_samples, n_features). y : array_like targets, shape=(n_samples,). theta : array_like parameters Returns ------- hessian_J : array """ X, y = assert_Xy(X, y) # add a column of 1s for intercept variable X1 = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1) hessJ = np.dot(X1.T, X1) / len(y) if self.regularizer is not None: hessJ += self.regularizer.hessian_penalty(theta) return hessJ @staticmethod def loglik(y, y_hat): """ Log-likelihood. 
Parameters ---------- y : float or array_like observed target values y_hat : float or array_like predicted target values Returns ------- loglik : float """ n_samples = float(len(y)) residuals = y - y_hat sigma2 = np.dot(residuals, residuals) / n_samples return - 0.5 * n_samples * (1. + np.log(2 * np.pi * sigma2)) class LinearRegress(LeastSquaresErrorMixin, BaseRegressionPredictor): """ Linear regression using ordinary least squares. Parameters ---------- minimizer : Optimizer object, optional By default an optimize.GradientDescent instance. Raises ------ TypeError """ def __init__(self, minimizer=None): if minimizer is None: minimizer = GradientDescent() BaseRegressionPredictor.__init__(self, minimizer=minimizer) self._objective = LeastSquares(regularizer=None) self._h = h_linear self._fn_parameters = lambda n_features: n_features + 1 def __str__(self): stats = self._training_stats txt = BaseRegressionPredictor.__str__(self) table = [("Mean = {0}" .format(strfmt(stats.get('mean_residual', None), '.4g')), "MAE = {0}" .format(strfmt(stats.get('MAE', None), '.4g')), "RMSE = {0}" .format(strfmt(stats.get('RMSE', None), '.4g')))] txt += ("\n" + strtab(table, margin=8, ffmt='s', title="Residuals")) table = (("Log-lik = {0}" .format(strfmt(stats.get('loglik', None), '.3g')), "Log-lik H0 = {0}" .format(strfmt(stats.get('loglik_null', None), '.3g'))), ("R^2 = {0}" .format(strfmt(stats.get('R2', None), '.4f')), "Adjusted R^2 = {0}" .format(strfmt(stats.get('R2_adj', None), '.4f'))), ("F = {0} on {1}, {2} dof" .format(strfmt(stats.get('F', None), '.3g'), strfmt(stats.get('F_dof1', None), 'i'), strfmt(stats.get('F_dof2', None), 'i')), "Pr(>F) = {0}" .format(strfmt(stats.get('F_pvalue', None), '.4f')))) txt += ("\n" + strtab(table, margin=8, ffmt='s', title="Global fit--H0: theta_i = 0 for i > 0")) table = [("AIC = {0}" .format(strfmt(stats.get('AIC', None), '.3g')), "AICc = {0}" .format(strfmt(stats.get('AICc', None), '.3g')), "BIC = {0}" .format(strfmt(stats.get('BIC', None), '.3g')))] txt += ("\n" + strtab(table, margin=8, ffmt='s', title="Model selection")) return txt def learn(self, X, y, standardize=True, theta=None): """ Learn by fitting model parameter to the training set. Parameters ---------- X : array_like input features, shape=(n_samples, n_features). y : array_like targets, shape=(n_samples,). standardize : boolean, optional standardize data before fitting. theta : array_like, optional guess for model parameters. 
Returns ------- self : LinearRegress object Raises ------ TypeError, ValueError """ X, y = assert_Xy(X, y) n_samples, n_features = X.shape n_parameters = self._fn_parameters(n_features) if theta is None: theta = np.zeros(n_parameters, dtype=np.float) theta[0] = np.mean(y) if standardize: std = Standard() std.learn(y) theta[0] = (theta[0] - std.mean) / std.std theta[1:] /= std.std BaseRegressionPredictor.learn(self, X, std.transform(y), standardize=standardize, theta=theta) self._theta[0] = self._theta[0] * std.std + std.mean self._theta[1:] *= std.std else: BaseRegressionPredictor.learn(self, X, y, standardize=False, theta=theta) theta = self.theta stats = {} # global fit # References: # - http://en.wikipedia.org/wiki/Ordinary_least_squares # - Hastie et al, Elements of Statistical Learning, 2nd, 2008, chap 3 y_hat = self.predict(X) loglik_full = LeastSquares.loglik(y, y_hat) loglik_null = LeastSquares.loglik(y, theta[0]) delta = y - np.mean(y) SS_tot = np.dot(delta, delta) residuals = y - self.predict(X) SS_res = np.dot(residuals, residuals) sigma = np.sqrt(SS_res / max(n_samples - n_features - 1, 1)) R2 = 1. - SS_res / SS_tot # coeff of determination R2_adj = (1. - (1. - R2) * (n_samples - 1) / max(n_samples - n_features - 1, 1)) # F-test. H0: theta_i=0 for i>0. n_dof1, n_dof2 = n_features, max(n_samples - n_features - 1, 1) F = ((SS_tot - SS_res) / n_dof1) / (SS_res / n_dof2) F_pvalue = f_test(F, n_dof1, n_dof2) # Pr(>F) under H0 stats['loglik'] = loglik_full stats['loglik_null'] = loglik_null stats['mean_residual'] = np.mean(residuals) stats['MAE'] = np.mean(np.absolute(residuals)) stats['RMSE'] = sigma stats['R2'] = R2 stats['R2_adj'] = R2_adj stats['F'] = F stats['F_dof1'], stats['F_dof2'] = n_dof1, n_dof2 stats['F_pvalue'] = F_pvalue # standard error of the coefficients varcovar = np.linalg.solve(self.objective.hessian_J(theta, X, y), np.eye(n_features + 1)) varcovar *= (sigma ** 2) / n_samples theta_stderr = np.sqrt(np.diag(varcovar)) n_dof = max(n_samples - n_features - 1, 1) tscore, pvalue = t_test_1samp(theta, 0., theta_stderr, n_dof) theta_95ci = confint(theta, theta_stderr, ndof=n_dof, level=0.95) stats['varcovar'] = varcovar stats['theta_stderr'] = theta_stderr stats['theta_tscore'] = tscore stats['theta_pvalue'] = pvalue stats['theta_95ci'] = theta_95ci # model selection stats['AIC'] = 2 * (n_parameters - loglik_full) stats['AICc'] = stats['AIC'] + (2. * n_parameters * (n_parameters + 1) / max(n_samples - n_parameters - 1, 1)) stats['BIC'] = n_parameters * np.log(n_samples) - 2 * loglik_full self._training_stats.update(stats) return self class RegularizedLinearRegress(LinearRegress): """ Linear regression using least squares with regularization. Parameters ---------- minimizer : Optimizer object, optional By default an optimize.GradientDescent instance. regularizer : Regularizer object or None Raises ------ TypeError """ def __init__(self, minimizer=None, regularizer=None): LinearRegress.__init__(self, minimizer=minimizer) self._objective = LeastSquares(regularizer=regularizer) def learn(self, X, y, shrinkage=1.e-6, standardize=True, theta=None): """ Learn by fitting model parameters to training set. Parameters ---------- X : array_like input features, shape=(n_samples, n_features). y : array_like targets, shape=(n_samples,). shrinkage : float or array_like shrinkage parameter (shrinkage = 0 means no regularization). If it is an array, it should have the same size as `theta` and `theta[0]` will be forced to be zero in order to exclude the intercept. 
standardize : boolean, optional standardize data before fitting. theta : array_like, optional guess for model parameters. Returns ------- self : RegularizedLinearRegress object Raises ------ TypeError, ValueError """ self.objective.regularizer.settings(shrinkage=shrinkage) return LinearRegress.learn(self, X, y, standardize=standardize, theta=theta) @property def regularizer(self): """ Regularizer penalizing the objective function. """ return self.objective.regularizer class LassoRegress(RegularizedLinearRegress): """ Linear regression using least squares with LASSO (Least Absolute Shrinkage and Selection Operator) regularization. Parameters ---------- minimizer : Optimizer object, optional By default an optimize.GradientDescent instance. Raises ------ TypeError """ def __init__(self, minimizer=None): if minimizer is None: # J not differentiable at theta=0... minimizer = GradientDescent() regularizer = LassoRegularizer(intercept=False) RegularizedLinearRegress.__init__(self, minimizer=minimizer, regularizer=regularizer) self._objective = LeastSquares(regularizer=regularizer) class RidgeRegress(RegularizedLinearRegress): """ Linear regression using least squares with Tikhonov regularization. Parameters ---------- minimizer : Optimizer object, optional By default an optimize.GradientDescent instance. Raises ------ TypeError """ def __init__(self, minimizer=None): regularizer = TikhonovRegularizer(intercept=False) RegularizedLinearRegress.__init__(self, minimizer=minimizer, regularizer=regularizer)
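# Standalone numpy sketch (assumed, independent of the classes above) of the
# closed-form ordinary least-squares fit that LinearRegress approximates by
# gradient descent; the prepended column of ones mirrors the intercept handling
# in LeastSquares.hessian_J.
import numpy as np


def ols_fit(X, y):
    """Closed-form OLS estimate of theta = [intercept, slope_1, ...]."""
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float)
    X1 = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)
    return np.linalg.lstsq(X1, y, rcond=None)[0]


# Noiseless y = 1 + 2*x is recovered exactly:
#   X = np.arange(10.).reshape(-1, 1); y = 1. + 2. * X[:, 0]
#   ols_fit(X, y)  ->  approximately [1., 2.]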
from time import sleep from unittest import TestCase from koradserial import KoradSerial from koradserial import OnOffState from koradserial import Tracking class KoradSerialTest(TestCase): def setUp(self): self.device = KoradSerial('/dev/tty.usbmodemfd121', True) self.overrideSkippedTests = False def tearDown(self): self.device.close() def _pause(self, delay=1): """ Give the power supply time to digest the commands. :param delay: How long to pause. The default 1 second is overkill. """ sleep(delay) def test_beep(self): """ Test the BEEP command. According to what I've read on the Internet, and confirmed by my trials, is that BEEP0 doesn't work. Thus this test is useless. :return: """ if not self.overrideSkippedTests: return self.device.beep.off() status = self.device.status self.assertEqual(OnOffState.off, status.beep) self._pause() self.device.beep.on() status = self.device.status self.assertEqual(OnOffState.on, status.beep) def test_channel1(self): """ Test Channel 1's functionality. This test assumes a small load (perhaps 100 ohm) is on the power supply so a small amount of current is drawn. """ channel = self.device.channels[0] # Turn off output and ensure that it's reading zeroes. self.device.output.off() self._pause() self.assertEqual(0, channel.output_voltage) self.assertEqual(0, channel.output_current) # Set the current and voltage and ensure it's reporting back correctly. channel.voltage = 12.34 channel.current = 1.234 self.assertAlmostEqual(12.34, channel.voltage, 2) self.assertAlmostEqual(1.234, channel.current, 3) # Set a different current and voltage to ensure that we're not reading old data. channel.voltage = 3.30 channel.current = 0.123 self.assertAlmostEqual(3.30, channel.voltage, 2) self.assertAlmostEqual(0.123, channel.current, 3) # Turn on the output and ensure that current is flowing across the small load. self.device.output.on() self._pause() self.assertAlmostEqual(3.30, channel.output_voltage, 2) self.assertLess(0, channel.output_current) self.device.output.off() def test_lock(self): """ Test the lock state. Ha! Just kidding. This is a stub. It appears that there is no command to alter the lock state. While connected to a serial line and processing commands, the power supply is in a lock state. """ if not self.overrideSkippedTests: return pass def test_memory(self): """ Ensure that memory store/recall works. A two-step process is required to set a memory. * First, one must choose the memory number with a `recall()` command. * Second, one must set the desired voltage and current limit. * Third, one must save the memory with a `save()` command. Recalling a memory setting simply requires calling the `recall()` command. This goes through the test twice with different values to ensure what is read isn't old data. """ channel = self.device.channels[0] m1 = self.device.memories[0] m2 = self.device.memories[1] m3 = self.device.memories[2] m4 = self.device.memories[3] # Pass one with the first set of values. 
m1.recall() channel.voltage = 1.00 channel.current = 0.100 m1.save() m2.recall() channel.voltage = 2.00 channel.current = 0.200 m2.save() m3.recall() channel.voltage = 3.00 channel.current = 0.300 m3.save() m4.recall() channel.voltage = 4.00 channel.current = 0.400 m4.save() m1.recall() self.assertAlmostEqual(1.00, channel.voltage, 2) self.assertAlmostEqual(0.100, channel.current, 3) m2.recall() self.assertAlmostEqual(2.00, channel.voltage, 2) self.assertAlmostEqual(0.200, channel.current, 3) m3.recall() self.assertAlmostEqual(3.00, channel.voltage, 2) self.assertAlmostEqual(0.300, channel.current, 3) m4.recall() self.assertAlmostEqual(4.00, channel.voltage, 2) self.assertAlmostEqual(0.400, channel.current, 3) # Pass two with different values. m1.recall() channel.voltage = 5.00 channel.current = 0.500 m1.save() m2.recall() channel.voltage = 10.00 channel.current = 1.000 m2.save() m3.recall() channel.voltage = 15.00 channel.current = 1.500 m3.save() m4.recall() channel.voltage = 20.00 channel.current = 2.000 m4.save() m1.recall() self.assertAlmostEqual(5.00, channel.voltage, 2) self.assertAlmostEqual(0.500, channel.current, 3) m2.recall() self.assertAlmostEqual(10.00, channel.voltage, 2) self.assertAlmostEqual(1.000, channel.current, 3) m3.recall() self.assertAlmostEqual(15.00, channel.voltage, 2) self.assertAlmostEqual(1.500, channel.current, 3) m4.recall() self.assertAlmostEqual(20.00, channel.voltage, 2) self.assertAlmostEqual(2.000, channel.current, 3) def test_model(self): """ Test the IDN command. Read the model number from the device. """ model = self.device.model self.assertTrue(model.startswith("KORAD")) def test_ocp(self): """ Test Over Current Protection There's no way to get feedback on these, so simply ensure that no exceptions are thrown. """ self.device.over_current_protection.on() self._pause() self.device.over_current_protection.off() def test_ovp(self): """ Test Over Voltage Protection There's no way to get feedback on these, so simply ensure that no exceptions are thrown. """ self.device.over_voltage_protection.on() self._pause() self.device.over_voltage_protection.off() def test_output(self): """ Ensure the device is reporting the output on/off state correctly. """ self.device.output.on() status = self.device.status self.assertEqual(OnOffState.on, status.output) self._pause() self.device.output.off() status = self.device.status self.assertEqual(OnOffState.off, status.output) def test_track(self): """ Test the TRACK commands. **NOTE:** The tests here are hypothetical. I don't have a multi-channel power supply to actually test this against. """ if not self.overrideSkippedTests: return self.device.track(Tracking.parallel) status = self.device.status self.assertEqual(Tracking.parallel, status.tracking) self._pause() self.device.track(Tracking.series) status = self.device.status self.assertEqual(Tracking.series, status.tracking) self._pause() self.device.track(Tracking.independent) status = self.device.status self.assertEqual(Tracking.independent, status.tracking)
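# Minimal usage sketch assembled only from the koradserial calls exercised by
# the tests above (KoradSerial, channels[0], voltage/current setpoints,
# output.on/off, close); the serial port path and values are assumptions.
from koradserial import KoradSerial


def program_channel_one(port='/dev/ttyACM0', volts=3.30, amps=0.123):
    supply = KoradSerial(port)
    try:
        channel = supply.channels[0]
        channel.voltage = volts      # program setpoints before enabling output
        channel.current = amps
        supply.output.on()
        return channel.output_voltage, channel.output_current
    finally:
        supply.output.off()
        supply.close()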
#coding=utf8 import thread, time, sys, os, platform try: import termios, tty termios.tcgetattr, termios.tcsetattr import threading OS = 'Linux' except (ImportError, AttributeError): try: import msvcrt OS = 'Windows' except ImportError: raise Exception('Mac is currently not supported') OS = 'Mac' else: getch = msvcrt.getwch else: def fn(): try: fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) tty.setraw(fd) ch = sys.stdin.read(1) except: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) raise Exception termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch getch = fn CMD_HISTORY = 30 class ChatLikeCMD(): def __init__(self, header = 'LittleCoder', symbol = '>', inPip = None, inputMaintain = False): self.strBuff = [] self.cmdBuff = [] self.historyCmd = -1 self.cursor = 0 self.inPip = [] if inPip == None else inPip self.outPip = [] self.isLaunch = False self.isPause = False self.header = header self.symbol = symbol self.inputMaintain = inputMaintain def reprint_input(self): sys.stdout.write(self.header + self.symbol) if self.strBuff: for i in self.strBuff: sys.stdout.write(i) sys.stdout.flush() def getch(self): c = getch() return c if c != '\r' else '\n' def get_history_command(self, direction): if direction == 'UP': if self.historyCmd < CMD_HISTORY - 1 and self.historyCmd < len(self.cmdBuff) - 1: self.historyCmd += 1 else: if self.historyCmd == 0: return '' if self.historyCmd > 0: self.historyCmd -= 1 if -1 < self.historyCmd < len(self.cmdBuff): return self.cmdBuff[self.historyCmd] def output_command(self, s): self.outPip.append(s if isinstance(s, unicode) else s.decode(sys.stdin.encoding)) if len(self.cmdBuff) >= CMD_HISTORY: self.cmdBuff = self.cmdBuff[::-1].pop()[::-1] self.cmdBuff.append(s) def print_thread(self): while self.isLaunch: if self.inPip: sys.stdout.write('\r' + ' ' * 50 + '\r') sys.stdout.flush() print self.inPip.pop() # linux special sys.stdout.write('\r') sys.stdout.flush() self.reprint_input() time.sleep(0.01) def fast_input_test(self): timer = threading.Timer(0.001, thread.interrupt_main) c = None try: timer.start() c = getch() except: pass timer.cancel() return c def process_direction_char(self, c): if OS == 'Windows': if ord(c) == 72: c = 'A' elif ord(c) == 80: c = 'B' elif ord(c) == 77: c = 'C' elif ord(c) == 75: c = 'D' if ord(c) == 68: # LEFT self.process_char('\b') return # cursor bugs if self.cursor > 0: if OS == 'Windows': sys.stdout.write(chr(224) + chr(75)) else: sys.stdout.write(chr(27) + '[C') self.cursor -= 1 elif ord(c) == 67: # RIGHT return # cursor bugs if self.cursor < len(self.strBuff): if OS == 'Windows': sys.stdout.write(chr(224) + chr(77)) else: sys.stdout.write(chr(27) + '[D') self.cursor += 1 elif ord(c) == 65: # UP hc = self.get_history_command('UP') if not hc is None: self.strBuff = [i for i in hc] self.cursor = len(hc) sys.stdout.write('\r' + ' ' * 50 + '\r') self.reprint_input() elif ord(c) == 66: # DOWN hc = self.get_history_command('DOWN') if not hc is None: self.strBuff = [i for i in hc] self.cursor = len(hc) sys.stdout.write('\r' + ' ' * 50 + '\r') self.reprint_input() else: raise Exception(c) def process_char(self, c): if ord(c) == 27: # Esc if OS == 'Linux': fitc1 = self.fast_input_test() if ord(fitc1) == 91: fitc2 = self.fast_input_test() if 65 <= ord(fitc2) <= 68: self.process_direction_char(fitc2) return sys.stdout.write('\r' + ' ' * 50 + '\r') sys.stdout.flush() self.reprint_input() self.outPip.append(c) time.sleep(0.02) if 'fitc1' in dir(): self.process_char(fitc1) self.cursor += 1 if 'fitc2' in dir(): 
self.process_char(fitc2) self.cursor += 1 elif ord(c) == 3: # Ctrl+C self.stop() self.isPause = True if raw_input('Exit?(y) ') == 'y': sys.stdout.write('Command Line Exit') else: self.start() self.isPause = False elif ord(c) in (8, 127): # Backspace if self.strBuff: if ord(self.strBuff[-1]) < 128: sys.stdout.write('\b \b') else: sys.stdout.write('\b\b \b') if OS == 'Linux': self.strBuff.pop() self.strBuff.pop() self.strBuff.pop() self.cursor -= 1 elif c == '\n': if self.strBuff: if self.inputMaintain: sys.stdout.write(c) else: sys.stdout.write('\r' + ' ' * 50 + '\r') sys.stdout.flush() self.reprint_input() self.output_command(''.join(self.strBuff)) self.strBuff = [] self.historyCmd = -1 elif ord(c) == 224: # Windows direction if OS == 'Windows': direction = self.getch() self.process_direction_char(direction) else: sys.stdout.write(c) sys.stdout.flush() self.strBuff.append(c) self.cursor += 1 def command_thread(self): c = None while self.isLaunch: c = self.getch() self.process_char(c) time.sleep(0.01) def start(self): self.isLaunch = True thread.start_new_thread(self.print_thread, ()) self.reprint_input() thread.start_new_thread(self.command_thread, ()) def stop(self): sys.stdout.write('\r' + ' ' * 50 + '\r') sys.stdout.flush() self.isLaunch = False def print_line(self, msg = None): self.inPip.append(msg) def clear(self): os.system('cls' if platform.system() == 'Windows' else 'clear') self.reprint_input() def get_command_pip(self): return self.outPip def set_header(self, header): self.header = header if __name__ == '__main__': c = ChatLikeCMD() s = c.get_command_pip() c.start() def loopinput(c): while True: c.print_line('LOOP INPUT......') time.sleep(3) thread.start_new_thread(loopinput, (c,)) while c.isLaunch or c.isPause: if s: c.print_line(s.pop()) time.sleep(0.01)
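# Hedged alternative sketch (Linux/termios only, not used by ChatLikeCMD): the
# same raw single-character read that fn() performs above, wrapped in a context
# manager so the terminal settings are restored even if the read raises.
import sys
import termios
import tty
from contextlib import contextmanager


@contextmanager
def raw_terminal():
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        yield fd
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)


def getch_raw():
    with raw_terminal():
        return sys.stdin.read(1)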
import enum import logging import uuid from datetime import datetime, timedelta, timezone from typing import Any, List, NamedTuple, Optional from .conditions import render from .exceptions import MissingObjects, TransactionTokenExpired from .models import unpack_from_dynamodb from .signals import object_deleted, object_loaded, object_saved from .util import dump_key, get_table_name __all__ = [ "PreparedTransaction", "ReadTransaction", "Transaction", "TxItem", "TxType", "WriteTransaction", ] logger = logging.getLogger("bloop.transactions") MAX_TRANSACTION_ITEMS = 10 # per docs this is 10 minutes, minus a bit for clock skew guard MAX_TOKEN_LIFETIME = timedelta(minutes=9, seconds=30) class TxType(enum.Enum): """Enum whose value is the wire format of its name""" Get = "Get" Check = "CheckCondition" Delete = "Delete" Update = "Update" @classmethod def by_alias(cls, name: str) -> "TxType": """get a type by the common bloop operation name: get/check/delete/save""" return { "get": TxType.Get, "check": TxType.Check, "delete": TxType.Delete, "save": TxType.Update, }[name] class TxItem(NamedTuple): """ Includes the type, an object, and its condition settings. The common way to construct an item is through the ``new`` method: .. code-block:: pycon >>> get_item = TxItem.new("get", some_obj) >>> save_item = TxItem.new("save", some_obj) """ #: How this item will be used in a transaction type: TxType #: The object that will be modified, persisted, or referenced in a transaction obj: Any #: An optional condition that constrains an update condition: Optional[Any] @classmethod def new(cls, type_alias, obj, condition=None) -> "TxItem": return TxItem(type=TxType.by_alias(type_alias), obj=obj, condition=condition) @property def is_update(self): """Whether this should render an "UpdateExpression" in the TransactItem""" return self.type is TxType.Update @property def should_render_obj(self): """Whether the object values should be rendered in the TransactItem""" return self.type not in {TxType.Check, TxType.Get} # hack to get around NamedTuple field docstrings renaming: # https://stackoverflow.com/a/39320627 TxItem.type.__doc__ = """How this item will be used in a transaction""" TxItem.obj.__doc__ = """The object that will be modified, persisted, or referenced in a transaction""" TxItem.condition.__doc__ = """An optional condition that constrains an update""" class Transaction: """ Holds a collection of transaction items to be rendered into a PreparedTransaction. If used as a context manager, calls prepare() and commit() when the outermost context exits. .. code-block:: pycon >>> engine = Engine() >>> tx = Transaction(engine) >>> tx.mode = "w" >>> p1 = tx.prepare() >>> p2 = tx.prepare() # different instances >>> with tx: ... pass >>> # tx.prepare().commit() is called here """ mode: str _items: List[TxItem] def __init__(self, engine): self.engine = engine self._items = [] self._ctx_depth = 0 def __enter__(self): self._ctx_depth += 1 return self def __exit__(self, exc_type, exc_value, exc_tb): self._ctx_depth -= 1 if exc_type: return if self._ctx_depth == 0: self.prepare().commit() def _extend(self, items): if len(self._items) + len(items) > MAX_TRANSACTION_ITEMS: raise RuntimeError(f"transaction cannot exceed {MAX_TRANSACTION_ITEMS} items.") self._items += items def prepare(self): """ Create a new PreparedTransaction that can be committed. This is called automatically when exiting the transaction as a context: .. 
code-block:: python >>> engine = Engine() >>> tx = WriteTransaction(engine) >>> prepared = tx.prepare() >>> prepared.commit() # automatically calls commit when exiting >>> with WriteTransaction(engine) as tx: ... # modify the transaction here ... pass >>> # tx commits here :return: """ tx = PreparedTransaction() tx.prepare( engine=self.engine, mode=self.mode, items=self._items, ) return tx class PreparedTransaction: """ Transaction that can be committed once or more. Usually created from a :class:`~bloop.transactions.Transaction` instance. """ mode: str items: List[TxItem] #: Unique id used as the "ClientRequestToken" for write transactions. This is #: generated but not sent with a read transaction, since reads are not idempotent. tx_id: str #: When the transaction was first committed at. A prepared write transaction can only call commit #: again within 10 minutes of its first commit. This is ``None`` until commit() is called at least once. first_commit_at: Optional[datetime] = None def __init__(self): self.engine = None self._request = None def prepare(self, engine, mode, items) -> None: """ Create a unique transaction id and dumps the items into a cached request object. """ self.tx_id = str(uuid.uuid4()).replace("-", "") self.engine = engine self.mode = mode self.items = items self._prepare_request() def _prepare_request(self): self._request = [ { item.type.value: { "Key": dump_key(self.engine, item.obj), "TableName": get_table_name(self.engine, item.obj), **render( self.engine, obj=item.obj if item.should_render_obj else None, condition=item.condition, update=item.is_update), } } for item in self.items ] def commit(self) -> None: """ Commit the transaction with a fixed transaction id. A read transaction can call commit() any number of times, while a write transaction can only use the same tx_id for 10 minutes from the first call. """ now = datetime.now(timezone.utc) if self.first_commit_at is None: self.first_commit_at = now if self.mode == "r": response = self.engine.session.transaction_read(self._request) elif self.mode == "w": if now - self.first_commit_at > MAX_TOKEN_LIFETIME: raise TransactionTokenExpired response = self.engine.session.transaction_write(self._request, self.tx_id) else: raise ValueError(f"unrecognized mode {self.mode}") self._handle_response(response) def _handle_response(self, response: dict) -> None: if self.mode == "w": for item in self.items: obj = item.obj if item.type is TxType.Delete: object_deleted.send(self.engine, engine=self.engine, obj=obj) elif item.type is TxType.Update: object_saved.send(self.engine, engine=self.engine, obj=obj) else: blobs = response["Responses"] not_loaded = set() if len(self.items) != len(blobs): raise RuntimeError("malformed response from DynamoDb") for item, blob in zip(self.items, blobs): obj = item.obj if not blob: not_loaded.add(obj) continue unpack_from_dynamodb(attrs=blob["Item"], expected=obj.Meta.columns, engine=self.engine, obj=obj) object_loaded.send(self.engine, engine=self.engine, obj=obj) if not_loaded: logger.info("loaded {} of {} objects".format(len(self.items) - len(not_loaded), len(self.items))) raise MissingObjects("Failed to load some objects.", objects=not_loaded) logger.info("successfully loaded {} objects".format(len(self.items))) class ReadTransaction(Transaction): """ Loads all items in the same transaction. Items can be from different models and tables. """ mode = "r" def load(self, *objs) -> "ReadTransaction": """ Add one or more objects to be loaded in this transaction. 
At most 10 items can be loaded in the same transaction. All objects will be loaded each time you call commit(). :param objs: Objects to add to the set that are loaded in this transaction. :return: this transaction for chaining :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded. """ self._extend([TxItem.new("get", obj) for obj in objs]) return self class WriteTransaction(Transaction): """ Applies all updates in the same transaction. Items can be from different models and tables. As with an engine, you can apply conditions to each object that you save or delete, or a condition for the entire transaction that won't modify the specified object: .. code-block:: python # condition on some_obj >>> tx.save(some_obj, condition=SomeModel.name.begins_with("foo")) # condition on the tx, based on the values of some_other_obj >>> tx.check(some_other_obj, condition=ThatModel.capacity >= 100) """ mode = "w" def check(self, obj, condition) -> "WriteTransaction": """ Add a condition which must be met for the transaction to commit. While the condition is checked against the provided object, that object will not be modified. It is only used to provide the hash and range key to apply the condition to. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedCommit object multiple times. :param obj: The object to use for the transaction condition. This object will not be modified. :param condition: A condition on an object which must hold for the transaction to commit. :return: this transaction for chaining """ self._extend([TxItem.new("check", obj, condition)]) return self def save(self, *objs, condition=None) -> "WriteTransaction": """ Add one or more objects to be saved in this transaction. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedCommit object multiple times. :param objs: Objects to add to the set that are updated in this transaction. :param condition: A condition for these objects which must hold for the transaction to commit. :return: this transaction for chaining """ self._extend([TxItem.new("save", obj, condition) for obj in objs]) return self def delete(self, *objs, condition=None) -> "WriteTransaction": """ Add one or more objects to be deleted in this transaction. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedCommit object multiple times. :param objs: Objects to add to the set that are deleted in this transaction. :param condition: A condition for these objects which must hold for the transaction to commit. :return: this transaction for chaining """ self._extend([TxItem.new("delete", obj, condition) for obj in objs]) return self
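# Hedged usage sketch drawn from the docstrings above; it assumes an Engine
# already bound to the relevant models, and the condition arguments are meant
# to be built as in the documented examples (SomeModel.name.begins_with("foo"),
# ThatModel.capacity >= 100).
def commit_writes(engine, some_obj, some_other_obj, save_condition, check_condition):
    # The context manager calls prepare() and commit() when the outermost
    # block exits without an exception.
    with WriteTransaction(engine) as tx:
        tx.save(some_obj, condition=save_condition)
        tx.check(some_other_obj, condition=check_condition)


def refresh(engine, *objs):
    # A prepared read transaction can call commit() any number of times to
    # reload the same objects; only write transactions are bound by the
    # MAX_TOKEN_LIFETIME window.
    prepared = ReadTransaction(engine).load(*objs).prepare()
    prepared.commit()
    return prepared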
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A script to pull licenses/notices/source code for Java dependencies. It generates a CSV file with [dependency_name, url_to_license, license_type, source_included] """ import argparse import csv import json import logging import os import shutil import threading import traceback import yaml from bs4 import BeautifulSoup from datetime import datetime from multiprocessing.pool import ThreadPool from queue import Queue from tenacity import retry from tenacity import stop_after_attempt from tenacity import wait_fixed from urllib.request import urlopen, URLError, HTTPError SOURCE_CODE_REQUIRED_LICENSES = ['lgpl', 'gpl', 'cddl', 'mpl', 'gnu', 'mozilla public license'] RETRY_NUM = 9 THREADS = 16 @retry(reraise=True, wait=wait_fixed(5), stop=stop_after_attempt(RETRY_NUM)) def pull_from_url(file_name, url, dep, no_list): if url == 'skip': return try: url_read = urlopen(url) with open(file_name, 'wb') as temp_write: shutil.copyfileobj(url_read, temp_write) logging.debug( 'Successfully pulled {file_name} from {url} for {dep}'.format( url=url, file_name=file_name, dep=dep)) except URLError as e: traceback.print_exc() if pull_from_url.retry.statistics["attempt_number"] < RETRY_NUM: logging.error('Invalid url for {dep}: {url}. Retrying...'.format( url=url, dep=dep)) raise else: logging.error( 'Invalid url for {dep}: {url} after {n} retries.'.format( url=url, dep=dep, n=RETRY_NUM)) with thread_lock: no_list.append(dep) return except HTTPError as e: traceback.print_exc() if pull_from_url.retry.statistics["attempt_number"] < RETRY_NUM: logging.info( 'Received {code} from {url} for {dep}. Retrying...'.format( code=e.code, url=url, dep=dep)) raise else: logging.error( 'Received {code} from {url} for {dep} after {n} retries.'. format(code=e.code, url=url, dep=dep, n=RETRY_NUM)) with thread_lock: no_list.append(dep) return except Exception as e: traceback.print_exc() if pull_from_url.retry.statistics["attempt_number"] < RETRY_NUM: logging.error( 'Error occurred when pull {file_name} from {url} for {dep}. Retrying...' .format(url=url, file_name=file_name, dep=dep)) raise else: logging.error( 'Error occurred when pull {file_name} from {url} for {dep} after {n} retries.' 
.format(url=url, file_name=file_name, dep=dep, n=RETRY_NUM)) with thread_lock: no_list.append(dep) return def pull_source_code(base_url, dir_name, dep): # base_url example: https://repo1.maven.org/maven2/org/mortbay/jetty/jsp-2.1/6.1.14/ try: soup = BeautifulSoup(urlopen(base_url).read(), "html.parser") except: logging.error('Error reading source base from {base_url}'.format(base_url=base_url)) raise source_count = 0 for href in (a["href"] for a in soup.select("a[href]")): if href.endswith( '.jar') and 'sources.jar' in href: # download sources jar file only file_name = dir_name + '/' + href url = base_url + '/' + href logging.debug('Pulling source from {url}'.format(url=url)) pull_from_url(file_name, url, dep, incorrect_source_url) source_count = source_count + 1 if source_count == 0: raise RuntimeError('No source found at {base_url}'.format(base_url=base_url)) @retry(reraise=True, stop=stop_after_attempt(3)) def write_to_csv(csv_list): csv_columns = [ 'dependency_name', 'url_to_license', 'license_type', 'source_included' ] csv_file = "{output_dir}/beam_java_dependency_list.csv".format( output_dir=output_dir) try: with open(csv_file, 'w') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=csv_columns) writer.writeheader() for data in csv_list: writer.writerow(data) except: traceback.print_exc() raise def execute(dep): ''' An example of dep. { "moduleName": "antlr:antlr", "moduleUrl": "http://www.antlr.org/", "moduleVersion": "2.7.7", "moduleLicense": "BSD License", "moduleLicenseUrl": "http://www.antlr.org/license.html" } ''' name = dep['moduleName'].split(':')[1] version = dep['moduleVersion'] name_version = name + '-' + version # javac is not a runtime dependency if name == 'javac': logging.debug('Skipping', name_version) return # skip self dependencies if dep['moduleName'].lower().startswith('beam'): logging.debug('Skipping', name_version) return dir_name = '{output_dir}/{name_version}.jar'.format( output_dir=output_dir, name_version=name_version) # if auto pulled, directory is existing at {output_dir} if not os.path.isdir(dir_name): os.mkdir(dir_name) # pull license try: license_url = dep_config[name][version]['license'] except: try: license_url = dep['moduleLicenseUrl'] except: # url cannot be found, add to no_licenses and skip to pull. with thread_lock: no_licenses.append(name_version) license_url = 'skip' pull_from_url(dir_name + '/LICENSE', license_url, name_version, no_licenses) # pull notice try: notice_url = dep_config[name][version]['notice'] pull_from_url(dir_name + '/NOTICE', notice_url, name_version) except: pass else: try: license_url = dep['moduleLicenseUrl'] except: license_url = '' logging.debug( 'License/notice for {name_version} were pulled automatically.'. format(name_version=name_version)) # get license_type to decide if pull source code. try: license_type = dep['moduleLicense'] except: try: license_type = dep_config[name][version]['type'] except: license_type = 'no_license_type' with thread_lock: no_license_type.append(name_version) # pull source code if license_type is one of SOURCE_CODE_REQUIRED_LICENSES. 
if any(x in license_type.lower() for x in SOURCE_CODE_REQUIRED_LICENSES): try: base_url = dep_config[name][version]['source'] except: module = dep['moduleName'].split(':')[0].replace('.', '/') base_url = maven_url_temp.format(module=module + '/' + name, version=version) pull_source_code(base_url, dir_name, name_version) source_included = True else: source_included = False csv_dict = { 'dependency_name': name_version, 'url_to_license': license_url, 'license_type': license_type, 'source_included': source_included } with thread_lock: csv_list.append(csv_dict) if __name__ == "__main__": start = datetime.now() parser = argparse.ArgumentParser() parser.add_argument('--license_index', required=True) parser.add_argument('--output_dir', required=True) parser.add_argument('--dep_url_yaml', required=True) args = parser.parse_args() license_index = args.license_index output_dir = args.output_dir dep_url_yaml = args.dep_url_yaml logging.getLogger().setLevel(logging.INFO) # index.json is generated by Gradle plugin. with open(license_index) as f: dependencies = json.load(f) with open(dep_url_yaml) as file: dep_config = yaml.full_load(file) maven_url_temp = 'https://repo1.maven.org/maven2/{module}/{version}' csv_list = [] no_licenses = [] no_license_type = [] incorrect_source_url = [] logging.info( 'Pulling license for {num_deps} dependencies using {num_threads} threads.' .format(num_deps=len(dependencies['dependencies']), num_threads=THREADS)) thread_lock = threading.Lock() pool = ThreadPool(THREADS) pool.map(execute, dependencies['dependencies']) write_to_csv(csv_list) error_msg = [] run_status = 'succeed' if no_licenses: logging.error(no_licenses) how_to = '**************************************** ' \ 'Licenses were not able to be pulled ' \ 'automatically for some dependencies. Please search source ' \ 'code of the dependencies on the internet and add "license" ' \ 'and "notice" (if available) field to {yaml_file} for each ' \ 'missing license. Dependency List: [{dep_list}]'.format( dep_list=','.join(sorted(no_licenses)), yaml_file=dep_url_yaml) logging.error(how_to) error_msg.append(how_to) run_status = 'failed' if no_license_type: how_to = '**************************************** ' \ 'License type of some dependencies were not ' \ 'identified. The license type is used to decide whether the ' \ 'source code of the dependency should be pulled or not. ' \ 'Please add "type" field to {yaml_file} for each dependency. ' \ 'Dependency List: [{dep_list}]'.format( dep_list=','.join(sorted(no_license_type)), yaml_file=dep_url_yaml) error_msg.append(how_to) run_status = 'failed' if incorrect_source_url: how_to = '**************************************** ' \ 'Urls to maven repo for some dependencies ' \ 'were not able to be generated automatically. Please add ' \ '"source" field to {yaml_file} for each dependency. ' \ 'Dependency List: [{dep_list}]'.format( dep_list=','.join(sorted(incorrect_source_url)), yaml_file=dep_url_yaml) error_msg.append(how_to) run_status = 'failed' end = datetime.now() logging.info( 'pull_licenses_java.py {status}. It took {sec} seconds with {threads} threads.' .format(status=run_status, sec=(end - start).total_seconds(), threads=THREADS)) if error_msg: raise RuntimeError('{n} error(s) occurred.'.format(n=len(error_msg)), error_msg)
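# Assumed shape of the --dep_url_yaml overrides, inferred from how dep_config
# is indexed above (dep_config[name][version]['license' | 'notice' | 'type' |
# 'source']).  The dependency, version, and URLs below are placeholders, not
# real entries.
import yaml

SAMPLE_DEP_URL_YAML = """
libfoo:
  1.2.3:
    license: "https://example.org/libfoo/LICENSE"
    notice: "https://example.org/libfoo/NOTICE"
    type: "Apache License 2.0"
    source: "https://repo1.maven.org/maven2/org/example/libfoo/1.2.3"
"""

overrides = yaml.full_load(SAMPLE_DEP_URL_YAML)
assert overrides['libfoo']['1.2.3']['type'].startswith('Apache')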
# Copyright (c) 2016 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Copyright (c) 2008-2009 The Hewlett-Packard Development Company # Copyright (c) 2004-2006 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Nathan Binkert import os import re import sys import convert import jobfile from attrdict import attrdict, multiattrdict, optiondict from code_formatter import code_formatter from multidict import multidict from orderdict import orderdict from smartdict import SmartDict from sorteddict import SortedDict # panic() should be called when something happens that should never # ever happen regardless of what the user does (i.e., an acutal m5 # bug). def panic(fmt, *args): print >>sys.stderr, 'panic:', fmt % args sys.exit(1) # fatal() should be called when the simulation cannot continue due to # some condition that is the user's fault (bad configuration, invalid # arguments, etc.) and not a simulator bug. def fatal(fmt, *args): print >>sys.stderr, 'fatal:', fmt % args sys.exit(1) # warn() should be called when the user should be warned about some condition # that may or may not be the user's fault, but that they should be made aware # of as it may affect the simulation or results. def warn(fmt, *args): print >>sys.stderr, 'warn:', fmt % args # inform() should be called when the user should be informed about some # condition that they may be interested in. 
def inform(fmt, *args): print >>sys.stdout, 'info:', fmt % args class Singleton(type): def __call__(cls, *args, **kwargs): if hasattr(cls, '_instance'): return cls._instance cls._instance = super(Singleton, cls).__call__(*args, **kwargs) return cls._instance def addToPath(path): """Prepend given directory to system module search path. We may not need this anymore if we can structure our config library more like a Python package.""" # if it's a relative path and we know what directory the current # python script is in, make the path relative to that directory. if not os.path.isabs(path) and sys.path[0]: path = os.path.join(sys.path[0], path) path = os.path.realpath(path) # sys.path[0] should always refer to the current script's directory, # so place the new dir right after that. sys.path.insert(1, path) # Apply method to object. # applyMethod(obj, 'meth', <args>) is equivalent to obj.meth(<args>) def applyMethod(obj, meth, *args, **kwargs): return getattr(obj, meth)(*args, **kwargs) # If the first argument is an (non-sequence) object, apply the named # method with the given arguments. If the first argument is a # sequence, apply the method to each element of the sequence (a la # 'map'). def applyOrMap(objOrSeq, meth, *args, **kwargs): if not isinstance(objOrSeq, (list, tuple)): return applyMethod(objOrSeq, meth, *args, **kwargs) else: return [applyMethod(o, meth, *args, **kwargs) for o in objOrSeq] def compareVersions(v1, v2): """helper function: compare arrays or strings of version numbers. E.g., compare_version((1,3,25), (1,4,1)') returns -1, 0, 1 if v1 is <, ==, > v2 """ def make_version_list(v): if isinstance(v, (list,tuple)): return v elif isinstance(v, str): return map(lambda x: int(re.match('\d+', x).group()), v.split('.')) else: raise TypeError v1 = make_version_list(v1) v2 = make_version_list(v2) # Compare corresponding elements of lists for n1,n2 in zip(v1, v2): if n1 < n2: return -1 if n1 > n2: return 1 # all corresponding values are equal... see if one has extra values if len(v1) < len(v2): return -1 if len(v1) > len(v2): return 1 return 0 def crossproduct(items): if len(items) == 1: for i in items[0]: yield (i,) else: for i in items[0]: for j in crossproduct(items[1:]): yield (i,) + j def flatten(items): while items: item = items.pop(0) if isinstance(item, (list, tuple)): items[0:0] = item else: yield item # force scalars to one-element lists for uniformity def makeList(objOrList): if isinstance(objOrList, list): return objOrList return [objOrList] def printList(items, indent=4): line = ' ' * indent for i,item in enumerate(items): if len(line) + len(item) > 76: print line line = ' ' * indent if i < len(items) - 1: line += '%s, ' % item else: line += item print line def readCommand(cmd, **kwargs): """run the command cmd, read the results and return them this is sorta like `cmd` in shell""" from subprocess import Popen, PIPE, STDOUT if isinstance(cmd, str): cmd = cmd.split() no_exception = 'exception' in kwargs exception = kwargs.pop('exception', None) kwargs.setdefault('shell', False) kwargs.setdefault('stdout', PIPE) kwargs.setdefault('stderr', STDOUT) kwargs.setdefault('close_fds', True) try: subp = Popen(cmd, **kwargs) except Exception, e: if no_exception: return exception raise return subp.communicate()[0] def makeDir(path): """Make a directory if it doesn't exist. 
    If the path does exist, ensure that it is a directory"""
    if os.path.exists(path):
        if not os.path.isdir(path):
            raise AttributeError, "%s exists but is not a directory" % path
    else:
        os.mkdir(path)

def isInteractive():
    """Check if the simulator is run interactively or in a batch
    environment"""
    return sys.__stdin__.isatty()
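# A small usage sketch of the helpers above (illustrative, not part of m5).
# It assumes the imports at the top of this module resolve, i.e. that it is
# run from inside the gem5 Python tree, and it uses Python 2 print
# statements to match the rest of the file.
if __name__ == '__main__':
    # compareVersions() accepts dotted strings or tuples and returns -1/0/1.
    print compareVersions('1.3.25', '1.4.1')     # -1
    print compareVersions((2, 0, 1), (2, 0))     # 1, extra component wins

    # crossproduct() yields every combination of the given sequences.
    for combo in crossproduct([['arm', 'x86'], ['opt', 'debug']]):
        print combo                              # ('arm', 'opt'), ...

    # flatten() walks nested lists/tuples and yields scalars in order.
    print list(flatten([1, [2, (3, 4)], 5]))     # [1, 2, 3, 4, 5]

    # readCommand() is a thin Popen wrapper; stderr is merged into stdout.
    print readCommand('echo hello', exception='')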
""" .. py:module:: resources_utils :platform: Unix :synopsis: Utility functions to populate Django models with resources. Utility functions to populate Django models with given resources. .. warning:: As these functions don't restrict multiple instances of the same entry to be added to the database, they should be used with caution. The basic resources from ``project_root/resources/`` have already been converted into json-fixture, which is available in ``project_root/tweets/fixtures/fixtures.json``. It is advised to use the fixture in order to repopulate the database after migrations and to use functions defined here only to add new content afterwards. After adding the content, new database fixture can then be created as:: $> cd project_root/ $> python manage.py dumpdata --format=json --indent=4 tweets > tweets/fixtures/fixtures.json For more info about working with Django and initial model data, see `Django's documentation <https://docs.djangoproject.com/en/1.6/howto/initial-data/>`_. """ import os import sys def __set_django(): if not 'DJANGO_SETTINGS_MODULE' in os.environ: sys.path.append(os.path.join(os.path.dirname(__file__), '..')) os.environ['DJANGO_SETTINGS_MODULE'] = 'TwatBot.settings' def populate_bracketed_color_bigrams(filepath = "../resources/bracketed_color_bigrams.tsv"): """Populate BracketedColorBigrams model with entries found from file. File should be in tab separated format, where each line has model fields in the following order: *start_bracket, w1, w2, end_bracket, f*. .. note:: The first line of the file is though to contain field names and is omitted. **Args** | filepath (str): Path to the file with entries. """ __set_django() from tweets.models import BracketedColorBigram with open(filepath, 'r') as filehandle: entries = filehandle.readlines() for e in entries[1:]: try: sb, w1, w2, eb, f = e.strip().split("\t") print "Reading: ", sb, w1, w2, eb, f instance = BracketedColorBigram(start_bracket = sb, w1 = w1, w2 = w2,\ end_bracket = eb, f = int(f)) instance.save() except: pass def populate_colormap(filepath = "../resources/color_map.tsv"): """Populate ColorMap model with entries found from file. File should be in tab separated format, where each line has model fields in the following order: *stereotype, color, html*. .. note:: The first line of the file is though to contain field names and is omitted. **Args** | filepath (str): Path to the file with entries. """ __set_django() from tweets.models import ColorMap, Color import color as cu with open(filepath, 'r') as filehandle: entries = filehandle.readlines() for e in entries[1:]: try: s, c, html = e.strip().split("\t") print "Reading: ", s, c, html html = html.strip() R, G, B = cu.html2rgb(html) chex = cu.rgb2hex((R, G, B)) l, a, b = (cu._2lab((R, G, B))).get_value_tuple() color_inst = Color.objects.get_or_none(html = html) if color_inst is None: color_inst = Color(html = html, hex = chex, rgb_r = R, rgb_g = G, rgb_b = B, l = l, a = a, b = b) color_inst.save() instance = ColorMap(stereotype = s, base_color = c, color = color_inst) instance.save() except: pass def populate_color_unigrams(filepath = "../resources/color_unigrams.tsv"): """Populate ColorUnigrams model with entries found from file. File should be in tab separated format, where each line has model fields in the following order: *solid_compound, f*. .. note:: The first line of the file is though to contain field names and is omitted. **Args** | filepath (str): Path to the file with entries. 
""" __set_django() from tweets.models import ColorUnigram with open(filepath, 'r') as filehandle: entries = filehandle.readlines() for e in entries[1:]: try: s, f = e.strip().split("\t") print "Reading: ", s, f instance = ColorUnigram(solid_compound = s, f = int(f)) instance.save() except: pass def populate_everycolorbot_tweets(filepath = "../resources/everycolorbot_tweets.tsv"): """Populate EveryColorBotTweets model with entries found from file. File should be in tab separated format, where each line has model fields in the following order: *hex, url*. .. note:: The first line of the file is though to contain field names and is omitted. **Args** | filepath (str): Path to the file with entries. """ __set_django() from tweets.models import EveryColorBotTweet, Color import color as cu with open(filepath, 'r') as filehandle: entries = filehandle.readlines() for e in entries[1:]: try: chex, u = e.strip().split("\t") print "Reading: ", chex, u R, G, B = cu.hex2rgb(chex) html = cu.rgb2html((R, G, B)) l, a, b = (cu._2lab((R, G, B))).get_value_tuple() color_inst = Color.objects.get_or_none(html = html) if color_inst is None: color_inst = Color(html = html, hex = chex, rgb_r = R, rgb_g = G, rgb_b = B, l = l, a = a, b = b) color_inst.save() instance = EveryColorBotTweet(url = u, color = color_inst, tweeted = False) instance.save() except: pass def populate_plural_color_bigrams(filepath = "../resources/plural_color_bigrams.tsv"): """Populate PluralColorBigrams model with entries found from file. File should be in tab separated format, where each line has model fields in the following order: *w1, w2, f, singular*. .. note:: The first line of the file is though to contain field names and is omitted. **Args** | filepath (str): Path to the file with entries. """ __set_django() from tweets.models import PluralColorBigram with open(filepath, 'r') as filehandle: entries = filehandle.readlines() for e in entries[1:]: try: w1, w2, f, s = e.strip().split("\t") print "Reading: ", w1, w2, f, s instance = PluralColorBigram(w1 = w1, w2 = w2,\ singular = s, f = int(f)) instance.save() except: pass def populate_unbracketed_color_bigrams(filepath = "../resources/unbracketed_color_bigrams.tsv"): """Populate UnnracketedColorBigrams model with entries found from file. File should be in tab separated format, where each line has model fields in the following order: *w1, w2, f*. .. note:: The first line of the file is though to contain field names and is omitted. **Args** | filepath (``str``): Path to the file with entries. """ __set_django() from tweets.models import UnbracketedColorBigram with open(filepath, 'r') as filehandle: entries = filehandle.readlines() for e in entries[1:]: w1, w2, f = e.strip().split("\t") print "Reading: ", w1, w2, f try: instance = UnbracketedColorBigram(w1 = w1, w2 = w2, f = int(f)) instance.save() except: pass def split_unigrams(): """Split ``ColorUnigram``-instances from database into two words and save them into ``ColorUnigramSplit``-model. Splitting is done by looking for color names from ``ColorMap``-model's ``stereotype`` and ``base_color`` fields. Unigrams which can not be splitted into two parts based on these color names are omitted. 
""" __set_django() from tweets.models import ColorUnigram, ColorUnigramSplit, ColorMap colormaps = ColorMap.objects.all() colornames = set() for c in colormaps: colornames.add(c.stereotype) colornames.add(c.base_color) colornames = sorted(colornames) unigrams = ColorUnigram.objects.all() splits = [] for u in unigrams: solid = u.solid_compound for c in colornames: if solid.startswith(c): if solid[len(c):] in colornames: w1 = solid[:len(c)] w2 = solid[len(c):] print w1, w2 splits.append((w1, w2)) if ColorUnigramSplit.objects.get_or_none(w1 = w1, w2 = w2) is None: cus = ColorUnigramSplit(w1 = w1, w2 = w2, original = u) cus.save() break print "Found", len(splits), "splits from original", len(unigrams), "unigrams." def populate_default(): """Call all distinct populate functions with default parameters. Default parameter for each model points into `../resources/<relevant_file_name>.tsv`. """ populate_plural_color_bigrams("../resources/plural_color_bigrams.tsv") populate_color_unigrams("../resources/color_unigrams.tsv") populate_colormap("../resources/color_map.tsv") populate_unbracketed_color_bigrams("../resources/unbracketed_color_bigrams.tsv") populate_bracketed_color_bigrams("../resources/bracketed_color_bigrams.tsv") populate_everycolorbot_tweets("../resources/everycolorbot_tweets.tsv") split_unigrams() if __name__ == "__main__": populate_default()
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .sub_resource import SubResource from .backend_address_pool import BackendAddressPool from .inbound_nat_rule import InboundNatRule from .security_rule import SecurityRule from .network_interface_dns_settings import NetworkInterfaceDnsSettings from .network_interface import NetworkInterface from .network_security_group import NetworkSecurityGroup from .route import Route from .route_table import RouteTable from .service_endpoint_properties_format import ServiceEndpointPropertiesFormat from .public_ip_address_sku import PublicIPAddressSku from .public_ip_address_dns_settings import PublicIPAddressDnsSettings from .public_ip_address import PublicIPAddress from .ip_configuration import IPConfiguration from .resource_navigation_link import ResourceNavigationLink from .subnet import Subnet from .network_interface_ip_configuration import NetworkInterfaceIPConfiguration from .application_gateway_backend_address import ApplicationGatewayBackendAddress from .application_gateway_backend_address_pool import ApplicationGatewayBackendAddressPool from .application_gateway_connection_draining import ApplicationGatewayConnectionDraining from .application_gateway_backend_http_settings import ApplicationGatewayBackendHttpSettings from .application_gateway_backend_health_server import ApplicationGatewayBackendHealthServer from .application_gateway_backend_health_http_settings import ApplicationGatewayBackendHealthHttpSettings from .application_gateway_backend_health_pool import ApplicationGatewayBackendHealthPool from .application_gateway_backend_health import ApplicationGatewayBackendHealth from .application_gateway_sku import ApplicationGatewaySku from .application_gateway_ssl_policy import ApplicationGatewaySslPolicy from .application_gateway_ip_configuration import ApplicationGatewayIPConfiguration from .application_gateway_authentication_certificate import ApplicationGatewayAuthenticationCertificate from .application_gateway_ssl_certificate import ApplicationGatewaySslCertificate from .application_gateway_frontend_ip_configuration import ApplicationGatewayFrontendIPConfiguration from .application_gateway_frontend_port import ApplicationGatewayFrontendPort from .application_gateway_http_listener import ApplicationGatewayHttpListener from .application_gateway_path_rule import ApplicationGatewayPathRule from .application_gateway_probe_health_response_match import ApplicationGatewayProbeHealthResponseMatch from .application_gateway_probe import ApplicationGatewayProbe from .application_gateway_request_routing_rule import ApplicationGatewayRequestRoutingRule from .application_gateway_redirect_configuration import ApplicationGatewayRedirectConfiguration from .application_gateway_url_path_map import ApplicationGatewayUrlPathMap from .application_gateway_firewall_disabled_rule_group import ApplicationGatewayFirewallDisabledRuleGroup from .application_gateway_web_application_firewall_configuration import ApplicationGatewayWebApplicationFirewallConfiguration from .application_gateway import ApplicationGateway from 
.application_gateway_firewall_rule import ApplicationGatewayFirewallRule from .application_gateway_firewall_rule_group import ApplicationGatewayFirewallRuleGroup from .application_gateway_firewall_rule_set import ApplicationGatewayFirewallRuleSet from .application_gateway_available_waf_rule_sets_result import ApplicationGatewayAvailableWafRuleSetsResult from .application_gateway_available_ssl_options import ApplicationGatewayAvailableSslOptions from .application_gateway_ssl_predefined_policy import ApplicationGatewaySslPredefinedPolicy from .resource import Resource from .dns_name_availability_result import DnsNameAvailabilityResult from .endpoint_service_result import EndpointServiceResult from .express_route_circuit_authorization import ExpressRouteCircuitAuthorization from .express_route_circuit_peering_config import ExpressRouteCircuitPeeringConfig from .route_filter_rule import RouteFilterRule from .express_route_circuit_stats import ExpressRouteCircuitStats from .express_route_circuit_peering import ExpressRouteCircuitPeering from .route_filter import RouteFilter from .ipv6_express_route_circuit_peering_config import Ipv6ExpressRouteCircuitPeeringConfig from .express_route_circuit_sku import ExpressRouteCircuitSku from .express_route_circuit_service_provider_properties import ExpressRouteCircuitServiceProviderProperties from .express_route_circuit import ExpressRouteCircuit from .express_route_circuit_arp_table import ExpressRouteCircuitArpTable from .express_route_circuits_arp_table_list_result import ExpressRouteCircuitsArpTableListResult from .express_route_circuit_routes_table import ExpressRouteCircuitRoutesTable from .express_route_circuits_routes_table_list_result import ExpressRouteCircuitsRoutesTableListResult from .express_route_circuit_routes_table_summary import ExpressRouteCircuitRoutesTableSummary from .express_route_circuits_routes_table_summary_list_result import ExpressRouteCircuitsRoutesTableSummaryListResult from .express_route_service_provider_bandwidths_offered import ExpressRouteServiceProviderBandwidthsOffered from .express_route_service_provider import ExpressRouteServiceProvider from .load_balancer_sku import LoadBalancerSku from .frontend_ip_configuration import FrontendIPConfiguration from .load_balancing_rule import LoadBalancingRule from .probe import Probe from .inbound_nat_pool import InboundNatPool from .outbound_nat_rule import OutboundNatRule from .load_balancer import LoadBalancer from .error_details import ErrorDetails from .error import Error from .azure_async_operation_result import AzureAsyncOperationResult from .effective_network_security_group_association import EffectiveNetworkSecurityGroupAssociation from .effective_network_security_rule import EffectiveNetworkSecurityRule from .effective_network_security_group import EffectiveNetworkSecurityGroup from .effective_network_security_group_list_result import EffectiveNetworkSecurityGroupListResult from .effective_route import EffectiveRoute from .effective_route_list_result import EffectiveRouteListResult from .network_watcher import NetworkWatcher from .topology_parameters import TopologyParameters from .topology_association import TopologyAssociation from .topology_resource import TopologyResource from .topology import Topology from .verification_ip_flow_parameters import VerificationIPFlowParameters from .verification_ip_flow_result import VerificationIPFlowResult from .next_hop_parameters import NextHopParameters from .next_hop_result import NextHopResult from .security_group_view_parameters 
import SecurityGroupViewParameters from .network_interface_association import NetworkInterfaceAssociation from .subnet_association import SubnetAssociation from .security_rule_associations import SecurityRuleAssociations from .security_group_network_interface import SecurityGroupNetworkInterface from .security_group_view_result import SecurityGroupViewResult from .packet_capture_storage_location import PacketCaptureStorageLocation from .packet_capture_filter import PacketCaptureFilter from .packet_capture_parameters import PacketCaptureParameters from .packet_capture import PacketCapture from .packet_capture_result import PacketCaptureResult from .packet_capture_query_status_result import PacketCaptureQueryStatusResult from .troubleshooting_parameters import TroubleshootingParameters from .query_troubleshooting_parameters import QueryTroubleshootingParameters from .troubleshooting_recommended_actions import TroubleshootingRecommendedActions from .troubleshooting_details import TroubleshootingDetails from .troubleshooting_result import TroubleshootingResult from .retention_policy_parameters import RetentionPolicyParameters from .flow_log_status_parameters import FlowLogStatusParameters from .flow_log_information import FlowLogInformation from .connectivity_source import ConnectivitySource from .connectivity_destination import ConnectivityDestination from .connectivity_parameters import ConnectivityParameters from .connectivity_issue import ConnectivityIssue from .connectivity_hop import ConnectivityHop from .connectivity_information import ConnectivityInformation from .patch_route_filter_rule import PatchRouteFilterRule from .patch_route_filter import PatchRouteFilter from .bgp_community import BGPCommunity from .bgp_service_community import BgpServiceCommunity from .usage_name import UsageName from .usage import Usage from .virtual_network_peering import VirtualNetworkPeering from .address_space import AddressSpace from .dhcp_options import DhcpOptions from .virtual_network import VirtualNetwork from .ip_address_availability_result import IPAddressAvailabilityResult from .virtual_network_usage_name import VirtualNetworkUsageName from .virtual_network_usage import VirtualNetworkUsage from .virtual_network_gateway_ip_configuration import VirtualNetworkGatewayIPConfiguration from .virtual_network_gateway_sku import VirtualNetworkGatewaySku from .vpn_client_root_certificate import VpnClientRootCertificate from .vpn_client_revoked_certificate import VpnClientRevokedCertificate from .vpn_client_configuration import VpnClientConfiguration from .bgp_settings import BgpSettings from .bgp_peer_status import BgpPeerStatus from .gateway_route import GatewayRoute from .virtual_network_gateway import VirtualNetworkGateway from .vpn_client_parameters import VpnClientParameters from .bgp_peer_status_list_result import BgpPeerStatusListResult from .gateway_route_list_result import GatewayRouteListResult from .tunnel_connection_health import TunnelConnectionHealth from .local_network_gateway import LocalNetworkGateway from .ipsec_policy import IpsecPolicy from .virtual_network_gateway_connection import VirtualNetworkGatewayConnection from .connection_reset_shared_key import ConnectionResetSharedKey from .connection_shared_key import ConnectionSharedKey from .virtual_network_connection_gateway_reference import VirtualNetworkConnectionGatewayReference from .virtual_network_gateway_connection_list_entity import VirtualNetworkGatewayConnectionListEntity from .application_gateway_paged import 
ApplicationGatewayPaged from .application_gateway_ssl_predefined_policy_paged import ApplicationGatewaySslPredefinedPolicyPaged from .endpoint_service_result_paged import EndpointServiceResultPaged from .express_route_circuit_authorization_paged import ExpressRouteCircuitAuthorizationPaged from .express_route_circuit_peering_paged import ExpressRouteCircuitPeeringPaged from .express_route_circuit_paged import ExpressRouteCircuitPaged from .express_route_service_provider_paged import ExpressRouteServiceProviderPaged from .load_balancer_paged import LoadBalancerPaged from .backend_address_pool_paged import BackendAddressPoolPaged from .frontend_ip_configuration_paged import FrontendIPConfigurationPaged from .inbound_nat_rule_paged import InboundNatRulePaged from .load_balancing_rule_paged import LoadBalancingRulePaged from .network_interface_paged import NetworkInterfacePaged from .probe_paged import ProbePaged from .network_interface_ip_configuration_paged import NetworkInterfaceIPConfigurationPaged from .network_security_group_paged import NetworkSecurityGroupPaged from .security_rule_paged import SecurityRulePaged from .network_watcher_paged import NetworkWatcherPaged from .packet_capture_result_paged import PacketCaptureResultPaged from .public_ip_address_paged import PublicIPAddressPaged from .route_filter_paged import RouteFilterPaged from .route_filter_rule_paged import RouteFilterRulePaged from .route_table_paged import RouteTablePaged from .route_paged import RoutePaged from .bgp_service_community_paged import BgpServiceCommunityPaged from .usage_paged import UsagePaged from .virtual_network_paged import VirtualNetworkPaged from .virtual_network_usage_paged import VirtualNetworkUsagePaged from .subnet_paged import SubnetPaged from .virtual_network_peering_paged import VirtualNetworkPeeringPaged from .virtual_network_gateway_paged import VirtualNetworkGatewayPaged from .virtual_network_gateway_connection_list_entity_paged import VirtualNetworkGatewayConnectionListEntityPaged from .virtual_network_gateway_connection_paged import VirtualNetworkGatewayConnectionPaged from .local_network_gateway_paged import LocalNetworkGatewayPaged from .network_management_client_enums import ( TransportProtocol, IPAllocationMethod, IPVersion, SecurityRuleProtocol, SecurityRuleAccess, SecurityRuleDirection, RouteNextHopType, PublicIPAddressSkuName, ApplicationGatewayProtocol, ApplicationGatewayCookieBasedAffinity, ApplicationGatewayBackendHealthServerHealth, ApplicationGatewaySkuName, ApplicationGatewayTier, ApplicationGatewaySslProtocol, ApplicationGatewaySslPolicyType, ApplicationGatewaySslPolicyName, ApplicationGatewaySslCipherSuite, ApplicationGatewayRequestRoutingRuleType, ApplicationGatewayRedirectType, ApplicationGatewayOperationalState, ApplicationGatewayFirewallMode, AuthorizationUseStatus, ExpressRouteCircuitPeeringAdvertisedPublicPrefixState, Access, ExpressRouteCircuitPeeringType, ExpressRouteCircuitPeeringState, ExpressRouteCircuitSkuTier, ExpressRouteCircuitSkuFamily, ServiceProviderProvisioningState, LoadBalancerSkuName, LoadDistribution, ProbeProtocol, NetworkOperationStatus, EffectiveSecurityRuleProtocol, EffectiveRouteSource, EffectiveRouteState, ProvisioningState, AssociationType, Direction, Protocol, NextHopType, PcProtocol, PcStatus, PcError, Origin, Severity, IssueType, ConnectionStatus, VirtualNetworkPeeringState, VirtualNetworkGatewayType, VpnType, VirtualNetworkGatewaySkuName, VirtualNetworkGatewaySkuTier, VpnClientProtocol, BgpPeerState, ProcessorArchitecture, 
AuthenticationMethod, VirtualNetworkGatewayConnectionStatus, VirtualNetworkGatewayConnectionType, IpsecEncryption, IpsecIntegrity, IkeEncryption, IkeIntegrity, DhGroup, PfsGroup, ) __all__ = [ 'SubResource', 'BackendAddressPool', 'InboundNatRule', 'SecurityRule', 'NetworkInterfaceDnsSettings', 'NetworkInterface', 'NetworkSecurityGroup', 'Route', 'RouteTable', 'ServiceEndpointPropertiesFormat', 'PublicIPAddressSku', 'PublicIPAddressDnsSettings', 'PublicIPAddress', 'IPConfiguration', 'ResourceNavigationLink', 'Subnet', 'NetworkInterfaceIPConfiguration', 'ApplicationGatewayBackendAddress', 'ApplicationGatewayBackendAddressPool', 'ApplicationGatewayConnectionDraining', 'ApplicationGatewayBackendHttpSettings', 'ApplicationGatewayBackendHealthServer', 'ApplicationGatewayBackendHealthHttpSettings', 'ApplicationGatewayBackendHealthPool', 'ApplicationGatewayBackendHealth', 'ApplicationGatewaySku', 'ApplicationGatewaySslPolicy', 'ApplicationGatewayIPConfiguration', 'ApplicationGatewayAuthenticationCertificate', 'ApplicationGatewaySslCertificate', 'ApplicationGatewayFrontendIPConfiguration', 'ApplicationGatewayFrontendPort', 'ApplicationGatewayHttpListener', 'ApplicationGatewayPathRule', 'ApplicationGatewayProbeHealthResponseMatch', 'ApplicationGatewayProbe', 'ApplicationGatewayRequestRoutingRule', 'ApplicationGatewayRedirectConfiguration', 'ApplicationGatewayUrlPathMap', 'ApplicationGatewayFirewallDisabledRuleGroup', 'ApplicationGatewayWebApplicationFirewallConfiguration', 'ApplicationGateway', 'ApplicationGatewayFirewallRule', 'ApplicationGatewayFirewallRuleGroup', 'ApplicationGatewayFirewallRuleSet', 'ApplicationGatewayAvailableWafRuleSetsResult', 'ApplicationGatewayAvailableSslOptions', 'ApplicationGatewaySslPredefinedPolicy', 'Resource', 'DnsNameAvailabilityResult', 'EndpointServiceResult', 'ExpressRouteCircuitAuthorization', 'ExpressRouteCircuitPeeringConfig', 'RouteFilterRule', 'ExpressRouteCircuitStats', 'ExpressRouteCircuitPeering', 'RouteFilter', 'Ipv6ExpressRouteCircuitPeeringConfig', 'ExpressRouteCircuitSku', 'ExpressRouteCircuitServiceProviderProperties', 'ExpressRouteCircuit', 'ExpressRouteCircuitArpTable', 'ExpressRouteCircuitsArpTableListResult', 'ExpressRouteCircuitRoutesTable', 'ExpressRouteCircuitsRoutesTableListResult', 'ExpressRouteCircuitRoutesTableSummary', 'ExpressRouteCircuitsRoutesTableSummaryListResult', 'ExpressRouteServiceProviderBandwidthsOffered', 'ExpressRouteServiceProvider', 'LoadBalancerSku', 'FrontendIPConfiguration', 'LoadBalancingRule', 'Probe', 'InboundNatPool', 'OutboundNatRule', 'LoadBalancer', 'ErrorDetails', 'Error', 'AzureAsyncOperationResult', 'EffectiveNetworkSecurityGroupAssociation', 'EffectiveNetworkSecurityRule', 'EffectiveNetworkSecurityGroup', 'EffectiveNetworkSecurityGroupListResult', 'EffectiveRoute', 'EffectiveRouteListResult', 'NetworkWatcher', 'TopologyParameters', 'TopologyAssociation', 'TopologyResource', 'Topology', 'VerificationIPFlowParameters', 'VerificationIPFlowResult', 'NextHopParameters', 'NextHopResult', 'SecurityGroupViewParameters', 'NetworkInterfaceAssociation', 'SubnetAssociation', 'SecurityRuleAssociations', 'SecurityGroupNetworkInterface', 'SecurityGroupViewResult', 'PacketCaptureStorageLocation', 'PacketCaptureFilter', 'PacketCaptureParameters', 'PacketCapture', 'PacketCaptureResult', 'PacketCaptureQueryStatusResult', 'TroubleshootingParameters', 'QueryTroubleshootingParameters', 'TroubleshootingRecommendedActions', 'TroubleshootingDetails', 'TroubleshootingResult', 'RetentionPolicyParameters', 'FlowLogStatusParameters', 
'FlowLogInformation', 'ConnectivitySource', 'ConnectivityDestination', 'ConnectivityParameters', 'ConnectivityIssue', 'ConnectivityHop', 'ConnectivityInformation', 'PatchRouteFilterRule', 'PatchRouteFilter', 'BGPCommunity', 'BgpServiceCommunity', 'UsageName', 'Usage', 'VirtualNetworkPeering', 'AddressSpace', 'DhcpOptions', 'VirtualNetwork', 'IPAddressAvailabilityResult', 'VirtualNetworkUsageName', 'VirtualNetworkUsage', 'VirtualNetworkGatewayIPConfiguration', 'VirtualNetworkGatewaySku', 'VpnClientRootCertificate', 'VpnClientRevokedCertificate', 'VpnClientConfiguration', 'BgpSettings', 'BgpPeerStatus', 'GatewayRoute', 'VirtualNetworkGateway', 'VpnClientParameters', 'BgpPeerStatusListResult', 'GatewayRouteListResult', 'TunnelConnectionHealth', 'LocalNetworkGateway', 'IpsecPolicy', 'VirtualNetworkGatewayConnection', 'ConnectionResetSharedKey', 'ConnectionSharedKey', 'VirtualNetworkConnectionGatewayReference', 'VirtualNetworkGatewayConnectionListEntity', 'ApplicationGatewayPaged', 'ApplicationGatewaySslPredefinedPolicyPaged', 'EndpointServiceResultPaged', 'ExpressRouteCircuitAuthorizationPaged', 'ExpressRouteCircuitPeeringPaged', 'ExpressRouteCircuitPaged', 'ExpressRouteServiceProviderPaged', 'LoadBalancerPaged', 'BackendAddressPoolPaged', 'FrontendIPConfigurationPaged', 'InboundNatRulePaged', 'LoadBalancingRulePaged', 'NetworkInterfacePaged', 'ProbePaged', 'NetworkInterfaceIPConfigurationPaged', 'NetworkSecurityGroupPaged', 'SecurityRulePaged', 'NetworkWatcherPaged', 'PacketCaptureResultPaged', 'PublicIPAddressPaged', 'RouteFilterPaged', 'RouteFilterRulePaged', 'RouteTablePaged', 'RoutePaged', 'BgpServiceCommunityPaged', 'UsagePaged', 'VirtualNetworkPaged', 'VirtualNetworkUsagePaged', 'SubnetPaged', 'VirtualNetworkPeeringPaged', 'VirtualNetworkGatewayPaged', 'VirtualNetworkGatewayConnectionListEntityPaged', 'VirtualNetworkGatewayConnectionPaged', 'LocalNetworkGatewayPaged', 'TransportProtocol', 'IPAllocationMethod', 'IPVersion', 'SecurityRuleProtocol', 'SecurityRuleAccess', 'SecurityRuleDirection', 'RouteNextHopType', 'PublicIPAddressSkuName', 'ApplicationGatewayProtocol', 'ApplicationGatewayCookieBasedAffinity', 'ApplicationGatewayBackendHealthServerHealth', 'ApplicationGatewaySkuName', 'ApplicationGatewayTier', 'ApplicationGatewaySslProtocol', 'ApplicationGatewaySslPolicyType', 'ApplicationGatewaySslPolicyName', 'ApplicationGatewaySslCipherSuite', 'ApplicationGatewayRequestRoutingRuleType', 'ApplicationGatewayRedirectType', 'ApplicationGatewayOperationalState', 'ApplicationGatewayFirewallMode', 'AuthorizationUseStatus', 'ExpressRouteCircuitPeeringAdvertisedPublicPrefixState', 'Access', 'ExpressRouteCircuitPeeringType', 'ExpressRouteCircuitPeeringState', 'ExpressRouteCircuitSkuTier', 'ExpressRouteCircuitSkuFamily', 'ServiceProviderProvisioningState', 'LoadBalancerSkuName', 'LoadDistribution', 'ProbeProtocol', 'NetworkOperationStatus', 'EffectiveSecurityRuleProtocol', 'EffectiveRouteSource', 'EffectiveRouteState', 'ProvisioningState', 'AssociationType', 'Direction', 'Protocol', 'NextHopType', 'PcProtocol', 'PcStatus', 'PcError', 'Origin', 'Severity', 'IssueType', 'ConnectionStatus', 'VirtualNetworkPeeringState', 'VirtualNetworkGatewayType', 'VpnType', 'VirtualNetworkGatewaySkuName', 'VirtualNetworkGatewaySkuTier', 'VpnClientProtocol', 'BgpPeerState', 'ProcessorArchitecture', 'AuthenticationMethod', 'VirtualNetworkGatewayConnectionStatus', 'VirtualNetworkGatewayConnectionType', 'IpsecEncryption', 'IpsecIntegrity', 'IkeEncryption', 'IkeIntegrity', 'DhGroup', 'PfsGroup', ]
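# Usage note (illustrative): this generated __init__ exists so callers can
# import every model and enum from the package root instead of hunting for
# the per-class modules.  A sketch, assuming the package is installed as
# azure.mgmt.network.models; the exact constructor keywords shown here are
# an assumption, not a definitive reference:
#
#     from azure.mgmt.network.models import SecurityRule, SecurityRuleProtocol
#
#     rule = SecurityRule(
#         name='allow-ssh',
#         protocol=SecurityRuleProtocol.tcp,
#         access='Allow',
#         direction='Inbound',
#         priority=100,
#         source_address_prefix='*',
#         source_port_range='*',
#         destination_address_prefix='*',
#         destination_port_range='22',
#     )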
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import os from oslo_concurrency import lockutils from oslo_log import log as logging import yaml from neutron.tests.tempest.common import cred_provider from neutron.tests.tempest import config from neutron.tests.tempest import exceptions CONF = config.CONF LOG = logging.getLogger(__name__) def read_accounts_yaml(path): yaml_file = open(path, 'r') accounts = yaml.load(yaml_file) return accounts class Accounts(cred_provider.CredentialProvider): def __init__(self, name): super(Accounts, self).__init__(name) self.name = name if os.path.isfile(CONF.auth.test_accounts_file): accounts = read_accounts_yaml(CONF.auth.test_accounts_file) self.use_default_creds = False else: accounts = {} self.use_default_creds = True self.hash_dict = self.get_hash_dict(accounts) # FIXME(dhellmann): The configuration option is not part of # the API of the library, because if we change the option name # or group it will break this use. Tempest needs to set this # value somewhere that it owns, and then use # lockutils.set_defaults() to tell oslo.concurrency what value # to use. self.accounts_dir = os.path.join(CONF.oslo_concurrency.lock_path, 'test_accounts') self.isolated_creds = {} @classmethod def _append_role(cls, role, account_hash, hash_dict): if role in hash_dict['roles']: hash_dict['roles'][role].append(account_hash) else: hash_dict['roles'][role] = [account_hash] return hash_dict @classmethod def get_hash_dict(cls, accounts): hash_dict = {'roles': {}, 'creds': {}} # Loop over the accounts read from the yaml file for account in accounts: roles = [] types = [] if 'roles' in account: roles = account.pop('roles') if 'types' in account: types = account.pop('types') temp_hash = hashlib.md5() temp_hash.update(str(account)) temp_hash_key = temp_hash.hexdigest() hash_dict['creds'][temp_hash_key] = account for role in roles: hash_dict = cls._append_role(role, temp_hash_key, hash_dict) # If types are set for the account append the matching role # subdict with the hash for type in types: if type == 'admin': hash_dict = cls._append_role(CONF.identity.admin_role, temp_hash_key, hash_dict) elif type == 'operator': hash_dict = cls._append_role( CONF.object_storage.operator_role, temp_hash_key, hash_dict) elif type == 'reseller_admin': hash_dict = cls._append_role( CONF.object_storage.reseller_admin_role, temp_hash_key, hash_dict) return hash_dict def is_multi_user(self): # Default credentials is not a valid option with locking Account if self.use_default_creds: raise exceptions.InvalidConfiguration( "Account file %s doesn't exist" % CONF.auth.test_accounts_file) else: return len(self.hash_dict['creds']) > 1 def is_multi_tenant(self): return self.is_multi_user() def _create_hash_file(self, hash_string): path = os.path.join(os.path.join(self.accounts_dir, hash_string)) if not os.path.isfile(path): with open(path, 'w') as fd: fd.write(self.name) return True return False @lockutils.synchronized('test_accounts_io', 
external=True) def _get_free_hash(self, hashes): # Cast as a list because in some edge cases a set will be passed in hashes = list(hashes) if not os.path.isdir(self.accounts_dir): os.mkdir(self.accounts_dir) # Create File from first hash (since none are in use) self._create_hash_file(hashes[0]) return hashes[0] names = [] for _hash in hashes: res = self._create_hash_file(_hash) if res: return _hash else: path = os.path.join(os.path.join(self.accounts_dir, _hash)) with open(path, 'r') as fd: names.append(fd.read()) msg = ('Insufficient number of users provided. %s have allocated all ' 'the credentials for this allocation request' % ','.join(names)) raise exceptions.InvalidConfiguration(msg) def _get_match_hash_list(self, roles=None): hashes = [] if roles: # Loop over all the creds for each role in the subdict and generate # a list of cred lists for each role for role in roles: temp_hashes = self.hash_dict['roles'].get(role, None) if not temp_hashes: raise exceptions.InvalidConfiguration( "No credentials with role: %s specified in the " "accounts ""file" % role) hashes.append(temp_hashes) # Take the list of lists and do a boolean and between each list to # find the creds which fall under all the specified roles temp_list = set(hashes[0]) for hash_list in hashes[1:]: temp_list = temp_list & set(hash_list) hashes = temp_list else: hashes = self.hash_dict['creds'].keys() # NOTE(mtreinish): admin is a special case because of the increased # privlege set which could potentially cause issues on tests where that # is not expected. So unless the admin role isn't specified do not # allocate admin. admin_hashes = self.hash_dict['roles'].get(CONF.identity.admin_role, None) if ((not roles or CONF.identity.admin_role not in roles) and admin_hashes): useable_hashes = [x for x in hashes if x not in admin_hashes] else: useable_hashes = hashes return useable_hashes def _get_creds(self, roles=None): if self.use_default_creds: raise exceptions.InvalidConfiguration( "Account file %s doesn't exist" % CONF.auth.test_accounts_file) useable_hashes = self._get_match_hash_list(roles) free_hash = self._get_free_hash(useable_hashes) return self.hash_dict['creds'][free_hash] @lockutils.synchronized('test_accounts_io', external=True) def remove_hash(self, hash_string): hash_path = os.path.join(self.accounts_dir, hash_string) if not os.path.isfile(hash_path): LOG.warning('Expected an account lock file %s to remove, but ' 'one did not exist' % hash_path) else: os.remove(hash_path) if not os.listdir(self.accounts_dir): os.rmdir(self.accounts_dir) def get_hash(self, creds): for _hash in self.hash_dict['creds']: # Comparing on the attributes that are expected in the YAML if all([getattr(creds, k) == self.hash_dict['creds'][_hash][k] for k in creds.get_init_attributes()]): return _hash raise AttributeError('Invalid credentials %s' % creds) def remove_credentials(self, creds): _hash = self.get_hash(creds) self.remove_hash(_hash) def get_primary_creds(self): if self.isolated_creds.get('primary'): return self.isolated_creds.get('primary') creds = self._get_creds() primary_credential = cred_provider.get_credentials(**creds) self.isolated_creds['primary'] = primary_credential return primary_credential def get_alt_creds(self): if self.isolated_creds.get('alt'): return self.isolated_creds.get('alt') creds = self._get_creds() alt_credential = cred_provider.get_credentials(**creds) self.isolated_creds['alt'] = alt_credential return alt_credential def get_creds_by_roles(self, roles, force_new=False): roles = list(set(roles)) exist_creds 
= self.isolated_creds.get(str(roles), None) # The force kwarg is used to allocate an additional set of creds with # the same role list. The index used for the previously allocation # in the isolated_creds dict will be moved. if exist_creds and not force_new: return exist_creds elif exist_creds and force_new: new_index = str(roles) + '-' + str(len(self.isolated_creds)) self.isolated_creds[new_index] = exist_creds creds = self._get_creds(roles=roles) role_credential = cred_provider.get_credentials(**creds) self.isolated_creds[str(roles)] = role_credential return role_credential def clear_isolated_creds(self): for creds in self.isolated_creds.values(): self.remove_credentials(creds) def get_admin_creds(self): return self.get_creds_by_roles([CONF.identity.admin_role]) def is_role_available(self, role): if self.use_default_creds: return False else: if self.hash_dict['roles'].get(role): return True return False def admin_available(self): return self.is_role_available(CONF.identity.admin_role) class NotLockingAccounts(Accounts): """Credentials provider which always returns the first and second configured accounts as primary and alt users. This credential provider can be used in case of serial test execution to preserve the current behaviour of the serial tempest run. """ def _unique_creds(self, cred_arg=None): """Verify that the configured credentials are valid and distinct """ if self.use_default_creds: try: user = self.get_primary_creds() alt_user = self.get_alt_creds() return getattr(user, cred_arg) != getattr(alt_user, cred_arg) except exceptions.InvalidCredentials as ic: msg = "At least one of the configured credentials is " \ "not valid: %s" % ic.message raise exceptions.InvalidConfiguration(msg) else: # TODO(andreaf) Add a uniqueness check here return len(self.hash_dict['creds']) > 1 def is_multi_user(self): return self._unique_creds('username') def is_multi_tenant(self): return self._unique_creds('tenant_id') def get_creds(self, id, roles=None): try: hashes = self._get_match_hash_list(roles) # No need to sort the dict as within the same python process # the HASH seed won't change, so subsequent calls to keys() # will return the same result _hash = hashes[id] except IndexError: msg = 'Insufficient number of users provided' raise exceptions.InvalidConfiguration(msg) return self.hash_dict['creds'][_hash] def get_primary_creds(self): if self.isolated_creds.get('primary'): return self.isolated_creds.get('primary') if not self.use_default_creds: creds = self.get_creds(0) primary_credential = cred_provider.get_credentials(**creds) else: primary_credential = cred_provider.get_configured_credentials( 'user') self.isolated_creds['primary'] = primary_credential return primary_credential def get_alt_creds(self): if self.isolated_creds.get('alt'): return self.isolated_creds.get('alt') if not self.use_default_creds: creds = self.get_creds(1) alt_credential = cred_provider.get_credentials(**creds) else: alt_credential = cred_provider.get_configured_credentials( 'alt_user') self.isolated_creds['alt'] = alt_credential return alt_credential def clear_isolated_creds(self): self.isolated_creds = {} def get_admin_creds(self): if not self.use_default_creds: return self.get_creds_by_roles([CONF.identity.admin_role]) else: creds = cred_provider.get_configured_credentials( "identity_admin", fill_in=False) self.isolated_creds['admin'] = creds return creds def get_creds_by_roles(self, roles, force_new=False): roles = list(set(roles)) exist_creds = self.isolated_creds.get(str(roles), None) index = 0 if exist_creds 
and not force_new: return exist_creds elif exist_creds and force_new: new_index = str(roles) + '-' + str(len(self.isolated_creds)) self.isolated_creds[new_index] = exist_creds # Figure out how many existing creds for this roles set are present # use this as the index the returning hash list to ensure separate # creds are returned with force_new being True for creds_names in self.isolated_creds: if str(roles) in creds_names: index = index + 1 if not self.use_default_creds: creds = self.get_creds(index, roles=roles) role_credential = cred_provider.get_credentials(**creds) self.isolated_creds[str(roles)] = role_credential else: msg = "Default credentials can not be used with specifying "\ "credentials by roles" raise exceptions.InvalidConfiguration(msg) return role_credential
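# For reference, read_accounts_yaml() above expects the file named by
# CONF.auth.test_accounts_file to be a YAML list of credential mappings, in
# the style of tempest's accounts file.  An illustrative sample (all values
# are placeholders):
#
#     - username: user-1
#       tenant_name: project-1
#       password: secretpass
#     - username: admin-user
#       tenant_name: admin-project
#       password: secretpass
#       roles:
#         - admin
#     - username: swift-operator
#       tenant_name: swift-project
#       password: secretpass
#       types:
#         - operator
#
# get_hash_dict() hashes each mapping (md5 of its dict repr) into
# hash_dict['creds'], and the optional 'roles'/'types' lists are folded into
# hash_dict['roles'] so that _get_creds(roles=...) can match them later.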
# Qt import from PySide.QtGui import ( QApplication, QMessageBox, QHBoxLayout, QWidget, QPushButton, QComboBox, QScrollArea, QPixmap, QPainter, QCursor, QPen ) from PySide.QtCore import Signal, Qt # Maya import from maya import cmds # custom import from mttConfig import TOOLBAR_BUTTON_SIZE, TOOLBAR_SEPARATOR_WIDTH class RightPushButton(QPushButton): """ Push Button with Right click signal """ rightClick = Signal() is_right_press = False def __init__(self, parent=None): super(RightPushButton, self).__init__(parent) self.is_right_press = False def mousePressEvent(self, event): QPushButton.mousePressEvent(self, event) if event.button() == Qt.RightButton: self.is_right_press = True self.setDown(True) def mouseMoveEvent(self, event): QPushButton.mouseMoveEvent(self, event) if event.buttons() & Qt.RightButton: if self.contentsRect().contains(event.pos()): self.setDown(True) self.is_right_press = True else: self.setDown(False) self.is_right_press = False def mouseReleaseEvent(self, event): QPushButton.mouseReleaseEvent(self, event) if event.button() == Qt.RightButton: if self.is_right_press: self.rightClick.emit() self.setDown(False) self.is_right_press = False class StatusToolbarButton(QPushButton): """ Button with same Maya Status Line behavior """ def __init__(self, pix_ico, parent=None): super(StatusToolbarButton, self).__init__(parent) self.icon = QPixmap(pix_ico) self.setFlat(True) self.setFixedSize(TOOLBAR_BUTTON_SIZE, TOOLBAR_BUTTON_SIZE) self.new_ui = float(cmds.about(version=True)) >= 2016 palette = QApplication.palette() self.highlight = palette.highlight().color() def paintEvent(self, event): mouse_pos = self.mapFromGlobal(QCursor.pos()) is_hover = self.contentsRect().contains(mouse_pos) if not self.new_ui: QPushButton.paintEvent(self, event) painter = QPainter(self) if self.new_ui and self.isChecked(): painter.setRenderHint(QPainter.Antialiasing) painter.setPen(QPen(Qt.NoPen)) painter.setBrush(self.highlight) painter.drawRoundedRect(event.rect(), 2, 2) painter.drawPixmap(2, 2, self.icon) if is_hover: painter.setCompositionMode(QPainter.CompositionMode_Screen) painter.drawPixmap(2, 2, self.icon) class SeparatorButton(QPushButton): """ Separator button with Maya Status Line's style """ def __init__(self, parent=None): super(SeparatorButton, self).__init__(parent) self.pix = (QPixmap(':/ShortCloseBar.png'), QPixmap(':/ShortOpenBar.png')) self.is_collapsed = False self.icon = self.pix[1] self.setFlat(True) self.setFixedSize(TOOLBAR_SEPARATOR_WIDTH, 20) def paintEvent(self, event): mouse_pos = self.mapFromGlobal(QCursor.pos()) is_hover = self.contentsRect().contains(mouse_pos) QPushButton.paintEvent(self, event) painter = QPainter(self) painter.drawPixmap(2, 1, self.icon) if is_hover: painter.setCompositionMode(QPainter.CompositionMode_Screen) painter.drawPixmap(2, 1, self.icon) def set_collapse(self, state): self.icon = self.pix[state] class StatusCollapsibleLayout(QWidget): """ Collapsible layout with Maya Status Line's style """ toggled = Signal(int) def __init__(self, parent=None, section_name=None): super(StatusCollapsibleLayout, self).__init__(parent) self.icon_buttons = [] self.state = True self.toggle_btn = SeparatorButton() if section_name is None: section_name = 'Show/Hide section' self.toggle_btn.setToolTip(section_name) self.toggle_btn.setFlat(True) self.toggle_btn.clicked.connect(self.toggle_layout) self.group_layout = QHBoxLayout() self.group_layout.setAlignment(Qt.AlignLeft) self.group_layout.setContentsMargins(0, 0, 0, 0) self.group_layout.setSpacing(1) 
self.group_layout.addWidget(self.toggle_btn) self.setLayout(self.group_layout) def _delta_length(self): if self.state: return self.max_length() - TOOLBAR_SEPARATOR_WIDTH else: return TOOLBAR_SEPARATOR_WIDTH - self.max_length() def add_button(self, button): """ Create a button and add it to the layout :param button: QPushButton """ self.icon_buttons.append(button) self.group_layout.addWidget(button) def toggle_layout(self, init=False): """ Toggle collapse action for layout """ if not init: self.state = not self.state for btn in self.icon_buttons: btn.setVisible(self.state) self.toggle_btn.set_collapse(self.state) if init: self.toggled.emit(0 if self.state else self._delta_length()) else: self.toggled.emit(self._delta_length()) def set_current_state(self, state): self.state = state == 'true' if isinstance(state, unicode) else state self.toggle_layout(init=True) def button_count(self): return len(self.icon_buttons) def button_list(self): return self.icon_buttons def current_state(self): return self.state def max_length(self): count = self.button_count() # separator button width + button count * button size + spacing return TOOLBAR_SEPARATOR_WIDTH + count * TOOLBAR_BUTTON_SIZE + count class StatusScrollArea(QScrollArea): def __init__(self): super(StatusScrollArea, self).__init__() self._width = 0 self._group_count = 0 self._pan_pos = None self.__create_ui() self.__init_ui() def __create_ui(self): self.container = QWidget() self.container_layout = QHBoxLayout() self.container.setLayout(self.container_layout) self.setWidget(self.container) def __init_ui(self): self.container.setFixedHeight(TOOLBAR_BUTTON_SIZE) self.container_layout.setContentsMargins(0, 0, 0, 0) self.container_layout.setSpacing(1) self.container_layout.setAlignment(Qt.AlignLeft) self.setFixedHeight(TOOLBAR_BUTTON_SIZE) self.setFocusPolicy(Qt.NoFocus) self.setFrameShape(self.NoFrame) self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) def _update_width(self, expand_value): self._width += expand_value self.container.setFixedWidth(self._width + self._group_count) def mousePressEvent(self, event): if event.button() == Qt.MidButton: QApplication.setOverrideCursor(QCursor(Qt.SizeHorCursor)) self._pan_pos = event.globalPos() event.accept() else: event.ignore() def mouseReleaseEvent(self, event): if event.button() == Qt.MidButton: QApplication.restoreOverrideCursor() self._pan_pos = None event.accept() else: event.ignore() def mouseMoveEvent(self, event): if self._pan_pos: h_bar = self.horizontalScrollBar() h_bar_pos = h_bar.sliderPosition() cursor_pos = event.globalPos() cursor_delta = (cursor_pos - self._pan_pos).x() h_bar.setValue(h_bar_pos - cursor_delta) self._pan_pos = cursor_pos event.accept() else: event.ignore() def wheelEvent(self, event): if event.orientation() == Qt.Vertical: num_degrees = event.delta() / 8 h_bar = self.horizontalScrollBar() h_bar_pos = h_bar.sliderPosition() h_bar.setValue(h_bar_pos - num_degrees) else: super(StatusScrollArea, self).wheelEvent(event) def resizeEvent(self, event): max_scroll = max(0, self.container.width() - event.size().width()) self.horizontalScrollBar().setMaximum(max_scroll) def add_widget(self, widget): # add widget to layout self.container_layout.addWidget(widget) # connect widget for future update when user interact with it widget.toggled.connect(self._update_width) # expand widget layout self._width += widget.max_length() self._group_count += 1 self.container.setFixedWidth(self._width) class MessageBoxWithCheckbox(QMessageBox): def __init__(self, parent=None): 
        super(MessageBoxWithCheckbox, self).__init__(parent)

        self.instance_state_widget = QComboBox()
        self.layout().addWidget(self.instance_state_widget, 1, 1)

    def exec_(self, *args, **kwargs):
        return QMessageBox.exec_(self, *args, **kwargs), \
            self.instance_state_widget.currentIndex()
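# Minimal wiring sketch for the widgets above (illustrative; assumes it runs
# inside Maya where a PySide QApplication already exists, and the ':/...'
# icon paths below are placeholders, not real MTT resources):
#
#     toolbar = StatusScrollArea()
#
#     section = StatusCollapsibleLayout(section_name='Filters')
#     for icon in (':/filter.png', ':/refresh.png'):
#         btn = StatusToolbarButton(icon)
#         btn.setCheckable(True)
#         section.add_button(btn)
#
#     toolbar.add_widget(section)
#     section.set_current_state(True)   # expanded; pass False to collapse
#
# StatusScrollArea.add_widget() connects the section's `toggled` signal to
# _update_width(), so collapsing or expanding a section resizes the
# container width automatically.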
"""Colorize strings for output to terminal. Typical usage looks like this:: >>> from arcutils.colorize import colorizer, printer >>> colorized_string = colorizer.error('Whoops') >>> colorized_string '\\x1b[91mWhoops\\x1b[0m' >>> print(colorized_string) \x1b[91mWhoops\x1b[0m >>> printer.error('Something bad happened') Something bad happened You can also do some more advanced things like this:: from arcutils.colorize import colorizer, RED, GREEN, BLUE my_string = colorizer(RED, 'red', GREEN, 'green', BLUE, 'blue') print(my_string) If you need to, you can customize the colors used by creating an instance of :class:`.Colorizer` or :class:`ColorPrinter` with your own color map. """ import os import sys class Color: def __init__(self, name, code): self.name = name self.code = code def __str__(self): return self.code def __repr__(self): return '<Color: {0.name}>'.format(self) NONE = Color('none', '') RESET = Color('reset', '\033[0m') RED = Color('red', '\033[91m') GREEN = Color('green', '\033[92m') YELLOW = Color('yellow', '\033[93m') BLUE = Color('blue', '\033[94m') MAGENTA = Color('magenta', '\033[95m') # Map symbolic names to colors COLOR_MAP = { 'none': NONE, 'reset': RESET, 'header': MAGENTA, 'info': BLUE, 'success': GREEN, 'warning': YELLOW, 'error': RED, 'danger': RED, } class _Base: def __new__(cls, color_map=None): """Dynamically generate convenience methods. All we're doing here is dynamically generating the convenience methods .info(), .error(), etc. These names correspond to the keys of the instance's color map. Each subclass of _Base must point its ``__call__`` method at its main implementation method. E.g. :class:`Colorizer` points its ``__call__`` method at its ``colorize`` method. >>> colorizer.info.__name__ 'info' >>> colorizer.info.__doc__ 'info convenience method' >>> another_colorizer = Colorizer({'pants': ''}) >>> another_colorizer.pants.__name__ 'pants' >>> hasattr(colorizer, 'pants') False """ names = set(COLOR_MAP.keys()) if color_map is not None: names.update(color_map.keys()) # Create a subclass so instances created later don't affect # instances created earlier. Without this, if an instance was # created with, e.g., an extra color map entry, instances # created earlier would get a new convenience method, which # wouldn't be the worst thing in the world, but it might cause # confusion. sub_cls = type(cls.__name__, (cls,), {}) for name in names: setattr(sub_cls, name, cls.__make_convenience_method(name)) return super(_Base, cls).__new__(sub_cls) @classmethod def __make_convenience_method(cls, name): def method(self, *args, **kwargs): kwargs['color'] = name return self(*args, **kwargs) method.__name__ = name method.__doc__ = '{name} convenience method'.format(name=name) return method def __call__(self, *args, **kwargs): raise NotImplementedError class Colorizer(_Base): """Colorize strings. Default colors can be overridden by passing a color map to the constructor. 
Examples:: >>> colorizer = Colorizer({'success': YELLOW, 'special': GREEN}) >>> colorizer.colorize('boring old message') 'boring old message\\x1b[0m' >>> colorizer.colorize('boring old message', color='none') 'boring old message\\x1b[0m' >>> colorizer.colorize(NONE, 'boring old message') 'boring old message\\x1b[0m' >>> colorizer.colorize(RED, 'red', GREEN, 'green', BLUE, 'blue') '\\x1b[91mred \\x1b[92mgreen \\x1b[94mblue\\x1b[0m' >>> colorizer.info('check this out') '\\x1b[94mcheck this out\\x1b[0m' >>> colorizer.error('whoopsie') '\\x1b[91mwhoopsie\\x1b[0m' >>> colorizer.success('success is green by default, but it was overridden') '\\x1b[93msuccess is green by default, but it was overridden\\x1b[0m' >>> colorizer.special('special') '\\x1b[92mspecial\\x1b[0m' """ def __init__(self, color_map=None): self.color_map = COLOR_MAP.copy() if color_map is not None: self.color_map.update(color_map) def colorize(self, *args, sep=' ', end='', reset=True, **kwargs): """Returns a colorized string (joining ``args`` into one str). Pass ``color`` as a keyword arg to colorize ``*args``. It can be a name from the color map ('info', 'error', etc) or it can be an instance of :class:`Color`. Alternatively, you can pass one or more :class:`Color`s as args; the output string will change color each time a new color is encountered in args. If the ``color`` keyword arg is also passed, the output string will start with the specified color. The remaining args are similar to the built-in ``print()`` function. ``sep`` is a space as usual; ``end`` is an empty string instead of a newline. The string is terminated with the terminal reset code unless ``reset=False``. """ color = kwargs.get('color', NONE) if not isinstance(color, Color): color = self.color_map[color] args = (color,) + args string = [] for arg in args[:-1]: if isinstance(arg, Color): string.append(str(arg)) else: string.append(str(arg)) string.append(sep) string.append(str(args[-1])) string = ''.join(string) reset = self.color_map['reset'] if reset else '' string = '{string}{end}{reset}'.format(**locals()) return string __call__ = colorize class ColorPrinter(_Base): """Print things in color. Default colors can be overridden by passing a color map to the constructor. When the output device isn't a TTY, colorizing is disabled. Examples:: >>> printer = ColorPrinter() >>> printer.print('boring old message') boring old message >>> printer('boring old message') boring old message >>> printer.info('check this out') check this out >>> printer.error('whoopsie') whoopsie Note: This uses the print function from Python 3. """ def __init__(self, color_map=None): self.colorizer = Colorizer(color_map) def print(self, *args, **kwargs): """Like built-in ``print()`` but colorizes strings. Pass ``color`` as a keyword arg to colorize ``*args`` before printing them. If no ``color`` is passed or if ``file`` is not a TTY, *args will printed without color. See :meth:`Colorizer.colorize` for more info on how colorization works and advanced colorization options. 
""" color = kwargs.pop('color', NONE) file = kwargs.get('file', sys.stdout) try: is_a_tty = file.isatty() except AttributeError: is_a_tty = False if is_a_tty: colorizer_kwargs = kwargs.copy() colorizer_kwargs['color'] = color colorizer_kwargs.pop('file', None) colorizer_kwargs.setdefault('end', os.linesep) string = self.colorizer.colorize(*args, **colorizer_kwargs) print_kwargs = kwargs.copy() print_kwargs.pop('sep', None) print_kwargs['end'] = '' print(string, **print_kwargs) else: args = [a for a in args if not isinstance(a, Color)] print(*args, **kwargs) __call__ = print # Default public API colorizer = Colorizer() printer = ColorPrinter() if __name__ == '__main__': # A basic demonstration print(colorizer.header('Using print() to print colorized strings:')) print(colorizer(RED, 'red', GREEN, 'green', BLUE, 'blue')) print(colorizer.info('Some info')) print(colorizer.warning('I don\'t know about that...')) print(colorizer.success('Achievement unlocked :)')) print(colorizer.error('Something bad happened :(')) print() printer.header('Using ColorPrinter to print strings:') printer(RED, 'red', GREEN, 'green', BLUE, 'blue') printer.info('Some info') printer.warning('I don\'t know about that...') printer.success('Achievement unlocked :)') printer.error('Something bad happened :(') print() print('Printing to a non-TTY:') original_isatty = sys.stdout.isatty sys.stdout.isatty = lambda: False printer.error('Something bad happened (not colorized)') sys.stdout.isatty = original_isatty
# -*- coding: utf-8 -*- """\ This is a python port of "Goose" orignialy licensed to Gravity.com under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Python port was written by Xavier Grangier for Recrutae Gravity.com licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from copy import deepcopy from goose.extractors import BaseExtractor KNOWN_ARTICLE_CONTENT_TAGS = [ {'tag': 'article'}, {'attr': 'itemprop', 'value': 'articleBody'}, {'attr': 'class', 'value': 'post-content'}, {'attr': 'class', 'value': "story-body-text"}, {'attr': 'class', 'value': "story-content"}, ] class ContentExtractor(BaseExtractor): def get_language(self): """\ Returns the language is by the article or the configuration language """ # we don't want to force the target language # so we use the article.meta_lang if self.config.use_meta_language: if self.article.meta_lang: return self.article.meta_lang[:2] return self.config.target_language def get_known_article_tags(self): for item in KNOWN_ARTICLE_CONTENT_TAGS: nodes = self.parser.getElementsByTag( self.article.doc, **item) if len(nodes): return nodes[0] return None def is_articlebody(self, node): for item in KNOWN_ARTICLE_CONTENT_TAGS: # attribute if "attr" in item and "value" in item: if self.parser.getAttribute(node, item['attr']) == item['value']: return True # tag if "tag" in item: if node.tag == item['tag']: return True return False def calculate_best_node(self): doc = self.article.doc top_node = None nodes_to_check = self.nodes_to_check(doc) starting_boost = float(1.0) cnt = 0 i = 0 parent_nodes = [] nodes_with_text = [] for node in nodes_to_check: text_node = self.parser.getText(node) word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node) high_link_density = self.is_highlink_density(node) if word_stats.get_stopword_count() > 2 and not high_link_density: nodes_with_text.append(node) nodes_number = len(nodes_with_text) negative_scoring = 0 bottom_negativescore_nodes = float(nodes_number) * 0.25 for node in nodes_with_text: boost_score = float(0) # boost if(self.is_boostable(node)): if cnt >= 0: boost_score = float((1.0 / starting_boost) * 50) starting_boost += 1 # nodes_number if nodes_number > 15: if (nodes_number - i) <= bottom_negativescore_nodes: booster = float(bottom_negativescore_nodes - (nodes_number - i)) boost_score = float(-pow(booster, float(2))) negscore = abs(boost_score) + negative_scoring if negscore > 40: boost_score = float(5) text_node = self.parser.getText(node) word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node) upscore = int(word_stats.get_stopword_count() + boost_score) # parent node parent_node = self.parser.getParent(node) self.update_score(parent_node, upscore) self.update_node_count(parent_node, 1) if parent_node not in parent_nodes: parent_nodes.append(parent_node) # parentparent node parent_parent_node = self.parser.getParent(parent_node) if parent_parent_node is not None: 
self.update_node_count(parent_parent_node, 1) self.update_score(parent_parent_node, upscore / 2) if parent_parent_node not in parent_nodes: parent_nodes.append(parent_parent_node) cnt += 1 i += 1 top_node_score = 0 for e in parent_nodes: score = self.get_score(e) if score > top_node_score: top_node = e top_node_score = score if top_node is None: top_node = e return top_node def is_boostable(self, node): """\ alot of times the first paragraph might be the caption under an image so we'll want to make sure if we're going to boost a parent node that it should be connected to other paragraphs, at least for the first n paragraphs so we'll want to make sure that the next sibling is a paragraph and has at least some substatial weight to it """ para = "p" steps_away = 0 minimum_stopword_count = 5 max_stepsaway_from_node = 3 nodes = self.walk_siblings(node) for current_node in nodes: # p current_node_tag = self.parser.getTag(current_node) if current_node_tag == para: if steps_away >= max_stepsaway_from_node: return False paraText = self.parser.getText(current_node) word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(paraText) if word_stats.get_stopword_count() > minimum_stopword_count: return True steps_away += 1 return False def walk_siblings(self, node): current_sibling = self.parser.previousSibling(node) b = [] while current_sibling is not None: b.append(current_sibling) previousSibling = self.parser.previousSibling(current_sibling) current_sibling = None if previousSibling is None else previousSibling return b def add_siblings(self, top_node): # in case the extraction used known attributes # we don't want to add sibilings if self.is_articlebody(top_node): return top_node baselinescore_siblings_para = self.get_siblings_score(top_node) results = self.walk_siblings(top_node) for current_node in results: ps = self.get_siblings_content(current_node, baselinescore_siblings_para) for p in ps: top_node.insert(0, p) return top_node def get_siblings_content(self, current_sibling, baselinescore_siblings_para): """\ adds any siblings that may have a decent score to this node """ if current_sibling.tag == 'p' and len(self.parser.getText(current_sibling)) > 0: e0 = current_sibling if e0.tail: e0 = deepcopy(e0) e0.tail = '' return [e0] else: potential_paragraphs = self.parser.getElementsByTag(current_sibling, tag='p') if potential_paragraphs is None: return None else: ps = [] for first_paragraph in potential_paragraphs: text = self.parser.getText(first_paragraph) if len(text) > 0: word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text) paragraph_score = word_stats.get_stopword_count() sibling_baseline_score = float(.30) high_link_density = self.is_highlink_density(first_paragraph) score = float(baselinescore_siblings_para * sibling_baseline_score) if score < paragraph_score and not high_link_density: p = self.parser.createElement(tag='p', text=text, tail=None) ps.append(p) return ps def get_siblings_score(self, top_node): """\ we could have long articles that have tons of paragraphs so if we tried to calculate the base score against the total text score of those paragraphs it would be unfair. So we need to normalize the score based on the average scoring of the paragraphs within the top node. For example if our total score of 10 paragraphs was 1000 but each had an average value of 100 then 100 should be our base. 
""" base = 100000 paragraphs_number = 0 paragraphs_score = 0 nodes_to_check = self.parser.getElementsByTag(top_node, tag='p') for node in nodes_to_check: text_node = self.parser.getText(node) word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node) high_link_density = self.is_highlink_density(node) if word_stats.get_stopword_count() > 2 and not high_link_density: paragraphs_number += 1 paragraphs_score += word_stats.get_stopword_count() if paragraphs_number > 0: base = paragraphs_score / paragraphs_number return base def update_score(self, node, addToScore): """\ adds a score to the gravityScore Attribute we put on divs we'll get the current score then add the score we're passing in to the current """ current_score = 0 score_string = self.parser.getAttribute(node, 'gravityScore') if score_string: current_score = int(score_string) new_score = current_score + addToScore self.parser.setAttribute(node, "gravityScore", str(new_score)) def update_node_count(self, node, add_to_count): """\ stores how many decent nodes are under a parent node """ current_score = 0 count_string = self.parser.getAttribute(node, 'gravityNodes') if count_string: current_score = int(count_string) new_score = current_score + add_to_count self.parser.setAttribute(node, "gravityNodes", str(new_score)) def is_highlink_density(self, e): """\ checks the density of links within a node, is there not much text and most of it contains linky shit? if so it's no good """ links = self.parser.getElementsByTag(e, tag='a') if links is None or len(links) == 0: return False text = self.parser.getText(e) words = text.split(' ') words_number = float(len(words)) sb = [] for link in links: sb.append(self.parser.getText(link)) linkText = ''.join(sb) linkWords = linkText.split(' ') numberOfLinkWords = float(len(linkWords)) numberOfLinks = float(len(links)) linkDivisor = float(numberOfLinkWords / words_number) score = float(linkDivisor * numberOfLinks) if score >= 1.0: return True return False # return True if score > 1.0 else False def get_score(self, node): """\ returns the gravityScore as an integer from this node """ return self.get_node_gravity_score(node) or 0 def get_node_gravity_score(self, node): grvScoreString = self.parser.getAttribute(node, 'gravityScore') if not grvScoreString: return None return int(grvScoreString) def nodes_to_check(self, doc): """\ returns a list of nodes we want to search on like paragraphs and tables """ nodes_to_check = [] for tag in ['p', 'pre', 'td']: items = self.parser.getElementsByTag(doc, tag=tag) nodes_to_check += items return nodes_to_check def is_table_and_no_para_exist(self, e): subParagraphs = self.parser.getElementsByTag(e, tag='p') for p in subParagraphs: txt = self.parser.getText(p) if len(txt) < 25: self.parser.remove(p) subParagraphs2 = self.parser.getElementsByTag(e, tag='p') if len(subParagraphs2) == 0 and e.tag != "td": return True return False def is_nodescore_threshold_met(self, node, e): top_node_score = self.get_score(node) current_nodeScore = self.get_score(e) thresholdScore = float(top_node_score * .08) if (current_nodeScore < thresholdScore) and e.tag != 'td': return False return True def post_cleanup(self): """\ remove any divs that looks like non-content, clusters of links, or paras with no gusto """ targetNode = self.article.top_node node = self.add_siblings(targetNode) for e in self.parser.getChildren(node): e_tag = self.parser.getTag(e) if e_tag != 'p': if self.is_highlink_density(e) \ or self.is_table_and_no_para_exist(e) \ or not 
                        self.is_nodescore_threshold_met(node, e):
                    self.parser.remove(e)
        return node


class StandardContentExtractor(ContentExtractor):
    pass
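
# ---------------------------------------------------------------------------
# Added, hedged illustration (not part of goose): a self-contained sketch of
# the scoring idea behind calculate_best_node above.  Candidate paragraphs
# contribute their stopword count plus a decaying boost, the score is
# credited to the parent and half of it to the grandparent, and the highest
# scoring ancestor wins.  It is simplified: the real method only boosts
# paragraphs that pass is_boostable and also applies negative scores near
# the bottom of long articles; plain tuples stand in for DOM nodes here.
def _demo_score_propagation(paragraphs):
    """paragraphs: iterable of (parent_id, grandparent_id, stopword_count)."""
    scores = {}
    starting_boost = 1.0
    for parent, grandparent, stopword_count in paragraphs:
        boost = (1.0 / starting_boost) * 50
        starting_boost += 1
        upscore = int(stopword_count + boost)
        scores[parent] = scores.get(parent, 0) + upscore
        if grandparent is not None:
            scores[grandparent] = scores.get(grandparent, 0) + upscore / 2
    return max(scores, key=scores.get) if scores else None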
import importlib import inspect from datetime import timedelta, datetime from dateutil.relativedelta import relativedelta, weekday from dateutil.relativedelta import weekdays as wdays from picklefield.fields import PickledObjectField from django.db import models from django.db import transaction from django.utils.timezone import now from six import get_method_self, integer_types from .exceptions import InvalidInterval class Job(models.Model): SCHEDULED = 0 QUEUED = 1 FINISHED = 2 FAILED = 3 STARTED = 4 FLOW = 5 STATUS_CHOICES = ( (SCHEDULED, 'scheduled'), (QUEUED, 'queued'), (FINISHED, 'finished'), (FAILED, 'failed'), (STARTED, 'started'), (FLOW, 'flow'), ) uuid = models.CharField(max_length=64, null=True, blank=True) connection = None created_at = models.DateTimeField() origin = models.CharField(max_length=254, null=True, blank=True) queue = models.ForeignKey('Queue', null=True, blank=True) instance = PickledObjectField(null=True, blank=True) func_name = models.CharField(max_length=254) args = PickledObjectField(blank=True) kwargs = PickledObjectField(blank=True) description = models.CharField(max_length=254) result_ttl = models.IntegerField(null=True, blank=True) status = models.PositiveIntegerField(null=True, blank=True, choices=STATUS_CHOICES) enqueued_at = models.DateTimeField(null=True, blank=True) scheduled_for = models.DateTimeField() repeat = PickledObjectField(null=True, blank=True, help_text="Number of times to repeat. -1 for forever.") interval = PickledObjectField(null=True, blank=True, help_text="Timedelta till next job") between = models.CharField(max_length=5, null=True, blank=True) weekdays = PickledObjectField(blank=True, null=True) ended_at = models.DateTimeField(null=True, blank=True) expired_at = models.DateTimeField('expires', null=True, blank=True) result = PickledObjectField(null=True, blank=True) exc_info = models.TextField(null=True, blank=True) timeout = models.PositiveIntegerField(null=True, blank=True) meta = PickledObjectField(blank=True) flow = models.ForeignKey('FlowStore', null=True, blank=True) if_failed = models.CharField(max_length=64, null=True, blank=True) if_result = models.CharField(max_length=64, null=True, blank=True) def __unicode__(self): return self.get_call_string() @classmethod def create(cls, func, args=None, kwargs=None, connection=None, result_ttl=None, status=None, scheduled_for=None, interval=0, repeat=0, between=None, weekdays=None): """Creates a new Job instance for the given function, arguments, and keyword arguments. """ if args is None: args = () if kwargs is None: kwargs = {} assert isinstance(args, tuple), \ '%r is not a valid args list.' % (args,) assert isinstance(kwargs, dict), \ '%r is not a valid kwargs dict.' 
% (kwargs,) job = cls() job.connection = connection job.created_at = now() if inspect.ismethod(func): job.instance = get_method_self(func) job.func_name = func.__name__ elif inspect.isfunction(func) or inspect.isbuiltin(func): job.func_name = '%s.%s' % (func.__module__, func.__name__) else: # we expect a string job.func_name = func job.args = args job.kwargs = kwargs job.description = job.get_call_string()[:254] job.result_ttl = result_ttl job.status = status job.scheduled_for = scheduled_for job.interval = interval job.between = between job.repeat = repeat job.weekdays = weekdays job.clean() return job def clean(self): if isinstance(self.interval, int) and self.interval >= 0: self.interval = relativedelta(seconds=self.interval) elif self.scheduled_for and not ( isinstance(self.interval, timedelta) or isinstance(self.interval, relativedelta)): raise InvalidInterval( "Interval must be a positive integer," " timedelta, or relativedelta instance") @classmethod def _get_job_or_promise(cls, conn, queue, timeout): """ Helper function that pops the job from the queue or returns a queue_name (the promise) and a revised timeout. The job is considered started at this point. The promised queue name is a queue to be polled at the timeout. """ promise = None with transaction.commit_on_success(using=conn): try: qs = cls.objects.using(conn).select_for_update().filter( queue_id=queue.name) if queue.scheduled: near_future = now() if timeout: near_future += timedelta(seconds=timeout) job = qs.filter(scheduled_for__lte=near_future).order_by( 'scheduled_for')[0] if job.scheduled_for > now(): # ensure the next listen times-out # when scheduled job is due timed = near_future - now() if timed.seconds > 1: timeout = timed.seconds promise = job.queue_id job = None else: job = qs.order_by('id')[0] if job: job.queue = None job.status = Job.STARTED job.save() return job, None, timeout except IndexError: pass return None, promise, timeout @property def func(self): func_name = self.func_name if func_name is None: return None if self.instance: return getattr(self.instance, func_name) module_name, func_name = func_name.rsplit('.', 1) module = importlib.import_module(module_name) return getattr(module, func_name) def get_schedule_options(self): """Humanized schedule options""" s = [] if isinstance(self.repeat, integer_types) and self.repeat < 0: s.append('repeat forever') elif isinstance(self.repeat, integer_types) and self.repeat > 0: s.append('repeat %i times' % self.repeat) elif isinstance(self.repeat, datetime): s.append('repeat until %s,' % self.repeat.isoformat()[:16]) if self.interval and \ (isinstance(self.interval, relativedelta) or \ isinstance(self.interval, timedelta)) \ and self.interval.seconds > 0: s.append('every %s' % str(self.interval)) if self.between: s.append('between %s' % self.between) if self.weekdays: s.append('on any') for day in self.weekdays: if isinstance(day, weekday): s.append('%s,' % str(day)) else: s.append('%s,' % str(wdays[day])) if s: s = ' '.join(s) if s[-1] == ',': s = s[:-1] first_letter = s[0].capitalize() s = first_letter + s[1:] return s get_schedule_options.short_description = 'schedule options' def get_ttl(self, default_ttl=None): """Returns ttl for a job that determines how long a job and its result will be persisted. In the future, this method will also be responsible for determining ttl for repeated jobs. 
""" return default_ttl if self.result_ttl is None else self.result_ttl # Representation def get_call_string(self): # noqa """Returns a string representation of the call, formatted as a regular Python function invocation statement. """ if self.func_name is None: return 'None' arg_list = [repr(arg) for arg in self.args] arg_list += ['%s=%r' % (k, v) for k, v in self.kwargs.items()] args = ', '.join(arg_list) return '%s(%s)' % (self.func_name, args) # Job execution def perform(self): # noqa """Invokes the job function with the job arguments.""" self.result = self.func(*self.args, **self.kwargs) return self.result def save(self, *args, **kwargs): kwargs.setdefault('using', self.connection) if not self.enqueued_at: self.enqueued_at = now() if not self.scheduled_for: self.scheduled_for = self.enqueued_at super(Job, self).save(*args, **kwargs) class FailedJob(Job): class Meta: proxy = True class QueuedJob(Job): class Meta: proxy = True class ScheduledJob(Job): class Meta: proxy = True class DequeuedJob(Job): class Meta: proxy = True
"""\ This class implements polynomial functions over a single variable. It represents the polynomial as a list of numbers and allows most arithmetic operations, using conventional Python syntax. It does not do symbolic manipulations. Instead, you can do things like this: >>> x = SimplePolynomial() >>> quadratic = (x+1)*(x-1) >>> str(quadratic) 'X**2 - 1' >>> quadratic(4) 15 >>> for i in range(4): ... polynomial = (x+1)**i ... i, str(polynomial), polynomial(1) (0, '1', 1) (1, 'X + 1', 2) (2, 'X**2 + 2*X + 1', 4) (3, 'X**3 + 3*X**2 + 3*X + 1', 8) """ from __future__ import division, generators from operator import add try: from itertools import izip_longest except ImportError: # The izip_longest function was added in version 2.6 # If we can't find it, use an equivalent implementation. from itertools import chain, repeat class ZipExhausted(Exception): pass def izip_longest(*args, **kwds): # izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D- fillvalue = kwds.get('fillvalue') counter = [len(args) - 1] def sentinel(): if not counter[0]: raise ZipExhausted counter[0] -= 1 yield fillvalue fillers = repeat(fillvalue) iterators = [chain(it, sentinel(), fillers) for it in args] try: while iterators: yield tuple(map(next, iterators)) except ZipExhausted: pass try: from numbers import Number except ImportError: # The numbers module was added in version 2.6 # If we can't find it, use an equivalent implementation. Number = (int, float, long, complex) class SimplePolynomial(object): def __init__(self, terms=[0,1]): """\ >>> str(SimplePolynomial()) 'X' """ try: while terms[-1] == 0: del terms[-1] except IndexError: pass self.terms = list(terms) def __str__(self): """\ Needs some work, but adequate. """ l = len(self.terms) if l == 0: return '0' if l == 1: return str(self.terms[0]) result = [] for i, c in reversed(list(enumerate(self.terms))): if c == 0: continue if c < 0: result.append('-') c = - c else: result.append('+') if c == 1: if i == 0: result.append('1') elif i == 1: result.append('X') else: result.append('X**%g' % i) else: if i == 0: result.append('%g' % c) elif i == 1: result.append('%g*X' % c) else: result.append('%g*X**%d' % (c, i)) if len(result) == 0: return '0' if result[0] == '-': result[1] = '-'+result[1] del result[0] return ' '.join(result) def __add__(self, other): """\ >>> str(SimplePolynomial() + 1) 'X + 1' >>> str(1 + SimplePolynomial()) 'X + 1' >>> str(SimplePolynomial() + SimplePolynomial()) '2*X' """ if len(self.terms) == 0: return other if isinstance(other, Number): terms = self.terms[:] terms[0] += other return SimplePolynomial(terms) return SimplePolynomial([ add(*pair) for pair in izip_longest(self.terms, other.terms, fillvalue=0) ]) # Since addition is commutative, reuse __add__ __radd__ = __add__ def __neg__(self): """\ >>> str(- SimplePolynomial()) '-X' """ return SimplePolynomial([-c for c in self.terms]) def __sub__(self, other): """\ >>> str(SimplePolynomial() - 1) 'X - 1' >>> str(SimplePolynomial() - SimplePolynomial()) '0' """ return self + -other def __rsub__(self, other): """\ >>> str(1 - SimplePolynomial()) '-X + 1' """ return -self + other def __mul__(self, other): """\ >>> str(SimplePolynomial() * 2) '2*X' >>> str(2 * SimplePolynomial()) '2*X' >>> str(SimplePolynomial() * SimplePolynomial()) 'X**2' """ if isinstance(other, Number): return SimplePolynomial([c * other for c in self.terms]) terms = [0]*(len(self.terms)+len(other.terms)) for i1, c1 in enumerate(self.terms): for i2, c2 in enumerate(other.terms): terms[i1+i2] += c1*c2 return 
SimplePolynomial(terms) # Since multiplication is commutative, reuse __mul__ __rmul__ = __mul__ def __truediv__(self, other): """\ Implements some simple forms of division. See http://en.wikipedia.org/wiki/Synthetic_division for details. >>> str(SimplePolynomial() / 2) '0.5*X' >>> x = SimplePolynomial() >>> quotient, remainder = (x**3 - 12*x**2 - 42) / (x - 3) >>> str(quotient), str(remainder) ('X**2 - 9*X - 27', '-123') """ if isinstance(other, Number): return SimplePolynomial([c / other for c in self.terms]) if len(other.terms) == 1: return self/other.terms[0] assert len(other.terms) == 2 dividend = self.terms[:] divisor = other.terms[:] assert divisor[-1] == 1 xi = 0 result = [] for i in xrange(-1, -len(dividend) - 1, -1): xi = dividend[i] - xi * divisor[0] result.insert(0, xi) return SimplePolynomial(result[1:]), result[0] raise NotImplementedError('synthetic division') __div__ = __truediv__ def __eq__(self, other): """\ >>> SimplePolynomial() == SimplePolynomial() True >>> SimplePolynomial() - SimplePolynomial() == 0 True """ if isinstance(other, SimplePolynomial): return self.terms == other.terms if isinstance(other, Number): if len(self.terms) > 1: return False try: self.terms[0] == other except IndexError: return other == 0 return False def __ne__(self, other): return not self == other def copy(self): return SimplePolynomial(self.terms[:]) def __pow__(self, exponent): """\ Uses the Russian Peasant Multiplication algorithm. >>> str(SimplePolynomial() ** 2) 'X**2' >>> str(SimplePolynomial() ** 0.5) Traceback (most recent call last): ... NotImplementedError: exponent is not an integer >>> str(SimplePolynomial() ** -1) Traceback (most recent call last): ... NotImplementedError: exponent is less than zero """ if not isinstance(exponent, int): raise NotImplementedError('exponent is not an integer') if exponent < 0: raise NotImplementedError('exponent is less than zero') tmp = self.copy() result = SimplePolynomial([1]) while exponent > 0: if exponent & 1: result *= tmp tmp *= tmp exponent >>= 1 return result def __call__(self, x): """\ Evaluate the polynomial for the given value using the Horner scheme. >>> SimplePolynomial()(1) 1 """ result = 0 for c in reversed(self.terms): result = result * x + c return result def derivative(self): """\ >>> str(SimplePolynomial().derivative()) '1' """ terms = [i*c for i, c in enumerate(self.terms)] return SimplePolynomial(terms[1:]) def integrate(self, const=0): """\ >>> str(SimplePolynomial().integrate()) '0.5*X**2' """ terms = [const] terms.extend([c/(i+1) for i, c in enumerate(self.terms)]) return SimplePolynomial(terms) if __name__ == '__main__': import doctest doctest.testmod()
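
# ---------------------------------------------------------------------------
# Added example: combining the pieces above -- synthetic division,
# differentiation and an integration round-trip.  Only names defined in this
# module are used.
def _demo_polynomial_calculus():
    x = SimplePolynomial()
    cubic = (x + 2) * (x - 1) * (x - 3)        # X**3 - 2*X**2 - 5*X + 6
    quotient, remainder = cubic / (x - 1)      # synthetic division by a monic binomial
    assert remainder == 0
    assert cubic.derivative().integrate(const=6) == cubic   # constant term restored
    return str(quotient)                       # 'X**2 - X - 6'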
# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import weakref from oslo_log import log as logging from six import iteritems from sqlalchemy.orm import exc as orm_exc from sqlalchemy import sql from tacker.common import exceptions as n_exc from tacker.db import sqlalchemyutils LOG = logging.getLogger(__name__) class CommonDbMixin(object): """Common methods used in core and service plugins.""" # Plugins, mixin classes implementing extension will register # hooks into the dict below for "augmenting" the "core way" of # building a query for retrieving objects from a model class. # To this aim, the register_model_query_hook and unregister_query_hook # from this class should be invoked _model_query_hooks = {} # This dictionary will store methods for extending attributes of # api resources. Mixins can use this dict for adding their own methods # TODO(salvatore-orlando): Avoid using class-level variables _dict_extend_functions = {} @classmethod def register_model_query_hook(cls, model, name, query_hook, filter_hook, result_filters=None): """Register a hook to be invoked when a query is executed. Add the hooks to the _model_query_hooks dict. Models are the keys of this dict, whereas the value is another dict mapping hook names to callables performing the hook. Each hook has a "query" component, used to build the query expression and a "filter" component, which is used to build the filter expression. Query hooks take as input the query being built and return a transformed query expression. Filter hooks take as input the filter expression being built and return a transformed filter expression """ model_hooks = cls._model_query_hooks.get(model) if not model_hooks: # add key to dict model_hooks = {} cls._model_query_hooks[model] = model_hooks model_hooks[name] = {'query': query_hook, 'filter': filter_hook, 'result_filters': result_filters} @property def safe_reference(self): """Return a weakref to the instance. Minimize the potential for the instance persisting unnecessarily in memory by returning a weakref proxy that won't prevent deallocation. 
""" return weakref.proxy(self) def _model_query(self, context, model): query = context.session.query(model) # define basic filter condition for model query # NOTE(jkoelker) non-admin queries are scoped to their tenant_id # NOTE(salvatore-orlando): unless the model allows for shared objects query_filter = None if not context.is_admin and hasattr(model, 'tenant_id'): if hasattr(model, 'shared'): query_filter = ((model.tenant_id == context.tenant_id) | (model.shared == sql.true())) else: query_filter = (model.tenant_id == context.tenant_id) # Execute query hooks registered from mixins and plugins for _name, hooks in iteritems(self._model_query_hooks.get(model, {})): query_hook = hooks.get('query') if isinstance(query_hook, basestring): query_hook = getattr(self, query_hook, None) if query_hook: query = query_hook(context, model, query) filter_hook = hooks.get('filter') if isinstance(filter_hook, basestring): filter_hook = getattr(self, filter_hook, None) if filter_hook: query_filter = filter_hook(context, model, query_filter) # NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the # condition, raising an exception if query_filter is not None: query = query.filter(query_filter) # Don't list the deleted entries if hasattr(model, 'deleted_at'): query = query.filter_by(deleted_at=None) return query def _fields(self, resource, fields): if fields: return dict(((key, item) for key, item in resource.items() if key in fields)) return resource def _get_tenant_id_for_create(self, context, resource): if context.is_admin and 'tenant_id' in resource: tenant_id = resource['tenant_id'] elif ('tenant_id' in resource and resource['tenant_id'] != context.tenant_id): reason = _('Cannot create resource for another tenant') raise n_exc.AdminRequired(reason=reason) else: tenant_id = context.tenant_id return tenant_id def _get_by_id(self, context, model, id): query = self._model_query(context, model) return query.filter(model.id == id).one() def _apply_filters_to_query(self, query, model, filters): if filters: for key, value in iteritems(filters): column = getattr(model, key, None) if column: query = query.filter(column.in_(value)) for _name, hooks in iteritems( self._model_query_hooks.get(model, {})): result_filter = hooks.get('result_filters', None) if isinstance(result_filter, basestring): result_filter = getattr(self, result_filter, None) if result_filter: query = result_filter(query, filters) return query def _apply_dict_extend_functions(self, resource_type, response, db_object): for func in self._dict_extend_functions.get( resource_type, []): args = (response, db_object) if isinstance(func, basestring): func = getattr(self, func, None) else: # must call unbound method - use self as 1st argument args = (self,) + args if func: func(*args) def _get_collection_query(self, context, model, filters=None, sorts=None, limit=None, marker_obj=None, page_reverse=False): collection = self._model_query(context, model) collection = self._apply_filters_to_query(collection, model, filters) if limit and page_reverse and sorts: sorts = [(s[0], not s[1]) for s in sorts] collection = sqlalchemyutils.paginate_query(collection, model, limit, sorts, marker_obj=marker_obj) return collection def _get_collection(self, context, model, dict_func, filters=None, fields=None, sorts=None, limit=None, marker_obj=None, page_reverse=False): query = self._get_collection_query(context, model, filters=filters, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) items = [dict_func(c, fields) for c in query] if 
limit and page_reverse: items.reverse() return items def _get_collection_count(self, context, model, filters=None): return self._get_collection_query(context, model, filters).count() def _get_marker_obj(self, context, resource, limit, marker): if limit and marker: return getattr(self, '_get_%s' % resource)(context, marker) return None def _filter_non_model_columns(self, data, model): """Removes attributes from data. Remove all the attributes from data which are not columns of the model passed as second parameter. """ columns = [c.name for c in model.__table__.columns] return dict((k, v) for (k, v) in iteritems(data) if k in columns) def _get_by_name(self, context, model, name): try: query = self._model_query(context, model) return query.filter(model.name == name).one() except orm_exc.NoResultFound: LOG.info(_("No result found for %(name)s in %(model)s table"), {'name': name, 'model': model})
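
# ---------------------------------------------------------------------------
# Added, hedged sketch: how a plugin could use register_model_query_hook
# above to narrow every query built for a model.  'MyResource' and the hook
# names are hypothetical; the hook signatures follow _model_query, which
# calls query hooks as hook(context, model, query) and filter hooks as
# hook(context, model, query_filter).
def _example_register_hooks(MyResource):
    def _active_only_query(context, model, query):
        return query.filter(model.status == 'ACTIVE')

    def _keep_filter(context, model, query_filter):
        return query_filter                    # leave the default tenant scoping as-is

    CommonDbMixin.register_model_query_hook(
        MyResource, 'active_only',
        query_hook=_active_only_query,
        filter_hook=_keep_filter,
        result_filters=None)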
from __future__ import division import numpy as np from numpy.testing import TestCase, assert_almost_equal from numpy.testing.decorators import slow from utils import assert_within_tol, run_module_suite from skmonaco import mcquad HALF_ROOT_PI = 0.5*np.sqrt(np.pi) class TestMCQuad(TestCase): """ The expected variance for N integration points is: (<f^2> - <f>^2) / N where <.> denotes the integration of function f. """ def setUp(self): self.gaussian = lambda x: np.exp(-sum(x**2)) def calc_volume(self,xl,xu): xl = np.array(xl) xu = np.array(xu) return np.multiply.reduce(abs(xu-xl)) def run_serial(self,f,npoints,expected_value,expected_variance,**kwargs): res, sd = mcquad(f,npoints,nprocs=1,**kwargs) volume = self.calc_volume(kwargs["xl"],kwargs["xu"]) error = volume*np.sqrt(expected_variance/float(npoints)) assert_within_tol(res,expected_value,3.*max(error,1e-10), "Error in <f> in serial run.") assert_within_tol(sd,error,0.1*max(error,1e-10), "Error in expected error in serial run.") def run_parallel(self,f,npoints,expected_value,expected_variance,**kwargs): batch_size = npoints/10 res, sd = mcquad(f,npoints,nprocs=2,batch_size=batch_size,**kwargs) volume = self.calc_volume(kwargs["xl"],kwargs["xu"]) error = volume*np.sqrt(expected_variance/float(npoints)) assert_within_tol(res,expected_value,3.*max(error,1e-10), "Error in <f> in parallel run.") assert_within_tol(sd,error,0.1*max(error,1e-10), "Error in expected error in parallel run.") def run_all(self,f,npoints,expected_value,expected_variance,**kwargs): self.run_serial(f,npoints,expected_value,expected_variance,**kwargs) self.run_parallel(f,npoints,expected_value,expected_variance,**kwargs) def run_check_unseeded_distribution(self,f,ntrials,*args,**kwargs): """ Check that the results returned by integrating f are normally distributed. Does not try to seed each trial. """ import scipy.stats results, errors = [], [] for itrial in range(ntrials): res, err = mcquad(f,*args,**kwargs) results.append(res) errors.append(err) results = np.array(results).flatten() w,p = scipy.stats.shapiro(results) self.assertGreater(p,0.1) def run_check_seeded_distribution(self,f,ntrials,*args,**kwargs): """ Check that the results returned by integrating f are normally distributed. Seeds each trial with the trial number. """ import scipy.stats results, errors = [], [] for itrial in range(ntrials): res, err = mcquad(f,*args,seed=itrial,**kwargs) results.append(res) errors.append(err) results = np.array(results).flatten() w,p = scipy.stats.shapiro(results) self.assertGreater(p,0.1) def const(self,x): """ Constant function. <f> = 1.0 <(f - <f>)^2> = 0.0 """ return 1.0 def prod(self,x): """ Product_i x_i. If the integral region is between 0 and 1: <f> = 1/2^d <(f-<f>)^2> = (1/3)^d - 1/2^(d-1)+1 """ return np.multiply.reduce(x) def prod_variance(self,d): """ Variance of the function Product_i x_i as a function of the dimensionality. """ return (1./3.**d) - 0.25**d def test_const_1d(self): """ Constant function between 0 and 1. Value : 1. Variance : 0. """ self.run_all(self.const,2000,1.,0.,xl=[0.],xu=[1.]) def test_const_1db(self): """ Constant function between -1 and 2. Value: 3. Variance 0. """ self.run_all(self.const,2000,3.0,0.0,xl=[-1.],xu=[2.]) def test_const_6d(self): """ Constant function between -1. and 2. in six dimensions. Value: 3**6 Variance: 0. """ self.run_all(self.const,20000,3.0**6,0.0,xl=[-1.]*6,xu=[2.]*6) def test_gaussian1d(self): pass def test_prod1d(self): """ f(x) = x between 0 and 1. 
""" npoints = 2000 variance = self.prod_variance(1) self.run_all(self.prod,npoints,0.5,variance,xl=[0.],xu=[1.]) def test_prod1db(self): """ f(x) = x between -2 and 1. """ npoints = 2000 variance = (1+2**3)/3. - 1.5**2 self.run_all(self.prod,npoints,-1.5,variance,xl=[-2.],xu=[1.]) def test_prod2d(self): """ f(x,y) = x*y between 0 and 1. """ npoints = 2000 variance = self.prod_variance(2) self.run_all(self.prod,npoints,0.25,variance,xl=[0.,0.],xu=[1.,1.]) def test_prod6d(self): """ f(x,...) = product_1..6 x_i between 0 and 1. """ npoints = 50000 variance = self.prod_variance(6) self.run_all(self.prod,npoints,0.5**6,variance,xl=[0.]*6,xu=[1.]*6) @slow def test_distribution_serial_unseeded(self): """ Check that unseeded integrals are normally distributed (serial). Use Shapiro-Wilkes test for normality. """ ntrials = 1000 npoints = 1e4 self.run_check_unseeded_distribution(lambda x:x**2, ntrials,npoints,[0.],[1.]) @slow def test_distribution_serial_seeded(self): """ Check that seeded integrals are normally distributed (serial). Use Shapiro-Wilkes test for normality. """ ntrials = 1000 npoints = 1e4 self.run_check_seeded_distribution(lambda x:x**2, ntrials,npoints,[0.],[1.]) @slow def test_distribution_parallel_unseeded(self): """ Check that unseeded integrals are normally distributed (parallel). Use Shapiro-Wilkes test for normality. """ ntrials = 1000 npoints = 1e4 self.run_check_unseeded_distribution(lambda x:x**2, ntrials,npoints,[0.],[1.],nprocs=2,batch_size=npoints/10) @slow def test_distribution_parallel_seeded(self): """ Check that seeded integrals are normally distributed (parallel). Use Shapiro-Wilkes test for normality. """ ntrials = 1000 npoints = 1e4 self.run_check_seeded_distribution(lambda x:x**2, ntrials,npoints,[0.],[1.],nprocs=2,batch_size=npoints/10) def test_args(self): """ Test passing args to the function. f(x ; a) = a*x """ a = 2. func = lambda x, a_: a_*x npoints = 2000 variance = 4.*self.prod_variance(1) self.run_all(func,npoints,a*0.5,variance,xl=[0.],xu=[1.],args=(a,)) def test_ret_arr(self): """ Test an integrand that returns an array. """ func = lambda x: np.array((x**2,x**3)) npoints = 2000 (res_sq, res_cb), (sd_sq, sd_cb) = mcquad(func,npoints,[0.],[1.],nprocs=1, seed=123456) res_sq2, sd_sq2 = mcquad(lambda x: x**2,npoints,[0.],[1.],nprocs=1,seed=123456) res_cb2, sd_cb2 = mcquad(lambda x: x**3,npoints,[0.],[1.],nprocs=1,seed=123456) assert_almost_equal(res_sq, res_sq2) assert_almost_equal(res_cb, res_cb2) assert_almost_equal(sd_sq, sd_sq2) assert_almost_equal(sd_cb, sd_cb2) def test_ret_arr_parallel(self): """ Test an integrand that returns an array: parallel implementation. """ func = lambda x: np.array((x**2,x**3)) npoints = 5000 nprocs = 2 (res_sq, res_cb), (sd_sq, sd_cb) = mcquad(func,npoints,[0.],[1.],nprocs=nprocs, seed=123456) res_sq2, sd_sq2 = mcquad(lambda x: x**2,npoints,[0.],[1.], nprocs=nprocs,seed=123456) res_cb2, sd_cb2 = mcquad(lambda x: x**3,npoints,[0.],[1.], nprocs=nprocs,seed=123456) assert_almost_equal(res_sq, res_sq2) assert_almost_equal(res_cb, res_cb2) assert_almost_equal(sd_sq, sd_sq2) assert_almost_equal(sd_cb, sd_cb2) def test_ret_arr_args(self): """ Test an integrand that returns an array with an argument. """ func = lambda x, a,b : np.array((a*x**2,b*x**3)) npoints = 2000 aval, bval = 4.,5. 
(res_sq, res_cb), (sd_sq, sd_cb) = mcquad(func,npoints,[0.],[1.],nprocs=1, seed=123456,args=(aval,bval)) res_sq2, sd_sq2 = mcquad(lambda x,a: a*x**2,npoints,[0.],[1.],nprocs=1, seed=123456,args=(aval,)) res_cb2, sd_cb2 = mcquad(lambda x,b: b*x**3,npoints,[0.],[1.],nprocs=1, seed=123456,args=(bval,)) assert_almost_equal(res_sq, res_sq2) assert_almost_equal(res_cb, res_cb2) assert_almost_equal(sd_sq, sd_sq2) assert_almost_equal(sd_cb, sd_cb2) def test_wrong_xl(self): """ Raise a ValueError if len(xl) != len(xu). """ with self.assertRaises(ValueError): mcquad(self.const,2000,xl=[0.,0.],xu=[1.]) def test_wrong_nprocs(self): """ Raise a ValueError if nprocs < 1 """ with self.assertRaises(ValueError): mcquad(self.const,2000,xl=[0.],xu=[1.],nprocs=-1) def test_wrong_npoints(self): """ Raise a ValueError if npoints < 2. """ with self.assertRaises(ValueError): mcquad(self.const,0,xl=[0.],xu=[1.]) def test_seed(self): """ Test same seed -> same result. """ npoints = 50000 res,error = mcquad(lambda x:x**2,npoints,xl=[0.],xu=[1.],seed=[1234,5678]) res2, error2 = mcquad(lambda x:x**2,npoints,xl=[0.],xu=[1.],seed=[1234,5678]) assert res == res2 assert error == error2 def test_seed_different(self): """ Test different seed -> different result. """ npoints = 50000 res,error = mcquad(lambda x: x**2,npoints,xl=[0.],xu=[1.],seed=[1235,5678]) res2, error2 = mcquad(lambda x: x**2,npoints,xl=[0.],xu=[1.],seed=[1234,5678]) assert res != res2 assert error != error2 def test_zero_volume(self): """ Passing an empty integration volume raises ValueError. """ with self.assertRaises(ValueError): mcquad(lambda x:x**2, 20000, [0.],[0.]) if __name__ == '__main__': # Command line arguments are passed directly to 'nose'. # Eg. run '$ python test_uniform.py --eval-attr="not slow"' to # avoid running the "slow" tests. import sys run_module_suite(argv=sys.argv)
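
# ---------------------------------------------------------------------------
# Added, hedged standalone example: the call pattern exercised by the tests
# above.  It estimates the 2-D Gaussian integral over the unit square; the
# exact value is (sqrt(pi)/2 * erf(1))**2, roughly 0.558, so the estimate
# should land within a few reported standard errors of that.
def _example_gaussian_2d(npoints=100000, seed=12345):
    result, error = mcquad(lambda x: np.exp(-np.sum(x**2)),
                           npoints, xl=[0., 0.], xu=[1., 1.],
                           nprocs=1, seed=seed)
    return result, error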
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import datetime import shlex import re import os try: import simplejson as json except Exception: import json from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.common.base import JsonResponse, ConnectionUserAndKey from libcloud.common.base import KeyCertificateConnection from libcloud.common.types import InvalidCredsError from libcloud.container.base import (Container, ContainerDriver, ContainerImage) from libcloud.container.providers import Provider from libcloud.container.types import ContainerState VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED, httplib.NO_CONTENT] class DockerResponse(JsonResponse): valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED, httplib.NO_CONTENT] def parse_body(self): if len(self.body) == 0 and not self.parse_zero_length_body: return self.body try: # error responses are tricky in Docker. Eg response could be # an error, but response status could still be 200 content_type = self.headers.get('content-type', 'application/json') if content_type == 'application/json' or content_type == '': if self.headers.get('transfer-encoding') == 'chunked' and \ 'fromImage' in self.request.url: body = [json.loads(chunk) for chunk in self.body.strip().replace('\r', '').split('\n')] else: body = json.loads(self.body) else: body = self.body except ValueError: m = re.search('Error: (.+?)"', self.body) if m: error_msg = m.group(1) raise Exception(error_msg) else: raise Exception( 'ConnectionError: Failed to parse JSON response') return body def parse_error(self): if self.status == 401: raise InvalidCredsError('Invalid credentials') return self.body def success(self): return self.status in self.valid_response_codes class DockerException(Exception): def __init__(self, code, message): self.code = code self.message = message self.args = (code, message) def __str__(self): return "%s %s" % (self.code, self.message) def __repr__(self): return "DockerException %s %s" % (self.code, self.message) class DockerConnection(ConnectionUserAndKey): responseCls = DockerResponse timeout = 60 def add_default_headers(self, headers): """ Add parameters that are necessary for every request If user and password are specified, include a base http auth header """ headers['Content-Type'] = 'application/json' if self.user_id and self.key: user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key))) headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8')) return headers class DockertlsConnection(KeyCertificateConnection): responseCls = DockerResponse def __init__(self, key, secret, secure=True, host='localhost', port=4243, key_file='', cert_file='', **kwargs): super(DockertlsConnection, self).__init__(key_file=key_file, cert_file=cert_file, 
secure=secure, host=host, port=port, url=None, proxy_url=None, timeout=None, backoff=None, retry_delay=None) if key_file: keypath = os.path.expanduser(key_file) is_file_path = os.path.exists(keypath) and os.path.isfile(keypath) if not is_file_path: raise InvalidCredsError( 'You need an key PEM file to authenticate with ' 'Docker tls. This can be found in the server.' ) self.key_file = key_file certpath = os.path.expanduser(cert_file) is_file_path = os.path.exists( certpath) and os.path.isfile(certpath) if not is_file_path: raise InvalidCredsError( 'You need an certificate PEM file to authenticate with ' 'Docker tls. This can be found in the server.' ) self.cert_file = cert_file def add_default_headers(self, headers): headers['Content-Type'] = 'application/json' return headers class DockerContainerDriver(ContainerDriver): """ Docker container driver class. >>> from libcloud.container.providers import get_driver >>> driver = get_driver('docker') >>> conn = driver(host='198.61.239.128', port=4243) >>> conn.list_containers() or connecting to http basic auth protected https host: >>> conn = driver('user', 'pass', host='https://198.61.239.128', port=443) connect with tls authentication, by providing a hostname, port, a private key file (.pem) and certificate (.pem) file >>> conn = driver(host='https://198.61.239.128', >>> port=4243, key_file='key.pem', cert_file='cert.pem') """ type = Provider.DOCKER name = 'Docker' website = 'http://docker.io' connectionCls = DockerConnection supports_clusters = False version = '1.24' def __init__(self, key='', secret='', secure=False, host='localhost', port=4243, key_file=None, cert_file=None): """ :param key: API key or username to used (required) :type key: ``str`` :param secret: Secret password to be used (required) :type secret: ``str`` :param secure: Whether to use HTTPS or HTTP. Note: Some providers only support HTTPS, and it is on by default. :type secure: ``bool`` :param host: Override hostname used for connections. :type host: ``str`` :param port: Override port used for connections. :type port: ``int`` :param key_file: Path to private key for TLS connection (optional) :type key_file: ``str`` :param cert_file: Path to public key for TLS connection (optional) :type cert_file: ``str`` :return: ``None`` """ if key_file: self.connectionCls = DockertlsConnection self.key_file = key_file self.cert_file = cert_file secure = True if host.startswith('https://'): secure = True # strip the prefix prefixes = ['http://', 'https://'] for prefix in prefixes: if host.startswith(prefix): host = host.strip(prefix) super(DockerContainerDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, key_file=key_file, cert_file=cert_file) if key_file or cert_file: # docker tls authentication- # https://docs.docker.com/articles/https/ # We pass two files, a key_file with the # private key and cert_file with the certificate # libcloud will handle them through LibcloudHTTPSConnection if not (key_file and cert_file): raise Exception( 'Needs both private key file and ' 'certificate file for tls authentication') self.connection.secure = secure self.connection.host = host self.connection.port = port # set API version self.version = self._get_api_version() def _ex_connection_class_kwargs(self): kwargs = {} if hasattr(self, 'key_file'): kwargs['key_file'] = self.key_file if hasattr(self, 'cert_file'): kwargs['cert_file'] = self.cert_file return kwargs def install_image(self, path): """ Install a container image from a remote path. 
:param path: Path to the container image :type path: ``str`` :rtype: :class:`libcloud.container.base.ContainerImage` """ payload = { } data = json.dumps(payload) result = self.connection.request('/v%s/images/create?fromImage=%s' % (self.version, path), data=data, method='POST') if "errorDetail" in result.body: raise DockerException(None, result.body) image_id = None # the response is slightly different if the image is already present # and it's not downloaded. both messages below indicate that the image # is available for use to the daemon if re.search(r'Downloaded newer image', result.body) or \ re.search(r'"Status: Image is up to date', result.body): if re.search(r'sha256:(?P<id>[a-z0-9]{64})', result.body): image_id = re.findall(r'sha256:(?P<id>[a-z0-9]{64})', result.body)[-1] # if there is a failure message or if there is not an image id in the # response then throw an exception. if image_id is None: raise DockerException(None, 'failed to install image') image = ContainerImage( id=image_id, name=path, path=path, version=None, driver=self.connection.driver, extra={}) return image def list_images(self): """ List the installed container images :rtype: ``list`` of :class:`libcloud.container.base.ContainerImage` """ result = self.connection.request('/v%s/images/json' % (self.version)).object images = [] for image in result: try: name = image.get('RepoTags')[0] except Exception: name = image.get('Id') images.append(ContainerImage( id=image.get('Id'), name=name, path=name, version=None, driver=self.connection.driver, extra={ "created": image.get('Created'), "size": image.get('Size'), "virtual_size": image.get('VirtualSize'), }, )) return images def list_containers(self, image=None, all=True): """ List the deployed container images :param image: Filter to containers with a certain image :type image: :class:`libcloud.container.base.ContainerImage` :param all: Show all container (including stopped ones) :type all: ``bool`` :rtype: ``list`` of :class:`libcloud.container.base.Container` """ if all: ex = '?all=1' else: ex = '' try: result = self.connection.request( "/v%s/containers/json%s" % (self.version, ex)).object except Exception as exc: errno = getattr(exc, 'errno', None) if errno == 111: raise DockerException( errno, 'Make sure docker host is accessible' 'and the API port is correct') raise containers = [self._to_container(value) for value in result] return containers def deploy_container(self, name, image, parameters=None, start=True, command=None, hostname=None, user='', stdin_open=True, tty=True, mem_limit=0, ports=None, environment=None, dns=None, volumes=None, volumes_from=None, network_disabled=False, entrypoint=None, cpu_shares=None, working_dir='', domainname=None, memswap_limit=0, port_bindings=None, network_mode='bridge', labels=None): """ Deploy an installed container image For details on the additional parameters see : http://bit.ly/1PjMVKV :param name: The name of the new container :type name: ``str`` :param image: The container image to deploy :type image: :class:`libcloud.container.base.ContainerImage` :param parameters: Container Image parameters :type parameters: ``str`` :param start: Start the container on deployment :type start: ``bool`` :rtype: :class:`Container` """ command = shlex.split(str(command)) if port_bindings is None: port_bindings = {} params = { 'name': name } payload = { 'Hostname': hostname, 'Domainname': domainname, 'ExposedPorts': ports, 'User': user, 'Tty': tty, 'OpenStdin': stdin_open, 'StdinOnce': False, 'Memory': mem_limit, 'AttachStdin': True, 
'AttachStdout': True, 'AttachStderr': True, 'Env': environment, 'Cmd': command, 'Dns': dns, 'Image': image.name, 'Volumes': volumes, 'VolumesFrom': volumes_from, 'NetworkDisabled': network_disabled, 'Entrypoint': entrypoint, 'CpuShares': cpu_shares, 'WorkingDir': working_dir, 'MemorySwap': memswap_limit, 'PublishAllPorts': True, 'PortBindings': port_bindings, 'NetworkMode': network_mode, 'Labels': labels, } data = json.dumps(payload) try: result = self.connection.request('/v%s/containers/create' % (self.version), data=data, params=params, method='POST') except Exception as e: message = e.message or str(e) if message.startswith('No such image:'): raise DockerException(None, 'No such image: %s' % image.name) else: raise DockerException(None, e) id_ = result.object['Id'] payload = { 'Binds': [], 'PublishAllPorts': True, 'PortBindings': port_bindings, } data = json.dumps(payload) if start: if float(self._get_api_version()) > 1.22: result = self.connection.request( '/v%s/containers/%s/start' % (self.version, id_), method='POST') else: result = self.connection.request( '/v%s/containers/%s/start' % (self.version, id_), data=data, method='POST') return self.get_container(id_) def get_container(self, id): """ Get a container by ID :param id: The ID of the container to get :type id: ``str`` :rtype: :class:`libcloud.container.base.Container` """ result = self.connection.request("/v%s/containers/%s/json" % (self.version, id)).object return self._to_container(result) def start_container(self, container): """ Start a container :param container: The container to be started :type container: :class:`libcloud.container.base.Container` :return: The container refreshed with current data :rtype: :class:`libcloud.container.base.Container` """ if float(self._get_api_version()) > 1.22: result = self.connection.request( '/v%s/containers/%s/start' % (self.version, container.id), method='POST') else: payload = { 'Binds': [], 'PublishAllPorts': True, } data = json.dumps(payload) result = self.connection.request( '/v%s/containers/%s/start' % (self.version, container.id), method='POST', data=data) if result.status in VALID_RESPONSE_CODES: return self.get_container(container.id) else: raise DockerException(result.status, 'failed to start container') def stop_container(self, container): """ Stop a container :param container: The container to be stopped :type container: :class:`libcloud.container.base.Container` :return: The container refreshed with current data :rtype: :class:`libcloud.container.base.Container` """ result = self.connection.request('/v%s/containers/%s/stop' % (self.version, container.id), method='POST') if result.status in VALID_RESPONSE_CODES: return self.get_container(container.id) else: raise DockerException(result.status, 'failed to stop container') def restart_container(self, container): """ Restart a container :param container: The container to be stopped :type container: :class:`libcloud.container.base.Container` :return: The container refreshed with current data :rtype: :class:`libcloud.container.base.Container` """ data = json.dumps({'t': 10}) # number of seconds to wait before killing the container result = self.connection.request('/v%s/containers/%s/restart' % (self.version, container.id), data=data, method='POST') if result.status in VALID_RESPONSE_CODES: return self.get_container(container.id) else: raise DockerException(result.status, 'failed to restart container') def destroy_container(self, container): """ Remove a container :param container: The container to be destroyed :type container: 
:class:`libcloud.container.base.Container` :return: True if the destroy was successful, False otherwise. :rtype: ``bool`` """ result = self.connection.request('/v%s/containers/%s' % (self.version, container.id), method='DELETE') return result.status in VALID_RESPONSE_CODES def ex_list_processes(self, container): """ List processes running inside a container :param container: The container to list processes for. :type container: :class:`libcloud.container.base.Container` :rtype: ``str`` """ result = self.connection.request("/v%s/containers/%s/top" % (self.version, container.id)).object return result def ex_rename_container(self, container, name): """ Rename a container :param container: The container to be renamed :type container: :class:`libcloud.container.base.Container` :param name: The new name :type name: ``str`` :rtype: :class:`libcloud.container.base.Container` """ result = self.connection.request('/v%s/containers/%s/rename?name=%s' % (self.version, container.id, name), method='POST') if result.status in VALID_RESPONSE_CODES: return self.get_container(container.id) def ex_get_logs(self, container, stream=False): """ Get container logs If stream == True, logs will be yielded as a stream From Api Version 1.11 and above we need a GET request to get the logs Logs are in different format of those of Version 1.10 and below :param container: The container to list logs for :type container: :class:`libcloud.container.base.Container` :param stream: Stream the output :type stream: ``bool`` :rtype: ``bool`` """ payload = {} data = json.dumps(payload) if float(self._get_api_version()) > 1.10: result = self.connection.request( "/v%s/containers/%s/logs?follow=%s&stdout=1&stderr=1" % (self.version, container.id, str(stream))).object logs = result else: result = self.connection.request( "/v%s/containers/%s/attach?logs=1&stream=%s&stdout=1&stderr=1" % (self.version, container.id, str(stream)), method='POST', data=data) logs = result.body return logs def ex_search_images(self, term): """Search for an image on Docker.io. 
Returns a list of ContainerImage objects >>> images = conn.ex_search_images(term='mistio') >>> images [<ContainerImage: id=rolikeusch/docker-mistio...>, <ContainerImage: id=mist/mistio, name=mist/mistio, driver=Docker ...>] :param term: The search term :type term: ``str`` :rtype: ``list`` of :class:`libcloud.container.base.ContainerImage` """ term = term.replace(' ', '+') result = self.connection.request('/v%s/images/search?term=%s' % (self.version, term)).object images = [] for image in result: name = image.get('name') images.append( ContainerImage( id=name, path=name, version=None, name=name, driver=self.connection.driver, extra={ "description": image.get('description'), "is_official": image.get('is_official'), "is_trusted": image.get('is_trusted'), "star_count": image.get('star_count'), }, )) return images def ex_delete_image(self, image): """ Remove image from the filesystem :param image: The image to remove :type image: :class:`libcloud.container.base.ContainerImage` :rtype: ``bool`` """ result = self.connection.request('/v%s/images/%s' % (self.version, image.name), method='DELETE') return result.status in VALID_RESPONSE_CODES def _to_container(self, data): """ Convert container in Container instances """ try: name = data.get('Name').strip('/') except Exception: try: name = data.get('Names')[0].strip('/') except Exception: name = data.get('Id') state = data.get('State') if isinstance(state, dict): status = data.get( 'Status', state.get('Status') if state is not None else None) else: status = data.get('Status') if 'Exited' in status: state = ContainerState.STOPPED elif status.startswith('Up '): state = ContainerState.RUNNING elif 'running' in status: state = ContainerState.RUNNING else: state = ContainerState.STOPPED image = data.get('Image') ports = data.get('Ports', []) created = data.get('Created') if isinstance(created, float): created = ts_to_str(created) extra = { 'id': data.get('Id'), 'status': data.get('Status'), 'created': created, 'image': image, 'ports': ports, 'command': data.get('Command'), 'sizerw': data.get('SizeRw'), 'sizerootfs': data.get('SizeRootFs'), } ips = [] if ports is not None: for port in ports: if port.get('IP') is not None: ips.append(port.get('IP')) return Container( id=data['Id'], name=name, image=ContainerImage( id=data.get('ImageID', None), path=image, name=image, version=None, driver=self.connection.driver ), ip_addresses=ips, state=state, driver=self.connection.driver, extra=extra) def _get_api_version(self): """ Get the docker API version information """ result = self.connection.request('/version').object result = result or {} api_version = result.get('ApiVersion') return api_version def ts_to_str(timestamp): """ Return a timestamp as a nicely formated datetime string. """ date = datetime.datetime.fromtimestamp(timestamp) date_string = date.strftime("%d/%m/%Y %H:%M %Z") return date_string
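
# ---------------------------------------------------------------------------
# Added, hedged usage sketch: a minimal end-to-end run with the driver
# defined above.  The host and port are hypothetical, and a Docker daemon
# must be reachable with its HTTP API exposed for any of this to succeed.
def _example_run_container(host='127.0.0.1', port=4243):
    driver = DockerContainerDriver(host=host, port=port)
    image = driver.install_image('ubuntu:latest')          # pulls if not present
    container = driver.deploy_container('test-box', image,
                                        command='sleep 30')
    logs = driver.ex_get_logs(container)                    # stdout/stderr as a string
    driver.stop_container(container)
    driver.destroy_container(container)
    return logs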
# ============================================================================= # Authors: PAR Government # Organization: DARPA # # Copyright (c) 2016 PAR Government # All rights reserved. # ============================================================================== import collections import json import os import tkSimpleDialog import maskgen.plugins import maskgen.software_loader from maskgen.group_filter import GroupOperationsLoader from maskgen.ui.autocomplete_it import * class PluginBuilder(tkSimpleDialog.Dialog): def __init__(self, master, gopLoader): """ :param master: :param gopLoader: @type gopLoader: GroupOperationsLoader """ self.gopLoader = gopLoader self.softwareLoader = maskgen.software_loader.SoftwareLoader() self.sourcefiletype = 'image' self.targetfiletype = 'image' self.master = master self.arguments = [] tkSimpleDialog.Dialog.__init__(self, master) def body(self, master): nameLabel = Label(master, text='Plugin Name: ') nameLabel.grid(row=0, column=0) self.nameEntry = Entry(master, width=40) self.nameEntry.grid(row=0, column=1, sticky='EW') descriptionLabel = Label(master, text='Description: ') descriptionLabel.grid(row=1, column=0) self.descriptionEntry = Text(master, width=40, height=3) self.descriptionEntry.grid(row=1, column=1, sticky='EW') cats = self.organizeOperationsByCategory() catlist = list(cats.keys()) catlist.sort() oplist = cats[catlist[0]] if len(cats) > 0 else [] self.opCatEntry = AutocompleteEntryInText(master, values=catlist, takefocus=False, width=40, state='readonly') self.opNameEntry = AutocompleteEntryInText(master, values=oplist, takefocus=False, width=40, state='readonly') self.softwareNameEntry = AutocompleteEntryInText(master, values=sorted(self.softwareLoader.get_names(self.sourcefiletype), key=str.lower), takefocus=False, width=40,state='readonly') self.softwareVersionEntry = AutocompleteEntryInText(master, values=self.softwareLoader.get_versions(self.softwareNameEntry.get(),software_type=self.sourcefiletype), initialValue=self.softwareLoader.get_preferred_version(name=self.softwareNameEntry.get()), takefocus=False, width=40) self.opCatEntry.bind("<Return>", self.newcategory) self.opCatEntry.bind("<<ComboboxSelected>>", self.newcategory) self.opNameEntry.bind("<Return>", self.newcommand) self.opNameEntry.bind("<<ComboboxSelected>>", self.newcommand) self.softwareNameEntry.bind("<Return>", self.newsoftware) self.softwareNameEntry.bind("<<ComboboxSelected>>", self.newsoftware) opCatLabel = Label(master, text='Operation Category: ') opCatLabel.grid(row=2, column=0) self.opCatEntry.grid(row=2, column=1, sticky='EW') opNameLabel = Label(master, text='Operation Name: ') opNameLabel.grid(row=3, column=0) self.opNameEntry.grid(row=3, column=1, sticky='EW') softwareNameLabel = Label(master, text='Software Name: ') softwareNameLabel.grid(row=4, column=0) self.softwareNameEntry.grid(row=4, column=1, sticky='EW') softwareVersionLabel = Label(master, text='Software Version: ') softwareVersionLabel.grid(row=5, column=0) self.softwareVersionEntry.grid(row=5, column=1, sticky='EW') # suffixLabel = Label(master, text='Suffix: ') # suffixLabel.grid(row=6, column=0) # self.suffixEntry = Entry(master, width=40) # self.suffixEntry.grid(row=6, column=1, sticky='EW') commandLabel1 = Label(master, text='Command (exactly as it would be typed in command line):') commandLabel1.grid(row=7, column=0, columnspan=8) self.commandEntry = Entry(master, width=40) self.commandEntry.grid(row=8, column=0, columnspan=8, sticky='EW') commandLabel2 = Label(master, text='Use 
\"{inputimage}\" and \"{outputimage}\" in place of input and output images, respectively.\n' 'If omitted, \"{inputimage}\" and \"{outputimage}\" will be appended to end of command.') commandLabel2.grid(row=9, column=0, columnspan=8) Label(master, text='Additional Arguments (optional):').grid(row=10) self.argFrame = Frame(master) self.argFrame.grid(row=11, column=0, columnspan=8) self.add_argument_row(row=0, col=0, initialize=True) def add_argument_row(self, row, col, initialize=False, event=None): if initialize == False: self.addArgButton.grid_forget() Label(self.argFrame, text='Arg Name: ').grid(row=row, column=col) argNameEntry = Entry(self.argFrame) argNameEntry.grid(row=row, column=col+1, sticky='EW') col+=2 Label(self.argFrame, text='Arg Type: ').grid(row=row, column=col) typeBox = ttk.Combobox(self.argFrame, values=['String', 'ImageFile', 'XMPFile', 'Donor', 'Float', 'Int', 'List', 'YesNo', 'Time', 'Coordinates']) typeBox.set('String') typeBox.grid(row=row, column=col+1, sticky='EW') col+=2 Label(self.argFrame, text='Default Value: ').grid(row=row, column=col) defaultValueBox = Entry(self.argFrame) defaultValueBox.grid(row=row, column=col+1, sticky='EW') row+=1 col=0 Label(self.argFrame, text='Description: ').grid(row=row, column=col) descriptionBox = Entry(self.argFrame) descriptionBox.grid(row=row, column=col+1, sticky='EW') col+=2 Label(self.argFrame, text='List Values: ').grid(row=row, column=col) valuesBox = Entry(self.argFrame, state='disabled') valuesBox.grid(row=row, column=col+1, sticky='EW') typeBox.correspondingValues = valuesBox typeBox.bind("<<ComboboxSelected>>", self.set_valuesbox_state) col+=2 insertButton = Button(self.argFrame, text='Insert', command=lambda:self.insert_arg(argNameEntry)) insertButton.grid(row=row, column=col, columnspan=2, sticky='EW') row+=1 col=0 ttk.Separator(self.argFrame, orient=HORIZONTAL).grid(row=row, column=col, columnspan=8, sticky='EW') row+=1 col=0 self.addArgButton = Button(self.argFrame, text='Add another argument', command=lambda: self.add_argument_row(row=row, col=col)) self.addArgButton.grid(row=row, column=col, columnspan=2) Fields = collections.namedtuple('Fields', 'argname, type, defaultvalue, description, values') f = Fields(argname=argNameEntry, type=typeBox, defaultvalue=defaultValueBox, description=descriptionBox, values=valuesBox) self.arguments.append(f) def insert_arg(self, entry): idx = self.commandEntry.index(INSERT) currentCommand = self.commandEntry.get() try: if currentCommand[idx-1] != ' ': self.commandEntry.insert(idx, ' ') idx+=1 except IndexError: pass self.commandEntry.insert(idx, '{' + entry.get().replace(' ', '') + '}') if len(entry.get()) >0 else '' idx = self.commandEntry.index(INSERT) currentCommand = self.commandEntry.get() try: if currentCommand[idx+1] != ' ': self.commandEntry.insert(idx, ' ') except IndexError: pass def set_valuesbox_state(self, event=None): if event is not None: val = event.widget.get() value_entry = event.widget.correspondingValues if val == 'List': value_entry.config(state='normal') else: value_entry.config(state='disabled') def apply(self): self.pluginName = self.nameEntry.get().replace(' ', '') opName = self.opNameEntry.get() opCat = self.opCatEntry.get() description = self.descriptionEntry.get("1.0",END).strip() softwareName = self.softwareNameEntry.get() softwareVersion = self.softwareVersionEntry.get() #suffix = self.suffixEntry.get() command = self.commandEntry.get().split(' ') if '{inputimage}' not in command: command.append('{inputimage}') if '{outputimage}' not in command: 
            command.append('{outputimage}')

        import sys  # local import: 'sys' is not in this module's top-level imports
        platform = sys.platform

        self.data = {"name": self.pluginName,
                     "operation": {
                         "name": opName,
                         "category": opCat,
                         "description": description,
                         "software": softwareName,
                         "version": softwareVersion,
                         "arguments": {},
                         "transitions": ['image.image']
                     },
                     #"suffix": suffix
                     "command": {
                         "default": command,
                         platform: command
                     }
        }

        self.export_arguments()

        self.path = os.path.join('plugins', 'Custom', self.pluginName) + '.json'
        # need to step up a directory to save the json
        with open(os.path.join('.', self.path), 'w') as newJSON:
            json.dump(self.data, newJSON, indent=4)
        maskgen.plugins.loadPlugins().loadCustom(self.pluginName, self.path)

    def cancel(self, event=None):
        self.destroy()

    def export_arguments(self):
        for argument in self.arguments:
            self.data['operation']['arguments'][argument.argname.get().replace(' ', '')] = {
                'type': argument.type.get().lower(),
                'defaultvalue': argument.defaultvalue.get(),
                'description': argument.description.get(),
            }
            if argument.type.get() == 'List':
                vals = argument.values.get().replace(', ', ',').split(',')
                self.data['operation']['arguments'][argument.argname.get().replace(' ', '')]['values'] = vals

    """
    the functions below are taken from the DescriptionCaptureDialog class in
    description_dialog.py (much of the code in this class has been borrowed from there)
    """
    def newcategory(self, event):
        opByCat = self.organizeOperationsByCategory()
        if self.opCatEntry.get() in opByCat:
            oplist = opByCat[self.opCatEntry.get()]
            self.opNameEntry.set_completion_list(oplist)
            self.newcommand(event)
        else:
            self.opNameEntry.set_completion_list([])

    def newcommand(self, event):
        op = self.gopLoader.getOperationWithGroups(self.opNameEntry.get())

    def organizeOperationsByCategory(self):
        return self.gopLoader.getOperationsByCategoryWithGroups(self.sourcefiletype, self.targetfiletype)

    def newsoftware(self, event):
        sname = self.softwareNameEntry.get()
        self.softwareVersionEntry.set_completion_list(
            self.softwareLoader.get_versions(sname, software_type=self.sourcefiletype),
            initialValue=self.softwareLoader.get_preferred_version(name=sname))


def main():
    maskgen.plugins.loadPlugins()
    root = Tk()
    root.withdraw()
    # The original call omitted the gopLoader argument required by
    # PluginBuilder.__init__; assuming GroupOperationsLoader can be
    # constructed without arguments here.
    d = PluginBuilder(root, GroupOperationsLoader())
    d.mainloop()

if __name__ == '__main__':
    main()
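# --- Illustrative sketch (not part of the module above) ---
# A minimal example of the plugin description that apply() writes to
# plugins/Custom/<name>.json, assuming one 'List'-typed argument. All field
# values below are hypothetical; apply() also adds a sys.platform-specific
# copy of the command list next to "default".
EXAMPLE_PLUGIN_JSON = {
    "name": "MyBlurPlugin",
    "operation": {
        "name": "Blur",
        "category": "Filter",
        "description": "Apply a blur to the input image",
        "software": "GIMP",
        "version": "2.8",
        "arguments": {
            "radius": {
                "type": "list",
                "defaultvalue": "3",
                "description": "Kernel radius",
                "values": ["1", "3", "5"],
            },
        },
        "transitions": ["image.image"],
    },
    "command": {
        "default": ["gimp", "{inputimage}", "{outputimage}"],
    },
}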
from __future__ import unicode_literals

import logging
import os
import re
import subprocess
from tempfile import mkstemp

from django.utils import six
from django.utils.translation import ugettext_lazy as _
from djblets.util.filesystem import is_exe_in_path

from reviewboard.scmtools.core import (SCMTool, ChangeSet, HEAD,
                                       PRE_CREATION)
from reviewboard.scmtools.errors import (SCMError, FileNotFoundError,
                                         RepositoryNotFoundError)
from reviewboard.diffviewer.parser import DiffParser


class PlasticTool(SCMTool):
    scmtool_id = 'plastic'
    name = "Plastic SCM"
    diffs_use_absolute_paths = True
    supports_pending_changesets = True
    field_help_text = {
        'path': _('The Plastic repository spec in the form of '
                  '[repo]@[hostname]:[port].'),
    }
    dependencies = {
        'executables': ['cm'],
    }

    REP_RE = re.compile(r'^(?P<reponame>.*)@(?P<hostname>.*):(?P<port>\d+)$')
    CS_RE = re.compile(r'^(?P<csid>\d+) (?P<user>[^\s]+) (?P<revid>\d+) '
                       r'(?P<file>.*)$')
    REPOLIST_RE = re.compile(r'^\s*\d+\s*(?P<reponame>[^\s]+)\s*.*:.*$')
    UNKNOWN_REV = "rev:revid:-1"

    def __init__(self, repository):
        super(PlasticTool, self).__init__(repository)

        self.reponame, self.hostname, self.port = \
            self.parse_repository(repository.path)
        self.client = PlasticClient(repository.path, self.reponame,
                                    self.hostname, self.port)

    def get_changeset(self, changesetid, allow_empty=False):
        logging.debug('Plastic: get_changeset %s' % (changesetid))

        changesetdata = self.client.get_changeset(changesetid)
        logging.debug('Plastic: changesetdata %s' % (changesetdata))

        # Changeset data is in the form of multiple lines of:
        #   <changesetid> <user> <revid> <file spec>
        #
        # We assume the user and comment will be the same for each item, so
        # read it out of the first.
        changeset = ChangeSet()
        changeset.changenum = changesetid

        split = changesetdata.split('\n')
        m = self.CS_RE.match(split[0])
        revid = m.group("revid")
        changeset.username = m.group("user")
        changeset.summary = self.client.get_changeset_comment(changesetid,
                                                              revid)
        logging.debug('Plastic: changeset user %s summary %s'
                      % (changeset.username, changeset.summary))

        for line in split:
            if line:
                m = self.CS_RE.match(line)

                if not m:
                    logging.debug('Plastic: bad re %s failed to match %s'
                                  % (self.CS_RE, line))
                    raise SCMError("Error looking up changeset")

                if m.group("csid") != six.text_type(changesetid):
                    logging.debug('Plastic: csid %s != %s'
                                  % (m.group("csid"), changesetid))
                    raise SCMError('The server returned a changeset ID that '
                                   'was not requested')

                logging.debug('Plastic: adding file %s' % (m.group("file")))
                # Append the file spec as a single entry; '+=' with a bare
                # string would extend the list character by character.
                changeset.files.append(m.group("file"))

        return changeset

    def get_file(self, path, revision=HEAD, **kwargs):
        logging.debug('Plastic: get_file %s revision %s' % (path, revision))

        if revision == PRE_CREATION:
            return b''

        # Check for new files
        if revision == self.UNKNOWN_REV:
            return b''

        return self.client.get_file(path, revision)

    def file_exists(self, path, revision=HEAD, **kwargs):
        logging.debug('Plastic: file_exists %s revision %s'
                      % (path, revision))

        if revision == PRE_CREATION:
            return True

        # Check for new files
        if revision == self.UNKNOWN_REV:
            return True

        try:
            return self.client.get_file(path, revision)
        except FileNotFoundError:
            return False

    def parse_diff_revision(self, filename, revision, *args, **kwargs):
        """Parse and return a filename and revision from a diff.

        Args:
            filename (bytes):
                The filename as represented in the diff.

            revision (bytes):
                The revision as represented in the diff.

            *args (tuple, unused):
                Unused positional arguments.

            **kwargs (dict, unused):
                Unused keyword arguments.
        Returns:
            tuple:
            A tuple containing two items:

            1. The normalized filename as a byte string.

            2. The normalized revision as a byte string or a
               :py:class:`~reviewboard.scmtools.core.Revision`.
        """
        assert isinstance(filename, bytes), (
            'filename must be a byte string, not %s' % type(filename))
        assert isinstance(revision, bytes), (
            'revision must be a byte string, not %s' % type(revision))

        logging.debug('Plastic: parse_diff_revision file %s revision %s'
                      % (filename, revision))

        if revision == b'PRE-CREATION':
            revision = PRE_CREATION

        return filename, revision

    def get_parser(self, data):
        return PlasticDiffParser(data)

    @classmethod
    def parse_repository(cls, path):
        m = cls.REP_RE.match(path)

        if m:
            repopath = m.group("reponame")
            hostname = m.group("hostname")
            port = m.group("port")
            return repopath, hostname, port
        else:
            raise RepositoryNotFoundError()

    @classmethod
    def check_repository(cls, path, username=None, password=None,
                         local_site_name=None):
        m = cls.REP_RE.match(path)

        if not m:
            raise RepositoryNotFoundError()

        # Can't use 'cm checkconnection' here as it only checks the
        # pre-configured server
        server = "%s:%s" % (m.group("hostname"), m.group("port"))
        reponame = m.group("reponame")

        logging.debug('Plastic: Checking repository %s@%s'
                      % (reponame, server))

        repositories = PlasticClient.get_repositories(server)

        split = repositories.splitlines()

        for rep in split:
            m = cls.REPOLIST_RE.match(rep)

            if m and m.group("reponame") == reponame:
                break
        else:
            raise RepositoryNotFoundError()


class PlasticDiffParser(DiffParser):
    """
    This class is able to parse diffs created with the plastic client
    support in post-review.
    """

    # As the diff creation is based on the Perforce code, this is based
    # on the PerforceDiffParser (specifically, the binary file markers)
    BINARY_RE = re.compile(r'^==== ([^\s]+) \(([^\)]+)\) ==([ACIMR])==$')

    def __init__(self, data):
        super(PlasticDiffParser, self).__init__(data)

    def parse_diff_header(self, linenum, info):
        m = self.BINARY_RE.match(self.lines[linenum])

        if m:
            info['origFile'] = m.group(1)
            info['origInfo'] = m.group(2)
            info['newFile'] = m.group(1)
            info['newInfo'] = ""
            linenum += 1

            if (linenum < len(self.lines) and
                (self.lines[linenum].startswith(b"Binary files ") or
                 self.lines[linenum].startswith(b"Files "))):
                info['binary'] = True
                linenum += 1

            # In this case, this *is* our diff header. We don't want to
            # let the next line's real diff header be a part of this one,
            # so return now
            return linenum

        return super(PlasticDiffParser, self).parse_diff_header(linenum,
                                                                info)


class PlasticClient(object):
    def __init__(self, repository, reponame, hostname, port):
        if not is_exe_in_path('cm'):
            # This is technically not the right kind of error, but it's the
            # pattern we use with all the other tools.
raise ImportError self.reponame = reponame self.hostname = hostname self.port = port def get_file(self, path, revision): logging.debug('Plastic: get_file %s rev %s' % (path, revision)) repo = "rep:%s@repserver:%s:%s" % (self.reponame, self.hostname, self.port) # Work around a plastic bug, where 'cm cat --file=blah' gets an # extra newline, but plain 'cm cat' doesn't fd, tmpfile = mkstemp() os.close(fd) p = subprocess.Popen( ['cm', 'cat', revision + '@' + repo, '--file=' + tmpfile], stderr=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=(os.name != 'nt')) errmsg = six.text_type(p.stderr.read()) failure = p.wait() if failure: if not errmsg: errmsg = p.stdout.read() raise SCMError(errmsg) with open(tmpfile, 'rb') as readtmp: contents = readtmp.read() os.unlink(tmpfile) return contents def get_changeset(self, changesetid): logging.debug('Plastic: get_changeset %s' % (changesetid)) repo = "rep:%s@repserver:%s:%s" % (self.reponame, self.hostname, self.port) p = subprocess.Popen(['cm', 'find', 'revs', 'where', 'changeset=' + six.text_type(changesetid), 'on', 'repository', '\'' + repo + '\'', '--format={changeset} {owner} {id} {item}', '--nototal'], stderr=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=(os.name != 'nt')) contents = p.stdout.read() errmsg = p.stderr.read() failure = p.wait() if failure: raise SCMError(errmsg) return contents def get_changeset_comment(self, changesetid, revid): logging.debug('Plastic: get_changeset_comment %s' % (changesetid)) repo = "rep:%s@repserver:%s:%s" % (self.reponame, self.hostname, self.port) p = subprocess.Popen(['cm', 'find', 'changesets', 'where', 'changesetid=' + six.text_type(changesetid), 'on', 'repository', '\'' + repo + '\'', '--format={comment}', '--nototal'], stderr=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=(os.name != 'nt')) contents = p.stdout.read() errmsg = p.stderr.read() failure = p.wait() if failure: raise SCMError(errmsg) return contents @classmethod def get_repositories(cls, server): logging.debug('Plastic: get_repositories %s' % (server)) p = subprocess.Popen(['cm', 'listrepositories', server], stderr=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=(os.name != 'nt')) repositories = p.stdout.read() errmsg = p.stderr.read() failure = p.wait() if failure: if not errmsg and repositories.startswith('Error:'): error = repositories else: error = errmsg raise SCMError(error) return repositories
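# --- Illustrative sketch (not part of the tool above) ---
# A self-contained check of the repository-spec pattern used by
# PlasticTool.REP_RE: specs take the form [repo]@[hostname]:[port].
# The spec string below is hypothetical.
if __name__ == '__main__':
    import re as _re

    _demo_rep_re = _re.compile(
        r'^(?P<reponame>.*)@(?P<hostname>.*):(?P<port>\d+)$')
    _m = _demo_rep_re.match('mainrepo@plastic.example.com:8087')
    assert _m is not None
    assert _m.group('reponame') == 'mainrepo'
    assert _m.group('hostname') == 'plastic.example.com'
    assert _m.group('port') == '8087'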
import param import imagen from topo.base.arrayutil import DivideWithConstant, MultiplyWithConstant from topo import optimized, projection, sheet from . import Model from .gcal import ModelGCAL from .earlyvision import EarlyVisionModel @Model.definition class EarlyVisionSCAL(EarlyVisionModel): """ EarlyVisionModel subclass with spatially calibrated extents used for SCAL and other models. """ area = param.Number(default=2.0,bounds=(0,None), inclusive_bounds=(False,True),doc=""" Linear size of cortical area to simulate. SCAL and other spatially calibrated variants of GCAL require cortical areas larger than 1.0x1.0 to avoid strong suppressive edge effects.""") expand_sf_test_range=param.Boolean(default=False,doc=""" By default, measure_sine_pref() measures SF at the sizes of RF used, for speed, but if expand_sf_test_range is True, it will test over a larger range, including half the size of the smallest and twice the size of the largest.""") lgn_density = param.Number(default=16.0,bounds=(0,None), inclusive_bounds=(False,True),doc=""" The nominal_density to use for the LGN.""") num_inputs = param.Number(default=1.5, bounds=(0,None)) lgnaff_strength = param.Number(default=14, doc=""" Overall strength of the afferent projection from the retina to the LGN sheets.""") #=================# # Spatial extents # #=================# center_size = param.Number(default=0.2, bounds=(0, None), doc=""" The size of the central Gaussian used to compute the center-surround receptive field.""") surround_size = param.Number(default=0.3, bounds=(0, None), doc=""" The size of the surround Gaussian used to compute the center-surround receptive field.""") gain_control_size = param.Number(default=0.8, bounds=(0, None), doc=""" The size of the divisive inhibitory suppressive field used for contrast-gain control in the LGN sheets. This also acts as the corresponding bounds radius.""") lgnaff_radius = param.Number(default=0.4, bounds=(0, None), doc=""" Connection field radius of a unit in the LGN level to units in a retina sheet.""") lgnlateral_radius = param.Number(default=0.8, bounds=(0, None), doc=""" Connection field radius of a unit in the LGN level to surrounding units, in case gain control is used.""") def training_pattern_setup(self, **overrides): """ Only the size of Gaussian training patterns has been modified. The 'aspect_ratio' and 'scale' parameter values are unchanged. 
""" or_dim = 'or' in self.dims gaussian = (self.dataset == 'Gaussian') pattern_parameters = {'size':(0.2 if or_dim and gaussian else 3 * 0.1 if gaussian else 10.0), 'aspect_ratio': 4.6667 if or_dim else 1.0, 'scale': self.contrast / 100.0} return super(EarlyVisionSCAL, self).training_pattern_setup( pattern_parameters=pattern_parameters, position_bound_x=self.area/2.0+self.v1aff_radius, position_bound_y=self.area/2.0+self.v1aff_radius) def analysis_setup(self): super(EarlyVisionSCAL, self).analysis_setup() from topo.analysis.command import measure_sine_pref, measure_or_pref sf_relative_sizes = [self.sf_spacing ** (sf_channel - 1) for sf_channel in self['SF']] wide_relative_sizes = ([0.5 * sf_relative_sizes[0]] + sf_relative_sizes + [2.0 * sf_relative_sizes[-1]]) relative_sizes = (wide_relative_sizes if self.expand_sf_test_range else sf_relative_sizes) frequencies = [1.5 * s for s in relative_sizes] measure_sine_pref.frequencies = frequencies measure_or_pref.frequencies= frequencies @Model.definition class ModelSCAL(EarlyVisionSCAL, ModelGCAL): """ Spatially-tuned GCAL (SCAL) calibrated to represent a 3 degree parafoveal region of macaque primary visual cortex, assuming a 3 mm/deg magnification factor and 0.71 mm orientation hypercolumn distance. Changes from ModelGCAL include relative strengths, homeostatic sparsity constraints, connection radii and switching from subtractive to divisive inhibition. The explanation of the calibration process is explained in a forthcoming notebook. """ area = param.Number(default=2.0,bounds=(0,None), inclusive_bounds=(False,True),doc=""" Linear size of cortical area to simulate. SCAL and other spatially calibrated variants of GCAL require cortical areas larger than 1.0x1.0 to avoid strong suppressive edge effects.""") aff_strength = param.Number(default=2.4, bounds=(0.0, None), doc=""" Overall strength of the afferent projection to V1.""") exc_strength = param.Number(default=1.4, bounds=(0.0, None), doc=""" Overall strength of the lateral excitatory projection to V1.""") inh_strength = param.Number(default=2.0, bounds=(0.0, None), doc=""" Overall strength of the lateral inhibitory projection to V1.""") t_init = param.Number(default=0.45, doc=""" The initial threshold value for homeostatic adaptation in V1.""") t_settle = param.Integer(default=16, doc=""" Number of settling steps before applying a reset in the V1 sheet.""") #=================# # Spatial extents # #=================# latexc_radius = param.Number(default=0.1, bounds=(0, None), doc=""" Radius of the lateral excitatory bounds within V1.""") latinh_radius = param.Number(default=0.18, bounds=(0, None), doc=""" Radius of the lateral inhibitory bounds within V1.""") latexc_size = param.Number(default=0.06, bounds=(0, None), doc=""" Size of the lateral excitatory connections within V1.""") latinh_size = param.Number(default=0.115, bounds=(0, None), doc=""" Size of the lateral inhibitory connections within V1.""") v1aff_radius = param.Number(default=0.5, bounds=(0, None), doc=""" Connection field radius of a unit in V1 to units in a LGN sheet.""") #=====================# # Divisive inhibition # #=====================# division_constant = param.Number(default=1.0, doc=""" The constant offset on the denominator for divisive lateral inhibition to avoid divide-by-zero errors: divide(x,maximum(y,0) + division_constant).""") #=========================# # Long-range connectivity # #=========================# laterals = param.Boolean(default=False, doc=""" Instantiate long-range lateral connections. 
Expensive!""") latexc_strength=param.Number(default=0, doc=""" Lateral excitatory connection strength""") latexc_lr=param.Number(default=1.0, doc=""" Lateral excitatory connection learning rate.""") # Excitatory connection profiles # lateral_radius = param.Number(default=1.25, bounds=(0, None), doc=""" Radius of the lateral excitatory bounds within V1Exc.""") lateral_size = param.Number(default=2.5, bounds=(0, None), doc=""" Size of the lateral excitatory connections within V1Exc.""") def property_setup(self, properties): "Specify weight initialization, response function, and learning function" properties = super(ModelSCAL, self).property_setup(properties) projection.CFProjection.cf_shape=imagen.Disk(smoothing=0.0) projection.CFProjection.response_fn=optimized.CFPRF_DotProduct_cython() projection.CFProjection.learning_fn=optimized.CFPLF_Hebbian_cython() projection.CFProjection.weights_output_fns=[optimized.CFPOF_DivisiveNormalize_L1_cython()] projection.SharedWeightCFProjection.response_fn=optimized.CFPRF_DotProduct_cython() sheet.SettlingCFSheet.joint_norm_fn = optimized.compute_joint_norm_totals_cython @Model.CFProjection def lateral_inhibitory(self, src_properties, dest_properties): """ Switch to divisive inhibition, otherwise parameters unchanged. """ return Model.CFProjection.params( delay=0.05, name='LateralInhibitory', weights_generator=imagen.random.GaussianCloud( gaussian_size=self.latinh_size), strength=self.inh_strength, activity_group=(0.6, DivideWithConstant(c=self.division_constant)), learning_rate=self.inh_lr, nominal_bounds_template=sheet.BoundingBox( radius=self.latinh_radius)) @Model.matchconditions('V1', 'lr_lateral_excitatory') def lr_lateral_excitatory_conditions(self, properties): return {'level': 'V1'} if self.laterals else {'level': None} @Model.CFProjection def lr_lateral_excitatory(self, src_properties, dest_properties): return Model.CFProjection.params( delay=0.1, name='LRExcitatory', activity_group=(0.9, MultiplyWithConstant()), weights_generator=imagen.Gaussian(aspect_ratio=1.0, size=self.lateral_size), strength=self.latexc_strength, learning_rate=self.latexc_lr, nominal_bounds_template=sheet.BoundingBox(radius=self.lateral_radius))
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- from typing import ( # pylint: disable=unused-import Union, Optional, Any, Iterable, Dict, List, Type, Tuple, TYPE_CHECKING ) import logging from os import fstat from io import (SEEK_END, SEEK_SET, UnsupportedOperation) import isodate from azure.core.exceptions import raise_with_traceback _LOGGER = logging.getLogger(__name__) _REQUEST_DELIMITER_PREFIX = "batch_" _HTTP1_1_IDENTIFIER = "HTTP/1.1" _HTTP_LINE_ENDING = "\r\n" def serialize_iso(attr): """Serialize Datetime object into ISO-8601 formatted string. :param Datetime attr: Object to be serialized. :rtype: str :raises: ValueError if format invalid. """ if not attr: return None if isinstance(attr, str): attr = isodate.parse_datetime(attr) try: utc = attr.utctimetuple() if utc.tm_year > 9999 or utc.tm_year < 1: raise OverflowError("Hit max or min date") date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec) return date + 'Z' except (ValueError, OverflowError) as err: msg = "Unable to serialize datetime object." raise_with_traceback(ValueError, msg, err) except AttributeError as err: msg = "ISO-8601 object must be valid Datetime object." raise_with_traceback(TypeError, msg, err) def get_length(data): length = None # Check if object implements the __len__ method, covers most input cases such as bytearray. try: length = len(data) except: # pylint: disable=bare-except pass if not length: # Check if the stream is a file-like stream object. # If so, calculate the size using the file descriptor. try: fileno = data.fileno() except (AttributeError, UnsupportedOperation): pass else: try: return fstat(fileno).st_size except OSError: # Not a valid fileno, may be possible requests returned # a socket number? pass # If the stream is seekable and tell() is implemented, calculate the stream size. try: current_position = data.tell() data.seek(0, SEEK_END) length = data.tell() - current_position data.seek(current_position, SEEK_SET) except (AttributeError, OSError, UnsupportedOperation): pass return length def read_length(data): try: if hasattr(data, 'read'): read_data = b'' for chunk in iter(lambda: data.read(4096), b""): read_data += chunk return len(read_data), read_data if hasattr(data, '__iter__'): read_data = b'' for chunk in data: read_data += chunk return len(read_data), read_data except: # pylint: disable=bare-except pass raise ValueError("Unable to calculate content length, please specify.") def validate_and_format_range_headers( start_range, end_range, start_range_required=True, end_range_required=True, check_content_md5=False, align_to_page=False): # If end range is provided, start range must be provided if (start_range_required or end_range is not None) and start_range is None: raise ValueError("start_range value cannot be None.") if end_range_required and end_range is None: raise ValueError("end_range value cannot be None.") # Page ranges must be 512 aligned if align_to_page: if start_range is not None and start_range % 512 != 0: raise ValueError("Invalid page blob start_range: {0}. " "The size must be aligned to a 512-byte boundary.".format(start_range)) if end_range is not None and end_range % 512 != 511: raise ValueError("Invalid page blob end_range: {0}. 
" "The size must be aligned to a 512-byte boundary.".format(end_range)) # Format based on whether end_range is present range_header = None if end_range is not None: range_header = 'bytes={0}-{1}'.format(start_range, end_range) elif start_range is not None: range_header = "bytes={0}-".format(start_range) # Content MD5 can only be provided for a complete range less than 4MB in size range_validation = None if check_content_md5: if start_range is None or end_range is None: raise ValueError("Both start and end range requied for MD5 content validation.") if end_range - start_range > 4 * 1024 * 1024: raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") range_validation = 'true' return range_header, range_validation def add_metadata_headers(metadata=None): # type: (Optional[Dict[str, str]]) -> Dict[str, str] headers = {} if metadata: for key, value in metadata.items(): headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value return headers def serialize_batch_body(requests, batch_id): """ --<delimiter> <subrequest> --<delimiter> <subrequest> (repeated as needed) --<delimiter>-- Serializes the requests in this batch to a single HTTP mixed/multipart body. :param list[~azure.core.pipeline.transport.HttpRequest] requests: a list of sub-request for the batch request :param str batch_id: to be embedded in batch sub-request delimiter :return: The body bytes for this batch. """ if requests is None or len(requests) == 0: raise ValueError('Please provide sub-request(s) for this batch request') delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') batch_body = list() content_index = 0 for request in requests: request.headers.update({ "Content-ID": str(content_index), "Content-Length": str(0) }) batch_body.append(delimiter_bytes) batch_body.append(_make_body_from_sub_request(request)) batch_body.append(newline_bytes) content_index += 1 batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) # final line of body MUST have \r\n at the end, or it will not be properly read by the service batch_body.append(newline_bytes) return bytes().join(batch_body) def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): """ Gets the delimiter used for this batch request's mixed/multipart HTTP format. :param str batch_id: Randomly generated id :param bool is_prepend_dashes: Whether to include the starting dashes. Used in the body, but non on defining the delimiter. :param bool is_append_dashes: Whether to include the ending dashes. Used in the body on the closing delimiter only. :return: The delimiter, WITHOUT a trailing newline. """ prepend_dashes = '--' if is_prepend_dashes else '' append_dashes = '--' if is_append_dashes else '' return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes def _make_body_from_sub_request(sub_request): """ Content-Type: application/http Content-ID: <sequential int ID> Content-Transfer-Encoding: <value> (if present) <verb> <path><query> HTTP/<version> <header key>: <header value> (repeated as necessary) Content-Length: <value> (newline if content length > 0) <body> (if content length > 0) Serializes an http request. :param ~azure.core.pipeline.transport.HttpRequest sub_request: Request to serialize. 
:return: The serialized sub-request in bytes """ # put the sub-request's headers into a list for efficient str concatenation sub_request_body = list() # get headers for ease of manipulation; remove headers as they are used headers = sub_request.headers # append opening headers sub_request_body.append("Content-Type: application/http") sub_request_body.append(_HTTP_LINE_ENDING) sub_request_body.append("Content-ID: ") sub_request_body.append(headers.pop("Content-ID", "")) sub_request_body.append(_HTTP_LINE_ENDING) sub_request_body.append("Content-Transfer-Encoding: binary") sub_request_body.append(_HTTP_LINE_ENDING) # append blank line sub_request_body.append(_HTTP_LINE_ENDING) # append HTTP verb and path and query and HTTP version sub_request_body.append(sub_request.method) sub_request_body.append(' ') sub_request_body.append(sub_request.url) sub_request_body.append(' ') sub_request_body.append(_HTTP1_1_IDENTIFIER) sub_request_body.append(_HTTP_LINE_ENDING) # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) for header_name, header_value in headers.items(): if header_value is not None: sub_request_body.append(header_name) sub_request_body.append(": ") sub_request_body.append(header_value) sub_request_body.append(_HTTP_LINE_ENDING) # append blank line sub_request_body.append(_HTTP_LINE_ENDING) return ''.join(sub_request_body).encode()
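# --- Illustrative sketch (not part of the module above) ---
# Shows the Range header strings produced by
# validate_and_format_range_headers() for two hypothetical inputs: a
# complete first 512-byte page, and an open-ended range with no end.
if __name__ == '__main__':
    header, validation = validate_and_format_range_headers(0, 511)
    assert header == 'bytes=0-511' and validation is None

    header, _ = validate_and_format_range_headers(
        512, None, end_range_required=False)
    assert header == 'bytes=512-'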
# Copyright 2008 Divmod, Inc. See LICENSE file for details # -*- test-case-name: xmantissa.test.test_webapp,xmantissa.test.test_publicweb,xmantissa.test.test_website -*- """ This unfortunate module exists to contain code that would create an ugly dependency loop if it were somewhere else. """ from zope.interface import implements from twisted.cred.portal import IRealm from epsilon.structlike import record from axiom.userbase import getDomainNames from nevow import athena from nevow.rend import NotFound from nevow.inevow import IResource, IRequest from xmantissa.ixmantissa import (IWebViewer, INavigableFragment, ISiteRootPlugin) from xmantissa.websharing import UserIndexPage from xmantissa.error import CouldNotLoadFromThemes class WebViewerHelper(object): """ This is a mixin for the common logic in the two providers of L{IWebViewer} included with Mantissa, L{xmantissa.publicweb._AnonymousWebViewer} and L{xmantissa.webapp._AuthenticatedWebViewer}. @ivar _getDocFactory: a 1-arg callable which returns a nevow loader. @ivar _preferredThemes: a 0-arg callable which returns a list of nevow themes. """ def __init__(self, _getDocFactory, _preferredThemes): """ """ self._getDocFactory = _getDocFactory self._preferredThemes = _preferredThemes def _wrapNavFrag(self, fragment, useAthena): """ Subclasses must implement this to wrap a fragment. @param fragment: an L{INavigableFragment} provider that should be wrapped in the resulting page. @param useAthena: Whether the resulting L{IResource} should be a L{LivePage}. @type useAthena: L{bool} @return: a fragment to display to the user. @rtype: L{IResource} """ def wrapModel(self, model): """ Converts application-provided model objects to L{IResource} providers. """ res = IResource(model, None) if res is None: frag = INavigableFragment(model) fragmentName = getattr(frag, 'fragmentName', None) if fragmentName is not None: fragDocFactory = self._getDocFactory(fragmentName) if fragDocFactory is not None: frag.docFactory = fragDocFactory if frag.docFactory is None: raise CouldNotLoadFromThemes(frag, self._preferredThemes()) useAthena = isinstance(frag, (athena.LiveFragment, athena.LiveElement)) return self._wrapNavFrag(frag, useAthena) else: return res class MantissaViewHelper(object): """ This is the superclass of all Mantissa resources which act as a wrapper around an L{INavigableFragment} provider. This must be mixed in to some hierarchy with a C{locateChild} method, since it expects to cooperate in such a hierarchy. Due to infelicities in the implementation of some (pre-existing) subclasses, there is no __init__; but subclasses must set the 'fragment' attribute in theirs. """ fragment = None def locateChild(self, ctx, segments): """ Attempt to locate the child via the '.fragment' attribute, then fall back to normal locateChild behavior. """ if self.fragment is not None: # There are still a bunch of bogus subclasses of this class, which # are used in a variety of distasteful ways. 'fragment' *should* # always be set to something that isn't None, but there's no way to # make sure that it will be for the moment. Every effort should be # made to reduce public use of subclasses of this class (instead # preferring to wrap content objects with # IWebViewer.wrapModel()), so that the above check can be # removed. 
-glyph lc = getattr(self.fragment, 'locateChild', None) if lc is not None: x = lc(ctx, segments) if x is not NotFound: return x return super(MantissaViewHelper, self).locateChild(ctx, segments) class SiteRootMixin(object): """ Common functionality for L{AnonymousSite} and L{WebSite}. """ def locateChild(self, context, segments): """ Return a statically defined child or a child defined by a site root plugin or an avatar from guard. """ request = IRequest(context) webViewer = IWebViewer(self.store, None) childAndSegments = self.siteProduceResource(request, segments, webViewer) if childAndSegments is not None: return childAndSegments return NotFound # IMantissaSite def siteProduceResource(self, req, segments, webViewer): """ Retrieve a child resource and segments from rootChild_ methods on this object and SiteRootPlugins. @return: a 2-tuple of (resource, segments), suitable for return from locateChild. @param req: an L{IRequest} provider. @param segments: a tuple of L{str}s, the segments from the request. @param webViewer: an L{IWebViewer}, to be propagated through the child lookup process. """ # rootChild_* is not the same as child_, because its signature is # different. Maybe this should be done some other way. shortcut = getattr(self, 'rootChild_' + segments[0], None) if shortcut: res = shortcut(req, webViewer) if res is not None: return res, segments[1:] for plg in self.store.powerupsFor(ISiteRootPlugin): produceResource = getattr(plg, 'produceResource', None) if produceResource is not None: childAndSegments = produceResource(req, segments, webViewer) else: childAndSegments = plg.resourceFactory(segments) if childAndSegments is not None: return childAndSegments return None # IPowerupIndirector def indirect(self, interface): """ Create a L{VirtualHostWrapper} so it can have the first chance to handle web requests. """ if interface is IResource: siteStore = self.store.parent if self.store.parent is None: siteStore = self.store return VirtualHostWrapper( siteStore, IWebViewer(self.store), self) return self class VirtualHostWrapper(record('siteStore webViewer wrapped')): """ Resource wrapper which implements per-user virtual subdomains. This should be wrapped around any resource which sits at the root of the hierarchy. It will examine requests for their hostname and, when appropriate, redirect handling of the query to the appropriate sharing resource. @type siteStore: L{Store} @ivar siteStore: The site store which will be queried to determine which hostnames are associated with this server. @type webViewer: L{IWebViewer} @ivar webViewer: The web viewer representing the user. @type wrapped: L{IResource} provider @ivar wrapped: A resource to which traversal will be delegated if the request is not for a user subdomain. """ implements(IResource) def subdomain(self, hostname): """ Determine of which known domain the given hostname is a subdomain. @return: A two-tuple giving the subdomain part and the domain part or C{None} if the domain is not a subdomain of any known domain. """ hostname = hostname.split(":")[0] for domain in getDomainNames(self.siteStore): if hostname.endswith("." + domain): username = hostname[:-len(domain) - 1] if username != "www": return username, domain return None def locateChild(self, context, segments): """ Delegate dispatch to a sharing resource if the request is for a user subdomain, otherwise fall back to the wrapped resource's C{locateChild} implementation. 
""" request = IRequest(context) hostname = request.getHeader('host') info = self.subdomain(hostname) if info is not None: username, domain = info index = UserIndexPage(IRealm(self.siteStore), self.webViewer) resource = index.locateChild(None, [username])[0] return resource, segments return self.wrapped.locateChild(context, segments)
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import itertools
import os
import unittest
from collections import defaultdict
from tempfile import mkdtemp
from textwrap import dedent

from pants.base.build_file import BuildFile
from pants.base.build_root import BuildRoot
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.exceptions import TaskError
from pants.base.file_system_project_tree import FileSystemProjectTree
from pants.build_graph.address import Address
from pants.build_graph.build_configuration import BuildConfiguration
from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.build_file_parser import BuildFileParser
from pants.build_graph.mutable_build_graph import MutableBuildGraph
from pants.build_graph.target import Target
from pants.goal.goal import Goal
from pants.source.source_root import SourceRootConfig
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import safe_mkdir, safe_open, safe_rmtree
from pants_test.base.context_utils import create_context
from pants_test.option.util.fakes import create_options_for_optionables


# TODO: Rename to 'TestBase', for uniformity, and also for logic: This is a baseclass
# for tests, not a test of a thing called 'Base'.
class BaseTest(unittest.TestCase):
  """A baseclass useful for tests requiring a temporary buildroot.

  :API: public
  """

  def build_path(self, relpath):
    """Returns the canonical BUILD file path for the given relative build path.

    :API: public
    """
    if os.path.basename(relpath).startswith('BUILD'):
      return relpath
    else:
      return os.path.join(relpath, 'BUILD')

  def create_dir(self, relpath):
    """Creates a directory under the buildroot.

    :API: public

    relpath: The relative path to the directory from the build root.
    """
    path = os.path.join(self.build_root, relpath)
    safe_mkdir(path)
    return path

  def create_workdir_dir(self, relpath):
    """Creates a directory under the work directory.

    :API: public

    relpath: The relative path to the directory from the work directory.
    """
    path = os.path.join(self.pants_workdir, relpath)
    safe_mkdir(path)
    return path

  def create_file(self, relpath, contents='', mode='wb'):
    """Writes to a file under the buildroot.

    :API: public

    relpath:  The relative path to the file from the build root.
    contents: A string containing the contents of the file - '' by default.
    mode:     The mode to write to the file in - over-write by default.
    """
    path = os.path.join(self.build_root, relpath)
    with safe_open(path, mode=mode) as fp:
      fp.write(contents)
    return path

  def create_workdir_file(self, relpath, contents='', mode='wb'):
    """Writes to a file under the work directory.

    :API: public

    relpath:  The relative path to the file from the work directory.
    contents: A string containing the contents of the file - '' by default.
    mode:     The mode to write to the file in - over-write by default.
    """
    path = os.path.join(self.pants_workdir, relpath)
    with safe_open(path, mode=mode) as fp:
      fp.write(contents)
    return path

  def add_to_build_file(self, relpath, target):
    """Adds the given target specification to the BUILD file at relpath.

    :API: public

    relpath: The relative path to the BUILD file from the build root.
target: A string containing the target definition as it would appear in a BUILD file. """ self.create_file(self.build_path(relpath), target, mode='a') return BuildFile(self.address_mapper._project_tree, relpath=self.build_path(relpath)) def make_target(self, spec='', target_type=Target, dependencies=None, derived_from=None, synthetic=False, **kwargs): """Creates a target and injects it into the test's build graph. :API: public :param string spec: The target address spec that locates this target. :param type target_type: The concrete target subclass to create this new target from. :param list dependencies: A list of target instances this new target depends on. :param derived_from: The target this new target was derived from. :type derived_from: :class:`pants.build_graph.target.Target` """ address = Address.parse(spec) target = target_type(name=address.target_name, address=address, build_graph=self.build_graph, **kwargs) dependencies = dependencies or [] self.build_graph.inject_target(target, dependencies=[dep.address for dep in dependencies], derived_from=derived_from, synthetic=synthetic) # TODO(John Sirois): This re-creates a little bit too much work done by the BuildGraph. # Fixup the BuildGraph to deal with non BuildFileAddresses better and just leverage it. for traversable_dependency_spec in target.traversable_dependency_specs: traversable_dependency_address = Address.parse(traversable_dependency_spec, relative_to=address.spec_path) traversable_dependency_target = self.build_graph.get_target(traversable_dependency_address) if not traversable_dependency_target: raise ValueError('Tests must make targets for traversable dependency specs ahead of them ' 'being traversed, {} tried to traverse {} which does not exist.' .format(target, traversable_dependency_address)) if traversable_dependency_target not in target.dependencies: self.build_graph.inject_dependency(dependent=target.address, dependency=traversable_dependency_address) target.mark_transitive_invalidation_hash_dirty() return target @property def alias_groups(self): """ :API: public """ return BuildFileAliases(targets={'target': Target}) @property def build_ignore_patterns(self): """ :API: public """ return None def setUp(self): """ :API: public """ super(BaseTest, self).setUp() Goal.clear() Subsystem.reset() self.real_build_root = BuildRoot().path self.build_root = os.path.realpath(mkdtemp(suffix='_BUILD_ROOT')) self.addCleanup(safe_rmtree, self.build_root) self.pants_workdir = os.path.join(self.build_root, '.pants.d') safe_mkdir(self.pants_workdir) self.options = defaultdict(dict) # scope -> key-value mapping. 
self.options[''] = { 'pants_workdir': self.pants_workdir, 'pants_supportdir': os.path.join(self.build_root, 'build-support'), 'pants_distdir': os.path.join(self.build_root, 'dist'), 'pants_configdir': os.path.join(self.build_root, 'config'), 'cache_key_gen_version': '0-test', } self.options['cache'] = { 'read_from': [], 'write_to': [], } BuildRoot().path = self.build_root self.addCleanup(BuildRoot().reset) self._build_configuration = BuildConfiguration() self._build_configuration.register_aliases(self.alias_groups) self.build_file_parser = BuildFileParser(self._build_configuration, self.build_root) self.project_tree = FileSystemProjectTree(self.build_root) self.address_mapper = BuildFileAddressMapper(self.build_file_parser, self.project_tree, build_ignore_patterns=self.build_ignore_patterns) self.build_graph = MutableBuildGraph(address_mapper=self.address_mapper) def buildroot_files(self, relpath=None): """Returns the set of all files under the test build root. :API: public :param string relpath: If supplied, only collect files from this subtree. :returns: All file paths found. :rtype: set """ def scan(): for root, dirs, files in os.walk(os.path.join(self.build_root, relpath or '')): for f in files: yield os.path.relpath(os.path.join(root, f), self.build_root) return set(scan()) def reset_build_graph(self): """Start over with a fresh build graph with no targets in it.""" self.address_mapper = BuildFileAddressMapper(self.build_file_parser, FileSystemProjectTree(self.build_root)) self.build_graph = MutableBuildGraph(address_mapper=self.address_mapper) def set_options_for_scope(self, scope, **kwargs): self.options[scope].update(kwargs) def context(self, for_task_types=None, options=None, passthru_args=None, target_roots=None, console_outstream=None, workspace=None, for_subsystems=None): """ :API: public """ # Many tests use source root functionality via the SourceRootConfig.global_instance() # (typically accessed via Target.target_base), so we always set it up, for convenience. optionables = {SourceRootConfig} extra_scopes = set() for_subsystems = for_subsystems or () for subsystem in for_subsystems: if subsystem.options_scope is None: raise TaskError('You must set a scope on your subsystem type before using it in tests.') optionables.add(subsystem) for_task_types = for_task_types or () for task_type in for_task_types: scope = task_type.options_scope if scope is None: raise TaskError('You must set a scope on your task type before using it in tests.') optionables.add(task_type) extra_scopes.update([si.scope for si in task_type.known_scope_infos()]) optionables.update(Subsystem.closure( set([dep.subsystem_cls for dep in task_type.subsystem_dependencies_iter()]) | self._build_configuration.subsystems())) # Now default the option values and override with any caller-specified values. # TODO(benjy): Get rid of the options arg, and require tests to call set_options. 
options = options.copy() if options else {} for s, opts in self.options.items(): scoped_opts = options.setdefault(s, {}) scoped_opts.update(opts) options = create_options_for_optionables(optionables, extra_scopes=extra_scopes, options=options) Subsystem.reset(reset_options=True) Subsystem.set_options(options) context = create_context(options=options, passthru_args=passthru_args, target_roots=target_roots, build_graph=self.build_graph, build_file_parser=self.build_file_parser, address_mapper=self.address_mapper, console_outstream=console_outstream, workspace=workspace) return context def tearDown(self): """ :API: public """ super(BaseTest, self).tearDown() BuildFile.clear_cache() Subsystem.reset() def target(self, spec): """Resolves the given target address to a Target object. :API: public address: The BUILD target address to resolve. Returns the corresponding Target or else None if the address does not point to a defined Target. """ address = Address.parse(spec) self.build_graph.inject_address_closure(address) return self.build_graph.get_target(address) def targets(self, spec): """Resolves a target spec to one or more Target objects. :API: public spec: Either BUILD target address or else a target glob using the siblings ':' or descendants '::' suffixes. Returns the set of all Targets found. """ spec = CmdLineSpecParser(self.build_root).parse_spec(spec) addresses = list(self.address_mapper.scan_specs([spec])) for address in addresses: self.build_graph.inject_address_closure(address) targets = [self.build_graph.get_target(address) for address in addresses] return targets def create_files(self, path, files): """Writes to a file under the buildroot with contents same as file name. :API: public path: The relative path to the file from the build root. files: List of file names. """ for f in files: self.create_file(os.path.join(path, f), contents=f) def create_library(self, path, target_type, name, sources=None, **kwargs): """Creates a library target of given type at the BUILD file at path with sources :API: public path: The relative path to the BUILD file from the build root. target_type: valid pants target type. name: Name of the library target. sources: List of source file at the path relative to path. **kwargs: Optional attributes that can be set for any library target. Currently it includes support for resources, java_sources, provides and dependencies. """ if sources: self.create_files(path, sources) self.add_to_build_file(path, dedent(''' %(target_type)s(name='%(name)s', %(sources)s %(resources)s %(java_sources)s %(provides)s %(dependencies)s ) ''' % dict(target_type=target_type, name=name, sources=('sources=%s,' % repr(sources) if sources else ''), resources=('resources=["%s"],' % kwargs.get('resources') if 'resources' in kwargs else ''), java_sources=('java_sources=[%s],' % ','.join(map(lambda str_target: '"%s"' % str_target, kwargs.get('java_sources'))) if 'java_sources' in kwargs else ''), provides=('provides=%s,' % kwargs.get('provides') if 'provides' in kwargs else ''), dependencies=('dependencies=%s,' % kwargs.get('dependencies') if 'dependencies' in kwargs else ''), ))) return self.target('%s:%s' % (path, name)) def create_resources(self, path, name, *sources): """ :API: public """ return self.create_library(path, 'resources', name, sources) def assertUnorderedPrefixEqual(self, expected, actual_iter): """Consumes len(expected) items from the given iter, and asserts that they match, unordered. 
:API: public """ actual = list(itertools.islice(actual_iter, len(expected))) self.assertEqual(sorted(expected), sorted(actual)) def assertPrefixEqual(self, expected, actual_iter): """Consumes len(expected) items from the given iter, and asserts that they match, in order. :API: public """ self.assertEqual(expected, list(itertools.islice(actual_iter, len(expected))))
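# --- Illustrative sketch (not part of the framework above) ---
# A minimal example of how a test would typically build on BaseTest:
# make_target() injects a synthetic target into the in-memory build graph,
# where it can then be looked up by address. The spec below is hypothetical.
class ExampleTest(BaseTest):

  def test_sees_injected_target(self):
    t = self.make_target(spec='src/example:lib')
    self.assertIs(t, self.build_graph.get_target(t.address))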
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 18 21:40:28 2014

@author: Vespa
"""
import urllib2
import time
import sys
from zerocommon import *

def FindAnimate(dblist,inputname):
    """ find the animate entries in the database whose names match the input """
    idx = sorted(dblist)
    bestid = []
    for id in idx:
        if len(re.findall(inputname.lower(),dblist[id].lower()))>0:
            bestid.append(id)
    return bestid

def ShowAnimateFound(name,idlist,dblist,quickmode = 0):
    """ show the animate entries found, and let the user make a choice """
    if len(idlist) == 0:
        print name," Not Found!!"
        return 0
    if len(idlist) == 1:
        if name == dblist[idlist[0]] or quickmode:
            return 1
        else:
            print "\nDownload :",dblist[idlist[0]],"?[Y/N]"
            result = raw_input("")
            if result.lower() == "y":
                return 1
            else:
                return 0
    print "\nAnimate <<%s>> Found Are Listed Below,Select The Index You Want:\n"%(name)
    for (i,id) in zip(range(1,len(idlist)+1),idlist):
        print "[%d]:%s"%(i,dblist[id])
    print "[Q]:quit\n"
    while True:
        result = raw_input("Input:")
        if result.lower() == "q":
            return 0
        if not result.isdigit():
            print "Input a Number"
        elif not int(result) in range(1,len(idlist)+1):
            print "Out of Range!!"
        else:
            return int(result)

def ShowEpsInfo(EPSInfo):
    """ show episode info for selection, four entries per row """
    eps_list = map(lambda x:x[1],EPSInfo)
    m_str = ""
    for (eps,epsinfo) in zip(range(1,len(eps_list)+1),eps_list):
        strtemp = "[%d]:%s"%(eps,epsinfo)
        m_str = m_str + strtemp.ljust(15)
        if not (eps % 4):
            m_str = m_str + "\n"
    print m_str

def GetChoice():
    while True:
        inputrange = raw_input("Input Download Range:(For Example:2-10 or 17 or all)")
        if inputrange.find('-')>=0:
            eps = inputrange.split('-')
            if len(eps)==2 and eps[0].isdigit() and eps[1].isdigit():
                if int(eps[1]) >= int(eps[0]):
                    return range(int(eps[0]),int(eps[1])+1)
        elif inputrange.isdigit():
            return [int(inputrange)]
        elif inputrange.lower()=="all":
            return range(-1,1000)  # effectively infinite
        return []

def GetEpsInfo(id):
    """ get (url, eps_name) pairs for the given animate id """
    if GetRE(id,r'^\d+$') != []:
        url = "http://dmxz.zerodm.tv/xiazai/"+id+".html"
    else:
        url = "http://dmxz.zerodm.tv/xiazai/"+id
    print url
    headers = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
    req = urllib2.Request(url = url,headers = headers)
    content = toUTF8(urllib2.urlopen(req).read())
    regexp = r"<a\s*href=['\"](.+?xunlei.+?)['\"].*?>(.*?)</a>"
    return GetRE(content,regexp)

def GetDLURL_xunlei(pageurl):
    try:
        content = urllib2.urlopen(pageurl).read()
    except:
        return []
    regexp = r"href=\"(http://gdl\.lixian\.vip\.xunlei\.com/[^\"]*)\" download=\"([^\"]*)"
    urllist = GetRE(content,regexp)
    downloadlist = []
    if len(urllist) == 0:
        return []
    for url in urllist:
        url = url[0].replace("download",url[1])
        downloadlist.append(url)
    return downloadlist

def AnimateNameCheck(animate_name):
    """ make sure the file name is valid """
    if animate_name.find(r"/")>=0:
        animate_name = animate_name.replace(r"/","")
    if animate_name.find(r":")>=0:
        animate_name = animate_name.replace(r":","")
    if animate_name.find(r"?")>=0:
        animate_name = animate_name.replace(r"?","")
    return animate_name

def DownLoad(EPSInfo,eps_range,animate_name):
    animatename = AnimateNameCheck(encodeFileName(animate_name))
    downloadfile = open(animatename+".txt","w")
    EpsFailList = []
    for i in range(len(EPSInfo)):
        if i+1 in eps_range:
            print "Getting Download URL for ",EPSInfo[i][1],":",
            downloadlist = GetDLURL_xunlei(EPSInfo[i][0])
            if len(downloadlist) == 0:
                print "Get Url Fail!!"
                EpsFailList.append(i)
            else:
                print "Get Daze!"
for url in downloadlist: downloadfile.write(url+"\n\n") if i != len(EPSInfo)-1:#LAST ONE DON'T NEED DELAY time.sleep(3) if len(EpsFailList) != 0: print "\n",animate_name,":\nItem(s) shown below get url fail,Please download it manually..." downloadfile.write(animate_name+":\nItem(s) shown below get url fail:\n") for eps in EpsFailList: print EPSInfo[eps][1]," ", downloadfile.write(EPSInfo[eps][1]+":\n"+EPSInfo[eps][0]+"\n") print "\n" downloadfile.close() def DownloadSingleAnimate(dblist,argv): idlist = FindAnimate(dblist,argv) choice = ShowAnimateFound(argv,idlist,dblist)-1 if choice != -1: print "Getting Info of ",dblist[idlist[choice]],"......." EPSInfo = GetEpsInfo(idlist[choice]) ShowEpsInfo(EPSInfo) Eps_Range = GetChoice() DownLoad(EPSInfo,Eps_Range,dblist[idlist[choice]]) def downloadFile(dblist,filename): AnimateList = {} for animateName in open(filename,'r'): if animateName.find('\n')>=0: animateName = animateName.replace('\n','') if animateName.find('\r')>=0: animateName = animateName.replace('\r','') idlist = FindAnimate(dblist,animateName) choice = ShowAnimateFound(animateName,idlist,dblist,1)-1 if choice != -1: AnimateList[idlist[choice]]= dblist[idlist[choice]] for id in AnimateList: EPSInfo = GetEpsInfo(id) Eps_Range = range(-1,1000) print 'downloading ',AnimateList[id],'...' DownLoad(EPSInfo,Eps_Range,AnimateList[id]) print "Finished" def main(argv): if len(argv) == 1: print """ Usages: ======================================================== Single Animate: python zerodm.py AnimateName Animate in file: python zerodm.py downloadlist.txt ======================================================== Learn more detail,please visit: www.kylen314.com/archives/5729""" return dblist = GetAnimateList() if dblist == {}: print "Database Read Fail!" return command = argv[1][-4:].lower() if command == ".txt": downloadFile(dblist,argv[1]) else: DownloadSingleAnimate(dblist,toUTF8(argv[1])) if __name__ == '__main__': main(sys.argv)
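# --- Illustrative sketch (not part of the script above) ---
# Shows the episode ranges GetChoice() derives from each accepted input
# form; this mirrors its parsing without the raw_input loop. The inputs
# below are hypothetical.
def _demo_parse_range(inputrange):
    if inputrange.find('-') >= 0:
        eps = inputrange.split('-')
        if len(eps) == 2 and eps[0].isdigit() and eps[1].isdigit():
            if int(eps[1]) >= int(eps[0]):
                return range(int(eps[0]), int(eps[1]) + 1)
    elif inputrange.isdigit():
        return [int(inputrange)]
    elif inputrange.lower() == "all":
        return range(-1, 1000)  # effectively infinite
    return []

assert list(_demo_parse_range("2-4")) == [2, 3, 4]
assert _demo_parse_range("17") == [17]
assert _demo_parse_range("junk") == []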
from __future__ import division, print_function from matplotlib import pyplot as plt import numpy as np import IPython from itertools import product from itertools import chain, combinations import ujson import pickle from amazon_utils import load_amazon_ranking_data_dpp from amazon_experiment_specifications import datasets, n_folds, dim_ranking, MODEL_PATH, RANKING_DATA_PATH import multiprocessing N_CPUS = multiprocessing.cpu_count() plot = True def compute_acc_and_rank(true_set, suggested): """ Computes the accuracy and the rank of suggested, when the correct prediction would be true_set. (Note that the function is specialized to true_set containing only a single item.) """ assert len(true_set) == 1 accuracy = 1. if suggested[0] in true_set else 0. rank = 1. + suggested.index(true_set[0]) return (accuracy, rank) if __name__ == '__main__': # setup data structures for bookkeeping results_all_flid = dict() results_all_mod = dict() results_all_dpp = dict() rank_flid = dict() rank_mod = dict() rank_dpp = dict() for dataset in datasets: results_all_flid[dataset] = [] results_all_mod[dataset] = [] results_all_dpp[dataset] = [] rank_flid[dataset] = [] rank_mod[dataset] = [] rank_dpp[dataset] = [] for dataset, fold, dim in product(datasets, range(n_folds), [dim_ranking]): print('-' * 30) print('dataset: %s (fold %d)' % (dataset, fold + 1)) print('dim=%d' % dim) result_ranking_flid_f = '{0}/{1}_flid_d_{2}_fold_{3}.json'.format(RANKING_DATA_PATH, dataset, dim, fold + 1) result_ranking_mod_f = '{0}/{1}_mod_fold_{2}.json'.format(RANKING_DATA_PATH, dataset, fold + 1) result_ranking_dpp_f = '{0}/{1}_dpp_em_fold_{2}.json'.format(RANKING_DATA_PATH, dataset, fold + 1) result_ranking_gt_f = '{0}/{1}_gt_fold_{2}.json'.format(RANKING_DATA_PATH, dataset, fold + 1) GROUND_TRUTH = result_ranking_gt_f if dataset == "all_small": METHODS = { 'flid': result_ranking_flid_f, 'mod': result_ranking_mod_f, } else: METHODS = { 'flid': result_ranking_flid_f, 'mod': result_ranking_mod_f, 'dpp': result_ranking_dpp_f, } results = dict() with open(GROUND_TRUTH) as f_gt: list_gt = ujson.load(f_gt) for method, filename in METHODS.items(): # print('processing', method) result = None avg_score = [] avg_rank = [] with open(filename, 'r') as f_sc: print(filename) list_sc = ujson.load(f_sc) print("Read %d lists." % len(list_sc)) assert len(list_gt) == len(list_sc), "Length of ground truth does not match length of predictions." for true_set, suggested in zip(list_gt, list_sc): accuracy, rank = compute_acc_and_rank(true_set, suggested) avg_score.append(accuracy) avg_rank.append(rank) if method == 'flid': results_all_flid[dataset].append(np.mean(avg_score)) rank_flid[dataset].append(np.mean([1./rank for rank in avg_rank])) elif method == 'mod': results_all_mod[dataset].append(np.mean(avg_score)) rank_mod[dataset].append(np.mean([1./rank for rank in avg_rank])) elif method == 'dpp': results_all_dpp[dataset].append(np.mean(avg_score)) rank_dpp[dataset].append(np.mean([1./rank for rank in avg_rank])) else: assert False, "Invalid method." 
if not plot: print("Not plotting :(") import sys sys.exit(1) import IPython IPython.embed() from matplotlib import rc rc('font', **{'family': 'serif', 'serif': ['Times']}) rc('text', usetex=True) rc('font', size=9) rc('legend', fontsize=7) for plot_type in ['acc', 'mrr']: means_flid = [] err_flid = [] means_dpp = [] err_dpp = [] means_modular = [] err_modular = [] xticks = [] xtick_labels = [] if plot_type == 'acc': # accuracy plot filename = 'amazon_acc.pdf' max_x = 25 for dataset in datasets: means_modular.append(100 * np.mean(results_all_mod[dataset])) means_flid.append(100 * np.mean(results_all_flid[dataset])) means_dpp.append(100 * np.mean(results_all_dpp[dataset])) err_modular.append(100 * np.std(results_all_mod[dataset])) err_flid.append(100 * np.std(results_all_flid[dataset])) err_dpp.append(100 * np.std(results_all_dpp[dataset])) print(means_flid) elif plot_type == 'mrr': # mean reciprocal ranking filename = 'amazon_mrr.pdf' max_x = 50 # 1700 for dataset in datasets: means_flid.append(100 * np.mean(rank_flid[dataset])) means_dpp.append(100 * np.mean(rank_dpp[dataset])) means_modular.append(100 * np.mean(rank_mod[dataset])) err_modular.append(100 * np.std(rank_mod[dataset])) err_flid.append(100 * np.std(rank_flid[dataset])) err_dpp.append(100 * np.std(rank_dpp[dataset])) print(means_flid) else: assert False plt.figure(figsize=(4 / 2.54, 10 / 2.54)) index = np.arange(len(datasets)) bar_width = 0.25 opacity = 0.5 error_config = {'ecolor': '0.3'} for k in range(3): if k == 2: rects1 = plt.barh( index, means_flid, bar_width, alpha=opacity, color='b', error_kw=error_config, label='FLID', linewidth=0) #, #xerr=err_nce) plt.errorbar(means_flid, index + 0.5 * bar_width, capsize = 1, ecolor = '#666666', fmt=None, xerr=1*np.array(err_flid)) if k == 0: rects1 = plt.barh( index + 2*bar_width, means_modular, bar_width, alpha=opacity, color='k', error_kw=error_config, label='modular', linewidth=0)#, #xerr=err_picard_dpp) plt.errorbar(means_modular, index + 2.5 * bar_width, capsize = 1, ecolor = '#666666', fmt=None, xerr=1*np.array(err_modular)) if k == 1: rects2 = plt.barh( index + bar_width, means_dpp, bar_width, alpha=opacity, color='r', error_kw=error_config, label='DPP (EM)', linewidth=0)#, #xerr=err_em_dpp) plt.errorbar(means_dpp, index + 1.5 * bar_width, capsize = 1, ecolor = '#666666', fmt=None, xerr=1*np.array(err_dpp)) plt.yticks([]) # plt.xticks(xticks, xtick_labels) if plot_type == 'mrr': xrange = list(range(0, 60, 10)) plt.xticks(xrange) # remove ticks on y-axis ax = plt.gca() ax.tick_params(axis='y', which='both', length=0) # plt.tight_layout() plt.axis([0, max_x, -bar_width, len(datasets),]) if plot_type == 'mrr': plt.legend(loc=4, prop={'size': 6}) plt.subplots_adjust(left=0.2) # plt.subplots_adjust(bottom=0.1) # plt.show() if k == 2: # plt.savefig("registries%d.pdf" % k) plt.savefig(filename, bbox_inches='tight')
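# The evaluation loop above reduces to two numbers per method: accuracy at
# rank 1 and the mean reciprocal rank (MRR). A small sketch of the same
# computation on toy data, assuming single-item ground-truth sets exactly as
# compute_acc_and_rank() does; the helper name evaluate_rankings is
# illustrative and not part of the experiment code.
import numpy as np

def evaluate_rankings(ground_truth, suggestions):
    """Return (accuracy@1, MRR) for single-item ground-truth sets."""
    accuracies, reciprocal_ranks = [], []
    for true_set, suggested in zip(ground_truth, suggestions):
        assert len(true_set) == 1
        accuracies.append(1.0 if suggested[0] in true_set else 0.0)
        rank = 1.0 + suggested.index(true_set[0])
        reciprocal_ranks.append(1.0 / rank)
    return np.mean(accuracies), np.mean(reciprocal_ranks)

# Two toy test cases: the first hit at rank 1, the second at rank 3.
gt = [[5], [2]]
pred = [[5, 1, 2], [7, 9, 2]]
acc, mrr = evaluate_rankings(gt, pred)
print('accuracy@1 = %.2f, MRR = %.2f' % (acc, mrr))  # 0.50 and ~0.67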
#!/usr/bin/env python # # Appcelerator Titanium Module Packager # # import os, subprocess, sys, glob, string, optparse, subprocess import zipfile from datetime import date cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename)) os.chdir(cwd) required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk'] module_defaults = { 'description':'My module', 'author': 'Your Name', 'license' : 'Specify your license', 'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year), } module_license_default = "TODO: place your license here and we'll include it in the module distribution" def find_sdk(config): sdk = config['TITANIUM_SDK'] return os.path.expandvars(os.path.expanduser(sdk)) def replace_vars(config,token): idx = token.find('$(') while idx != -1: idx2 = token.find(')',idx+2) if idx2 == -1: break key = token[idx+2:idx2] if not config.has_key(key): break token = token.replace('$(%s)' % key, config[key]) idx = token.find('$(') return token def read_ti_xcconfig(): contents = open(os.path.join(cwd,'titanium.xcconfig')).read() config = {} for line in contents.splitlines(False): line = line.strip() if line[0:2]=='//': continue idx = line.find('=') if idx > 0: key = line[0:idx].strip() value = line[idx+1:].strip() config[key] = replace_vars(config,value) return config def generate_doc(config): docdir = os.path.join(cwd,'documentation') if not os.path.exists(docdir): warn("Couldn't find documentation file at: %s" % docdir) return None try: import markdown2 as markdown except ImportError: import markdown documentation = [] for file in os.listdir(docdir): if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)): continue md = open(os.path.join(docdir,file)).read() html = markdown.markdown(md) documentation.append({file:html}); return documentation def compile_js(manifest,config): js_file = os.path.join(cwd,'assets','se.hypelab.webview.js') if not os.path.exists(js_file): return from compiler import Compiler try: import json except: import simplejson as json compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs') root_asset, module_assets = compiler.compile_module() root_asset_content = """ %s return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]); """ % root_asset module_asset_content = """ %s NSNumber *index = [map objectForKey:path]; if (index == nil) { return nil; } return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]); """ % module_assets from tools import splice_code assets_router = os.path.join(cwd,'Classes','SeHyperlabWebviewModuleAssets.m') splice_code(assets_router, 'asset', root_asset_content) splice_code(assets_router, 'resolve_asset', module_asset_content) # Generate the exports after crawling all of the available JS source exports = open('metadata.json','w') json.dump({'exports':compiler.exports }, exports) exports.close() def die(msg): print msg sys.exit(1) def info(msg): print "[INFO] %s" % msg def warn(msg): print "[WARN] %s" % msg def validate_license(): c = open(os.path.join(cwd,'LICENSE')).read() if c.find(module_license_default)!=-1: warn('please update the LICENSE file with your license text before distributing') def validate_manifest(): path = os.path.join(cwd,'manifest') f = open(path) if not os.path.exists(path): die("missing %s" % path) manifest = {} for line in f.readlines(): line = line.strip() if line[0:1]=='#': continue if 
line.find(':') < 0: continue key,value = line.split(':') manifest[key.strip()]=value.strip() for key in required_module_keys: if not manifest.has_key(key): die("missing required manifest key '%s'" % key) if module_defaults.has_key(key): defvalue = module_defaults[key] curvalue = manifest[key] if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key) return manifest,path ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README'] ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT'] def zip_dir(zf,dir,basepath,ignoreExt=[]): if not os.path.exists(dir): return for root, dirs, files in os.walk(dir): for name in ignoreDirs: if name in dirs: dirs.remove(name) # don't visit ignored directories for file in files: if file in ignoreFiles: continue e = os.path.splitext(file) if len(e) == 2 and e[1] in ignoreExt: continue from_ = os.path.join(root, file) to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1) zf.write(from_, to_) def glob_libfiles(): files = [] for libfile in glob.glob('build/**/*.a'): if libfile.find('Release-')!=-1: files.append(libfile) return files def build_module(manifest,config): from tools import ensure_dev_path ensure_dev_path() rc = os.system("xcodebuild -sdk iphoneos -configuration Release") if rc != 0: die("xcodebuild failed") rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release") if rc != 0: die("xcodebuild failed") # build the merged library using lipo moduleid = manifest['moduleid'] libpaths = '' for libfile in glob_libfiles(): libpaths+='%s ' % libfile os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid)) def generate_apidoc(apidoc_build_path): global options if options.skip_docs: info("Skipping documentation generation.") return False else: info("Module apidoc generation can be skipped using --skip-docs") apidoc_path = os.path.join(cwd, "apidoc") if not os.path.exists(apidoc_path): warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path) return False if not os.path.exists(apidoc_build_path): os.makedirs(apidoc_build_path) ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True)) if not len(ti_root) > 0: warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.") warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').") return False docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py") if not os.path.exists(docgen): warn("Not generating documentation from the apidoc folder. 
Couldn't find docgen.py at: %s" % docgen) return False info("Generating documentation from the apidoc folder.") rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path)) if rc != 0: die("docgen failed") return True def package_module(manifest,mf,config): name = manifest['name'].lower() moduleid = manifest['moduleid'].lower() version = manifest['version'] modulezip = '%s-iphone-%s.zip' % (moduleid,version) if os.path.exists(modulezip): os.remove(modulezip) zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED) modulepath = 'modules/iphone/%s/%s' % (moduleid,version) zf.write(mf,'%s/manifest' % modulepath) libname = 'lib%s.a' % moduleid zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname)) docs = generate_doc(config) if docs!=None: for doc in docs: for file, html in doc.iteritems(): filename = string.replace(file,'.md','.html') zf.writestr('%s/documentation/%s'%(modulepath,filename),html) apidoc_build_path = os.path.join(cwd, "build", "apidoc") if generate_apidoc(apidoc_build_path): for file in os.listdir(apidoc_build_path): if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)): continue zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file)) zip_dir(zf,'assets',modulepath,['.pyc','.js']) zip_dir(zf,'example',modulepath,['.pyc']) zip_dir(zf,'platform',modulepath,['.pyc','.js']) zf.write('LICENSE','%s/LICENSE' % modulepath) zf.write('module.xcconfig','%s/module.xcconfig' % modulepath) exports_file = 'metadata.json' if os.path.exists(exports_file): zf.write(exports_file, '%s/%s' % (modulepath, exports_file)) zf.close() if __name__ == '__main__': global options parser = optparse.OptionParser() parser.add_option("-s", "--skip-docs", dest="skip_docs", action="store_true", help="Will skip building documentation in apidoc folder", default=False) (options, args) = parser.parse_args() manifest,mf = validate_manifest() validate_license() config = read_ti_xcconfig() sdk = find_sdk(config) sys.path.insert(0,os.path.join(sdk,'iphone')) sys.path.append(os.path.join(sdk, "common")) compile_js(manifest,config) build_module(manifest,config) package_module(manifest,mf,config) sys.exit(0)
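# The packager reads the module manifest as plain "key: value" lines,
# skipping comments, and then checks required keys against the defaults. A
# small self-contained sketch of that parsing step, run on an in-memory
# string rather than a file; REQUIRED_KEYS is a shortened illustrative list,
# not the packager's full required_module_keys.
REQUIRED_KEYS = ['name', 'version', 'moduleid', 'platform', 'minsdk']

def parse_manifest(text):
    """Parse 'key: value' lines, ignoring blank lines and '#' comments."""
    manifest = {}
    for line in text.splitlines():
        line = line.strip()
        if not line or line.startswith('#') or ':' not in line:
            continue
        key, value = line.split(':', 1)  # split only on the first ':'
        manifest[key.strip()] = value.strip()
    missing = [k for k in REQUIRED_KEYS if k not in manifest]
    return manifest, missing

sample = """# demo manifest
name: webview
version: 1.0.0
moduleid: se.hypelab.webview
platform: iphone
"""
manifest, missing = parse_manifest(sample)
print(manifest)
print('missing keys: %s' % missing)  # ['minsdk'] for this sample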
""" Maybe you want to randomly search for some interesting hyperparameters for you model? http://www.jmlr.org/papers/volume13/bergstra12a/bergstra12a.pdf Logs everything to ../logs/sweeper-$RUN_STRING.log """ from __future__ import print_function import sys sys.path.append("..") # ugly hack import argparse import logging from math import ceil, log10 from Callbacks import CompilationOfCallbacks from data_generator import VisualWordDataGenerator import models from train import GroundedTranslation import keras.callbacks from numpy.random import uniform # Set up logger logging.basicConfig(level=logging.INFO, stream=sys.stdout) logger = logging.getLogger(__name__) class Sweep(object): def __init__(self, args): ''' Initialise the model and set Theano debugging model if self.args.debug is true ''' self.args = args self.use_sourcelang = args.source_vectors is not None self.use_image = not args.no_image self.data_generator = None self.prepare_datagenerator() if self.args.debug: theano.config.optimizer = 'fast_compile' theano.config.exception_verbosity = 'high' def random_sweep(self): ''' Start randomly sweeping through hyperparameter ranges. This current only supports sweeping through the L2 regularisation strength, the learning rate, and the dropout probability. ''' model = GroundedTranslation(self.args, datagen=self.data_generator) handle = open("../logs/sweeper-%s.log" % self.args.run_string, "w") handle.write("{:3} | {:10} | {:10} | {:10} | {:10} | {:10} \n".format("Run", "loss", "val_loss", "lr", "reg", "dropin")) handle.close() for sweep in xrange(self.args.num_sweeps): # randomly sample a learning rate and an L2 regularisation handle = open("../logs/sweeper-%s.log" % self.args.run_string, "a") if self.args.min_lr == ceil(self.args.min_lr): # you provided an exponent, we'll search in log-space lr = 10**uniform(self.args.min_lr, self.args.max_lr) else: # you provided a specific number lr = 10**uniform(log10(self.args.min_lr), log10(self.args.max_lr)) if self.args.min_l2 == ceil(self.args.min_l2): # you provided an exponent, we'll search in log-space l2 = 10**uniform(self.args.min_l2, self.args.max_l2) else: # you provide a specific number l2 = 10**uniform(log10(self.args.min_l2), log10(self.args.max_l2)) drop_in = uniform(self.args.min_dropin, self.args.max_dropin) # modify the arguments that will be used to create the graph model.args.lr = lr model.args.l2reg = l2 model.args.dropin = drop_in logger.info("Setting learning rate to: %.5e", lr) logger.info("Setting l2reg to: %.5e", l2) logger.info("Setting dropout to: %f", drop_in) # initialise and compile a new model losses = model.train_model() handle.write("{:3d} | {:5.5f} | {:5.5f} | {:5e} | {:5e} | {:5.4f} \n".format(sweep, losses.history['loss'][-1], losses.history['val_loss'][-1], lr, l2, drop_in)) handle.close() def prepare_datagenerator(self): ''' Initialise the data generator and its datastructures, unless a valid data generator was already passed into the GroundedTranslation.__init() function. 
''' # Initialise the data generator if it has not yet been initialised if self.data_generator == None: self.data_generator = VisualWordDataGenerator(self.args, self.args.dataset) # Extract the working vocabulary from the training dataset if self.args.existing_vocab != "": self.data_generator.set_vocabulary(self.args.existing_vocab) else: self.data_generator.extract_vocabulary() self.V = self.data_generator.get_vocab_size() if __name__ == "__main__": parser = argparse.ArgumentParser(description="Randomly sweep through some\ hyperparameters for your model.") # General options parser.add_argument("--run_string", default="", type=str, help="Optional string to help you identify the run") parser.add_argument("--debug", action="store_true", help="Print debug messages to stdout?") parser.add_argument("--init_from_checkpoint", help="Initialise the model\ parameters from a pre-defined checkpoint? Useful to\ continue training a model.", default=None, type=str) parser.add_argument("--enable_val_pplx", action="store_true", default=True, help="Calculate and report smoothed validation pplx\ alongside the Keras objective function loss.\ (default=true)") parser.add_argument("--fixed_seed", action="store_true", help="Start with a fixed random seed? Useful for\ reproding experiments. (default = False)") parser.add_argument("--num_sents", default=5, type=int, help="Number of descriptions/image for training") # Define the types of input data the model will receive parser.add_argument("--dataset", default="", type=str, help="Path to the\ HDF5 dataset to use for training / val input\ (defaults to flickr8k)") parser.add_argument("--supertrain_datasets", nargs="+", help="Paths to the\ datasets to use as additional training input (defaults\ to None)") parser.add_argument("--unk", type=int, help="unknown character cut-off. Default=3", default=3) parser.add_argument("--existing_vocab", type=str, default="", help="Use an existing vocabulary model to define the\ vocabulary and UNKing in this dataset?\ (default = "", which means we will derive the\ vocabulary from the training dataset") parser.add_argument("--no_image", action="store_true", help="Do not use image data.") parser.add_argument("--source_vectors", default=None, type=str, help="Path to final hidden representations of\ encoder/source language VisualWordLSTM model.\ (default: None.) Expects a final_hidden_representation\ vector for each image in the dataset") parser.add_argument("--source_enc", type=str, default=None, help="Which type of source encoder features? Expects\ either 'mt_enc' or 'vis_enc'. Required.") parser.add_argument("--source_type", type=str, default=None, help="Source features over gold or predicted tokens?\ Expects 'gold' or 'predicted'. Required") # Model hyperparameters parser.add_argument("--batch_size", default=100, type=int) parser.add_argument("--embed_size", default=256, type=int) parser.add_argument("--hidden_size", default=256, type=int) parser.add_argument("--dropin", default=0.5, type=float, help="Prob. of dropping embedding units. Default=0.5") parser.add_argument("--gru", action="store_true", help="Use GRU instead\ of LSTM recurrent state? (default = False)") parser.add_argument("--big_batch_size", default=10000, type=int, help="Number of examples to load from disk at a time;\ 0 loads entire dataset. 
Default is 10000") parser.add_argument("--mrnn", action="store_true", help="Use a Mao-style multimodal recurrent neural\ network?") parser.add_argument("--peeking_source", action="store_true", help="Input the source features at every timestep?\ Default=False.") # Optimisation details parser.add_argument("--optimiser", default="adam", type=str, help="Optimiser: rmsprop, momentum, adagrad, etc.") parser.add_argument("--lr", default=0.001, type=float) parser.add_argument("--beta1", default=None, type=float) parser.add_argument("--beta2", default=None, type=float) parser.add_argument("--epsilon", default=None, type=float) parser.add_argument("--stopping_loss", default="bleu", type=str, help="minimise cross-entropy or maximise BLEU?") parser.add_argument("--l2reg", default=1e-8, type=float, help="L2 cost penalty. Default=1e-8") parser.add_argument("--clipnorm", default=-1, type=float, help="Clip gradients? (default = -1, which means\ don't clip the gradients.") parser.add_argument("--max_epochs", default=50, type=int, help="Maxmimum number of training epochs. Used with\ --predefined_epochs") parser.add_argument("--patience", type=int, default=10, help="Training\ will be terminated if validation BLEU score does not\ increase for this number of epochs") parser.add_argument("--no_early_stopping", action="store_true") # Language generation details parser.add_argument("--generation_timesteps", default=30, type=int, help="Maximum number of words to generate for unseen\ data (default=10).") # Legacy options parser.add_argument("--generate_from_N_words", type=int, default=0, help="Use N words as starting point when generating\ strings. Useful mostly for mt-only model (in other\ cases, image provides enough useful starting\ context.)") parser.add_argument("--predefined_epochs", action="store_true", help="Do you want to stop training after a specified\ number of epochs, regardless of early-stopping\ criteria? Use in conjunction with --max_epochs.") # Neccesary but unused in this module parser.add_argument("--h5_writeable", action="store_true", help="Open the H5 file for write-access? Useful for\ serialising hidden states to disk. (default = False)") parser.add_argument("--use_predicted_tokens", action="store_true", help="Generate final hidden state\ activations over oracle inputs or from predicted\ inputs? Default = False ( == Oracle)") # Random parameter sweep options parser.add_argument("--num_sweeps", type=int, default=100, help="Number of different random initialisations to\ use for the hyperparameter search. More means it will\ take you longer to finish the sweep but it will be a\ better reflection of the parameter space. (Default =\ 100).") parser.add_argument("--min_lr", type=float, default=-3) parser.add_argument("--max_lr", type=float, default=0) parser.add_argument("--min_l2", type=float, default=-3) parser.add_argument("--max_l2", type=float, default=0) parser.add_argument("--min_dropin", type=float, default=0) parser.add_argument("--max_dropin", type=float, default=0.5) arguments = parser.parse_args() if arguments.source_vectors is not None: if arguments.source_type is None or arguments.source_enc is None: parser.error("--source_type and --source_enc are required when\ using --source_vectors") if arguments.fixed_seed: import numpy as np np.random.seed(1234) import theano sweep = Sweep(arguments) sweep.random_sweep()
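# The sweep draws the learning rate and L2 strength log-uniformly, following
# the random-search convention cited in the module docstring: an integer-valued
# bound is treated as an exponent, anything else as a concrete value whose
# log10 is taken first. A condensed sketch of that sampling rule under the
# same convention; sample_log_uniform is an illustrative name, not part of
# the Sweep class.
from math import ceil, log10
from numpy.random import uniform

def sample_log_uniform(min_val, max_val):
    """Draw 10**u with u uniform over the exponent range."""
    if min_val == ceil(min_val):
        # bound looks like an exponent, e.g. -3 means 1e-3
        return 10 ** uniform(min_val, max_val)
    # bound is a concrete value; convert it to an exponent first
    return 10 ** uniform(log10(min_val), log10(max_val))

for _ in range(3):
    lr = sample_log_uniform(-3, 0)        # exponents: anywhere in [1e-3, 1]
    l2 = sample_log_uniform(1e-6, 1e-2)   # concrete bounds, converted to exponents
    print('lr=%.2e  l2=%.2e' % (lr, l2))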
# encoding: utf-8 __author__ = "Nils Tobias Schmidt" __email__ = "schmidt89 at informatik.uni-marburg.de" from copy import deepcopy from multiprocessing.process import Process from androlyze.analyze import AnalyzeUtil from androlyze.analyze.parallel import STOP_SENTINEL from androlyze.log.Log import clilog, log from androlyze.model.script import ScriptUtil from androlyze.storage.exception import StorageException from androlyze.model.android.apk.FastApk import FastApk class Worker(Process): ''' Worker process that does the actual analysis ''' def __init__(self, script_list, script_hashes, min_script_needs, work_queue, storage, sm_analyzed_apks, analyzed_apks, storage_results = None): ''' Parameters ---------- script_list: list<type<AndroScript>> List of `AndroScript`s references (not instantiated class!) script_hashes : list<str>, optional (default is None) If given, set the hash for the `AndroScript`s min_script_needs : tuple<bool> See :py:method:`ScriptUtil.get_maximal_script_options`. work_queue : Queue<str> Queue with paths to apks which shall be analyzed. storage: RedundantStorage The storage to store the results. sm_analyzed_apks : Value Shared memory to add number of analyzed apks. analyzed_apks : Queue<FastAPK> Holds the analyzed APKs. storage_results : Queue<tuple<str, bool>>, optional (default is None) Storage results. First component is the id of the entry and the second a boolean indication if the result has been stored in gridfs. Raises ------ AndroScriptError If an error happened while initializing some `AndroScript` ''' super(Worker, self).__init__() # instantiate scripts self.androscripts = sorted(ScriptUtil.instantiate_scripts(script_list, script_hashes = script_hashes)) self.min_script_needs = min_script_needs # queues self.work_queue = work_queue self.analyzed_apks = analyzed_apks self.analyzed_apks.cancel_join_thread() self.work_queue.cancel_join_thread() self.storage = storage self.__sm_analyzed_apks = sm_analyzed_apks self.__storage_results = storage_results self.__storage_results.cancel_join_thread() def get_storage_results(self): return self.__storage_results def set_storage_results(self, value): self.__storage_results = value def del_storage_results(self): del self.__storage_results def get_androscripts(self): return self.__androscripts def get_min_script_needs(self): return self.__min_script_needs def get_work_queue(self): return self.__work_queue def get_storage(self): return self.__storage def set_androscripts(self, value): self.__androscripts = value def set_min_script_needs(self, value): self.__min_script_needs = value def set_work_queue(self, value): self.__work_queue = value def set_storage(self, value): self.__storage = value def del_androscripts(self): del self.__androscripts def del_min_script_needs(self): del self.__min_script_needs def del_work_queue(self): del self.__work_queue def del_storage(self): del self.__storage androscripts = property(get_androscripts, set_androscripts, del_androscripts, "list<AndroScript> : List of `AndroScript`s") min_script_needs = property(get_min_script_needs, set_min_script_needs, del_min_script_needs, " tuple<bool> : See :py:method:`ScriptUtil.get_maximal_script_options`.") work_queue = property(get_work_queue, set_work_queue, del_work_queue, "Queue<str> : Queue with paths to apks which shall be analyzed.") storage = property(get_storage, set_storage, del_storage, "RedundantStorage : The storage to store the results.") storage_results = property(get_storage_results, set_storage_results, del_storage_results, 
"Queue<tuple<str, bool>> : Storage results. First component is the id of the entry and the second a boolean indication if the result has been stored in gridfs.") def add_storage_result(self, storage_result): ''' Add `res` to the `storage_results`. Parameters ---------- res : tuple<str, bool> Storage results. First component is the id of the entry and the second a boolean indication if the result has been stored in gridfs. ''' if self.storage_results is not None: self.storage_results.put(storage_result) def add_analyzed_apks_sm(self, cnt_analyzed_apks): ''' Add `cnt_analyzed_apks` to the shared counter. Operation uses an lock! ''' with self.__sm_analyzed_apks.get_lock(): self.__sm_analyzed_apks.value += cnt_analyzed_apks def analyze_apk(self, eandro_apk): ''' Analyze the `eandro_apk` and return the analysis results. Parameters ---------- eandro_apk : EAndroApk The apk to analyze. Returns ------- list<FastApk, AndroScript> None If error happened. ''' if eandro_apk is not None: # analysis res = AnalyzeUtil.analyze_apk(eandro_apk, self.androscripts, self.min_script_needs, reset_scripts = True) if res is not None: fastapk, scripts = res # we need to backup the scripts cause they will be reused for a new analysis res[1] = deepcopy(scripts) clilog.debug("analyzed %s", fastapk.short_description()) return res def __store_results(self, results): ''' Store the results and increase the analyzed apks counter. Parameters ---------- results : list<FastApk, AndroScript> ''' for res in results: # unpack results fastapk, script_results = res for script in script_results: try: storage_result = AnalyzeUtil.store_script_res(self.storage, script, fastapk) self.add_storage_result(storage_result) except StorageException as e: log.warn(e) self.add_analyzed_apks_sm(1) def run(self): work_queue = self.work_queue try: for work in iter(work_queue.get, STOP_SENTINEL): try: apk_path, _apk, _ = work eandro_apk = AnalyzeUtil.open_apk(apk_path, apk=_apk) # do the analysis res = self.analyze_apk(eandro_apk) # remember yet analyzed APKs if eandro_apk: self.analyzed_apks.put(FastApk.load_from_eandroapk(eandro_apk)) # collect results if res is not None: self.__store_results([res]) else: # increment analyzed apks counter self.add_analyzed_apks_sm(1) except KeyboardInterrupt as e: raise e except Exception as e: log.exception(e) finally: # signal one task done work_queue.task_done() # signal sentinel read work_queue.task_done() work_queue.close() # be silent except KeyboardInterrupt: pass
# (C) Copyright 2014-2017 Hewlett Packard Enterprise Development LP # Copyright 2015 Cray Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from datetime import datetime from datetime import timedelta from distutils import version import json import monasca_api.monitoring.client as monitoring_client import requests from influxdb import client from influxdb.exceptions import InfluxDBClientError from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from monasca_api.common.repositories import exceptions from monasca_api.common.repositories import metrics_repository from monasca_api.monitoring.metrics import INFLUXDB_QUERY_TIME, TSDB_ERRORS MEASUREMENT_NOT_FOUND_MSG = "measurement not found" LOG = log.getLogger(__name__) STATSD_CLIENT = monitoring_client.get_client() STATSD_TIMER = STATSD_CLIENT.get_timer() class MetricsRepository(metrics_repository.AbstractMetricsRepository): def __init__(self): self._statsd_tsdb_error_count = STATSD_CLIENT.get_counter(TSDB_ERRORS) try: self.conf = cfg.CONF self.influxdb_client = client.InfluxDBClient( self.conf.influxdb.ip_address, self.conf.influxdb.port, self.conf.influxdb.user, self.conf.influxdb.password, self.conf.influxdb.database_name) self._init_serie_builders() except Exception as ex: LOG.exception(ex) raise exceptions.RepositoryException(ex) def _init_serie_builders(self): '''Initializes functions for serie builders that are specific to different versions of InfluxDB. ''' try: influxdb_version, cluster_version = self._get_influxdb_version() LOG.info('Found InfluxDB version %s, cluster: %s', influxdb_version, cluster_version) if influxdb_version < version.StrictVersion('0.11.0'): self._init_serie_builders_to_v0_11_0() else: self._init_serie_builders_from_v0_11_0() except Exception as ex: LOG.exception(ex) # Initialize the serie builders to v0_11_0. Not sure when SHOW DIAGNOSTICS added # support for a version string so to address backward compatibility initialize # InfluxDB serie builders < v0.11.0 self._init_serie_builders_to_v0_11_0() def _init_serie_builders_to_v0_11_0(self): '''Initialize function for InfluxDB serie builders < v0.11.0 ''' LOG.info('Initialize InfluxDB serie builders < v0.11.0') self._build_serie_dimension_values = self._build_serie_dimension_values_to_v0_11_0 self._build_serie_metric_list = self._build_serie_metric_list_to_v0_11_0 def _init_serie_builders_from_v0_11_0(self): '''Initialize function for InfluxDB serie builders >= v0.11.0. In InfluxDB v0.11.0 the SHOW SERIES output changed. 
See, https://github.com/influxdata/influxdb/blob/master/CHANGELOG.md#v0110-2016-03-22 ''' LOG.info('Initialize InfluxDB serie builders >= v0.11.0') self._build_serie_dimension_values = self._build_serie_dimension_values_from_v0_11_0 self._build_serie_metric_list = self._build_serie_metric_list_from_v0_11_0 def _get_influxdb_version(self): '''Determine version from the response to /ping ''' resp = requests.get("http://"+self.conf.influxdb.ip_address+":"+str(self.conf.influxdb.port)+"/ping") header_ver = resp.headers.get('x-influxdb-version', '0.0.0') g = re.match("([0-9.]*)-c([0-9.]*)", header_ver) if g: return g.group(1), g.group(2) else: return header_ver, None def _build_show_series_query(self, dimensions, name, tenant_id, region, start_timestamp=None, end_timestamp=None): where_clause = self._build_where_clause(dimensions, name, tenant_id, region, start_timestamp, end_timestamp) query = 'show series ' + where_clause return query def _build_show_measurements_query(self, dimensions, name, tenant_id, region): where_clause = self._build_where_clause(dimensions, name, tenant_id, region) query = 'show measurements ' + where_clause return query def _build_show_tag_values_query(self, metric_name, dimension_name, tenant_id, region): from_with_clause = '' if metric_name: from_with_clause += ' from "{}"'.format(metric_name) if dimension_name: from_with_clause += ' with key = {}'.format(dimension_name) where_clause = self._build_where_clause(None, None, tenant_id, region) query = 'show tag values' + from_with_clause + where_clause return query def _build_show_tag_keys_query(self, metric_name, tenant_id, region): from_with_clause = '' if metric_name: from_with_clause += ' from "{}"'.format(metric_name) where_clause = self._build_where_clause(None, None, tenant_id, region) query = 'show tag keys' + from_with_clause + where_clause return query def _build_select_measurement_query(self, dimensions, name, tenant_id, region, start_timestamp, end_timestamp, offset, group_by, limit): from_clause = self._build_from_clause(dimensions, name, tenant_id, region, start_timestamp, end_timestamp) offset_clause = self._build_offset_clause(offset) group_by_clause = self._build_group_by_clause(group_by) limit_clause = self._build_limit_clause(limit) query = 'select value, value_meta '\ + from_clause + offset_clause\ + group_by_clause + limit_clause return query def _build_statistics_query(self, dimensions, name, tenant_id, region, start_timestamp, end_timestamp, statistics, period, offset, group_by, limit): from_clause = self._build_from_clause(dimensions, name, tenant_id, region, start_timestamp, end_timestamp) if period is None: period = str(300) if offset: if '_' in offset: tmp = datetime.strptime(str(offset).split('_')[1], "%Y-%m-%dT%H:%M:%SZ") tmp = tmp + timedelta(seconds=int(period)) # Leave out any ID as influx doesn't understand it offset = tmp.isoformat() else: tmp = datetime.strptime(offset, "%Y-%m-%dT%H:%M:%SZ") offset = tmp + timedelta(seconds=int(period)) offset_clause = (" and time > '{}'".format(offset)) from_clause += offset_clause statistics = [statistic.replace('avg', 'mean') for statistic in statistics] statistics = [statistic + '(value)' for statistic in statistics] statistic_string = ",".join(statistics) query = 'select ' + statistic_string + ' ' + from_clause query += self._build_group_by_clause(group_by, period) limit_clause = self._build_limit_clause(limit) query += limit_clause return query def _build_where_clause(self, dimensions, name, tenant_id, region, start_timestamp=None, 
end_timestamp=None): where_clause = '' # name - optional if name: # replace ' with \' to make query parsable clean_name = name.replace("'", "\\'") where_clause += ' from "{}" '.format(clean_name.encode('utf8')) # tenant id where_clause += " where _tenant_id = '{}' ".format(tenant_id.encode( "utf8")) # region where_clause += " and _region = '{}' ".format(region.encode('utf8')) # dimensions - optional if dimensions: for dimension_name, dimension_value in iter( sorted(dimensions.iteritems())): # replace ' with \' to make query parsable clean_dimension_name = dimension_name.replace("\'", "\\'") if dimension_value == "": where_clause += " and \"{}\" =~ /.+/ ".format( clean_dimension_name) elif '|' in dimension_value: # replace ' with \' to make query parsable clean_dimension_value = dimension_value.replace("\'", "\\'") where_clause += " and \"{}\" =~ /^{}$/ ".format( clean_dimension_name.encode('utf8'), clean_dimension_value.encode('utf8')) else: # replace ' with \' to make query parsable clean_dimension_value = dimension_value.replace("\'", "\\'") where_clause += " and \"{}\" = '{}' ".format( clean_dimension_name.encode('utf8'), clean_dimension_value.encode('utf8')) if start_timestamp is not None: where_clause += " and time > " + str(int(start_timestamp * 1000000)) + "u" if end_timestamp is not None: where_clause += " and time < " + str(int(end_timestamp * 1000000)) + "u" return where_clause def _build_from_clause(self, dimensions, name, tenant_id, region, start_timestamp=None, end_timestamp=None): from_clause = self._build_where_clause(dimensions, name, tenant_id, region, start_timestamp, end_timestamp) return from_clause def list_metrics(self, tenant_id, region, name, dimensions, offset, limit, start_timestamp=None, end_timestamp=None): try: query = self._build_show_series_query(dimensions, name, tenant_id, region) query += " limit {}".format(limit + 1) if offset: query += ' offset {}'.format(int(offset) + 1) result = self._query_influxdb(query) json_metric_list = self._build_serie_metric_list(result, tenant_id, region, start_timestamp, end_timestamp, offset) return json_metric_list except InfluxDBClientError as ex: if ex.message.startswith(MEASUREMENT_NOT_FOUND_MSG): return [] else: LOG.exception(ex) raise exceptions.RepositoryException(ex) except Exception as ex: LOG.exception(ex) raise exceptions.RepositoryException(ex) def _build_serie_dimension_values_to_v0_11_0(self, series_names, dimension_name): dim_value_set = set() json_dim_value_list = [] if not series_names: return json_dim_value_list if 'series' not in series_names.raw: return json_dim_value_list if not dimension_name: return json_dim_value_list for series in series_names.raw['series']: if 'columns' not in series: continue if u'values' not in series: continue for value in series[u'values']: dim_value_set.add(value[0]) for value in dim_value_set: json_dim_value_list.append({u'dimension_value': value}) json_dim_value_list = sorted(json_dim_value_list) return json_dim_value_list def _build_serie_dimension_values_from_v0_11_0(self, series_names, dimension_name): '''In InfluxDB v0.11.0 the SHOW TAG VALUES output changed. 
See, https://github.com/influxdata/influxdb/blob/master/CHANGELOG.md#v0110-2016-03-22 ''' dim_value_set = set() json_dim_value_list = [] if not series_names: return json_dim_value_list if 'series' not in series_names.raw: return json_dim_value_list if not dimension_name: return json_dim_value_list for series in series_names.raw['series']: if 'columns' not in series: continue columns = series['columns'] if 'key' not in columns: continue if u'values' not in series: continue for value in series[u'values']: if len(value) < 2: continue for tag in value[1:]: dim_value_set.add(tag) for value in dim_value_set: json_dim_value_list.append({u'dimension_value': value}) json_dim_value_list = sorted(json_dim_value_list) return json_dim_value_list def _build_serie_dimension_names(self, series_names): dim_name_set = set() json_dim_name_list = [] if not series_names: return json_dim_name_list if 'series' not in series_names.raw: return json_dim_name_list for series in series_names.raw['series']: if 'columns' not in series: continue if u'values' not in series: continue for value in series[u'values']: tag_key = value[0] if tag_key.startswith(u'_'): continue dim_name_set.add(tag_key) for name in dim_name_set: json_dim_name_list.append({u'dimension_name': name}) json_dim_name_list = sorted(json_dim_name_list) return json_dim_name_list def _build_serie_metric_list_to_v0_11_0(self, series_names, tenant_id, region, start_timestamp, end_timestamp, offset): json_metric_list = [] if not series_names: return json_metric_list if 'series' not in series_names.raw: return json_metric_list metric_id = 0 if offset: metric_id = int(offset) + 1 for series in series_names.raw['series']: for tag_values in series[u'values']: dimensions = { name: value for name, value in zip(series[u'columns'], tag_values) if value and not name.startswith(u'_') } if self._has_measurements(tenant_id, region, series[u'name'], dimensions, start_timestamp, end_timestamp): metric = {u'id': str(metric_id), u'name': series[u'name'], u'dimensions': dimensions} metric_id += 1 json_metric_list.append(metric) return json_metric_list def _build_serie_metric_list_from_v0_11_0(self, series_names, tenant_id, region, start_timestamp, end_timestamp, offset): '''In InfluxDB v0.11.0 the SHOW SERIES output changed. 
See, https://github.com/influxdata/influxdb/blob/master/CHANGELOG.md#v0110-2016-03-22 ''' json_metric_list = [] if not series_names: return json_metric_list if 'series' not in series_names.raw: return json_metric_list metric_id = 0 if offset: metric_id = int(offset) + 1 for series in series_names.raw['series']: if 'columns' not in series: continue columns = series['columns'] if 'key' not in columns: continue key_index = columns.index('key') if u'values' not in series: continue for value in series[u'values']: split_value = value[key_index].split(',') if len(split_value) < 2: continue serie_name = split_value[0] dimensions = {} for tag in split_value[1:]: tag_key_value = tag.split('=') if len(tag_key_value) < 2: continue tag_key = tag_key_value[0] tag_value = tag_key_value[1] if tag_key.startswith(u'_'): continue dimensions[tag_key] = tag_value if not self._has_measurements(tenant_id, region, serie_name, dimensions, start_timestamp, end_timestamp): continue metric = {u'id': str(metric_id), u'name': serie_name, u'dimensions': dimensions} metric_id += 1 json_metric_list.append(metric) return json_metric_list def _build_measurement_name_list(self, measurement_names): """read measurement names from InfluxDB response Extract the measurement names (InfluxDB terminology) from the SHOW MEASURMENTS result to yield metric names :param measurement_names: result from SHOW MEASUREMENTS call (json-dict) :return: list of metric-names (Monasca terminology) """ json_metric_list = [] if not measurement_names: return json_metric_list for name in measurement_names.raw.get(u'series', [{}])[0].get(u'values', []): entry = {u'name': name[0]} json_metric_list.append(entry) json_metric_list = sorted(json_metric_list) return json_metric_list def _get_dimensions(self, tenant_id, region, name, dimensions): metrics_list = self.list_metrics(tenant_id, region, name, dimensions, None, 2) if len(metrics_list) > 1: raise exceptions.MultipleMetricsException(self.MULTIPLE_METRICS_MESSAGE) if not metrics_list: return {} return metrics_list[0]['dimensions'] def measurement_list(self, tenant_id, region, name, dimensions, start_timestamp, end_timestamp, offset, limit, merge_metrics_flag, group_by): json_measurement_list = [] try: query = self._build_select_measurement_query(dimensions, name, tenant_id, region, start_timestamp, end_timestamp, offset, group_by, limit) if not group_by and not merge_metrics_flag: dimensions = self._get_dimensions(tenant_id, region, name, dimensions) query += " slimit 1" result = self._query_influxdb(query) if not result: return json_measurement_list offset_id = 0 if offset is not None: offset_tuple = offset.split('_') offset_id = int(offset_tuple[0]) if len(offset_tuple) > 1 else 0 index = offset_id for serie in result.raw['series']: if 'values' in serie: measurements_list = [] for point in serie['values']: value_meta = json.loads(point[2]) if point[2] else {} timestamp = point[0][:19] + '.' 
+ point[0][20:-1].ljust(3, '0') + 'Z' measurements_list.append([timestamp, point[1], value_meta]) measurement = {u'name': serie['name'], u'id': str(index), u'columns': [u'timestamp', u'value', u'value_meta'], u'measurements': measurements_list} if not group_by: measurement[u'dimensions'] = dimensions else: measurement[u'dimensions'] = {key: value for key, value in serie['tags'].iteritems() if not key.startswith('_')} json_measurement_list.append(measurement) index += 1 return json_measurement_list except exceptions.RepositoryException as ex: if (isinstance(ex.message, InfluxDBClientError) and ex.message.message.startswith(MEASUREMENT_NOT_FOUND_MSG)): return json_measurement_list else: LOG.exception(ex) raise ex except InfluxDBClientError as ex: if ex.message.startswith(MEASUREMENT_NOT_FOUND_MSG): return json_measurement_list else: LOG.exception(ex) raise exceptions.RepositoryException(ex) except Exception as ex: LOG.exception(ex) raise exceptions.RepositoryException(ex) def list_metric_names(self, tenant_id, region, dimensions): try: query = self._build_show_measurements_query(dimensions, None, tenant_id, region) result = self._query_influxdb(query) json_name_list = self._build_measurement_name_list(result) return json_name_list except Exception as ex: LOG.exception(ex) raise exceptions.RepositoryException(ex) def metrics_statistics(self, tenant_id, region, name, dimensions, start_timestamp, end_timestamp, statistics, period, offset, limit, merge_metrics_flag, group_by): json_statistics_list = [] try: query = self._build_statistics_query(dimensions, name, tenant_id, region, start_timestamp, end_timestamp, statistics, period, offset, group_by, limit) if not group_by and not merge_metrics_flag: dimensions = self._get_dimensions(tenant_id, region, name, dimensions) query += " slimit 1" result = self._query_influxdb(query) if not result: return json_statistics_list offset_id = 0 if offset is not None: offset_tuple = offset.split('_') # If offset_id is given, add 1 since we want the next one if len(offset_tuple) > 1: offset_id = int(offset_tuple[0]) + 1 index = offset_id for serie in result.raw['series']: if 'values' in serie: columns = [column.replace('time', 'timestamp').replace('mean', 'avg') for column in serie['columns']] stats_list = [] for stats in serie['values']: # remove sub-second timestamp values (period can never be less than 1) timestamp = stats[0] if '.' 
in timestamp: stats[0] = str(timestamp)[:19] + 'Z' for stat in stats[1:]: # Only add row if there is a valid value in the row if stat is not None: stats_list.append(stats) break statistic = {u'name': serie['name'], u'id': str(index), u'columns': columns, u'statistics': stats_list} if not group_by: statistic[u'dimensions'] = dimensions else: statistic[u'dimensions'] = {key: value for key, value in serie['tags'].iteritems() if not key.startswith('_')} json_statistics_list.append(statistic) index += 1 return json_statistics_list except exceptions.RepositoryException as ex: if (isinstance(ex.message, InfluxDBClientError) and ex.message.message.startswith(MEASUREMENT_NOT_FOUND_MSG)): return json_statistics_list else: LOG.exception(ex) raise ex except InfluxDBClientError as ex: if ex.message.startswith(MEASUREMENT_NOT_FOUND_MSG): return json_statistics_list else: LOG.exception(ex) if ex.code == 400: LOG.error("Invalid query: %s", query) raise exceptions.RepositoryException(ex) except Exception as ex: LOG.exception(ex) raise exceptions.RepositoryException(ex) def _build_offset_clause(self, offset): if offset: offset_clause = " and time > '{}'".format(offset) else: offset_clause = "" return offset_clause def _build_group_by_clause(self, group_by, period=None): if group_by is not None and not isinstance(group_by, list): group_by = str(group_by).split(',') if group_by or period: items = [] if group_by: items.extend(group_by) if period: items.append("time(" + str(period) + "s)") clause = " group by " + ','.join(items) else: clause = "" return clause def _build_limit_clause(self, limit): return " limit {} ".format(str(limit + 1)) def _has_measurements(self, tenant_id, region, name, dimensions, start_timestamp, end_timestamp): has_measurements = True # # No need for the additional query if we don't have a start timestamp. # if not start_timestamp: return True # # We set limit to 1 for the measurement_list call, as we are only # interested in knowing if there is at least one measurement, and # not ask too much of influxdb. 
# measurements = self.measurement_list(tenant_id, region, name, dimensions, start_timestamp, end_timestamp, None, 1, False, None) if len(measurements) == 0: has_measurements = False return has_measurements def alarm_history(self, tenant_id, alarm_id_list, offset, limit, start_timestamp=None, end_timestamp=None): try: json_alarm_history_list = [] if not alarm_id_list: return json_alarm_history_list for alarm_id in alarm_id_list: if '\'' in alarm_id or ';' in alarm_id: raise Exception( "Input from user contains single quote ['] or " "semi-colon [;] characters[ {} ]".format(alarm_id)) query = """ select alarm_id, metrics, new_state, old_state, reason, reason_data, sub_alarms, tenant_id from alarm_state_history """ where_clause = ( " where tenant_id = '{}' ".format(tenant_id.encode('utf8'))) alarm_id_where_clause_list = ( [" alarm_id = '{}' ".format(id.encode('utf8')) for id in alarm_id_list]) alarm_id_where_clause = " or ".join(alarm_id_where_clause_list) where_clause += ' and (' + alarm_id_where_clause + ')' time_clause = '' if start_timestamp: time_clause += " and time >= " + str(int(start_timestamp * 1000000)) + "u " if end_timestamp: time_clause += " and time <= " + str(int(end_timestamp * 1000000)) + "u " offset_clause = self._build_offset_clause(offset) order_by_clause = " order by time desc" limit_clause = self._build_limit_clause(limit) query += where_clause + time_clause + offset_clause + order_by_clause + limit_clause result = self._query_influxdb(query) if not result: return json_alarm_history_list if 'values' in result.raw['series'][0]: for point in result.raw['series'][0]['values']: alarm_point = {u'timestamp': point[0], u'alarm_id': point[1], u'metrics': json.loads(point[2]), u'new_state': point[3], u'old_state': point[4], u'reason': point[5], u'reason_data': point[6], u'sub_alarms': json.loads(point[7]), u'id': str(self._get_millis_from_timestamp( timeutils.parse_isotime(point[0])))} # java api formats these during json serialization if alarm_point[u'sub_alarms']: for sub_alarm in alarm_point[u'sub_alarms']: sub_expr = sub_alarm['sub_alarm_expression'] metric_def = sub_expr['metric_definition'] sub_expr['metric_name'] = metric_def['name'] sub_expr['dimensions'] = metric_def['dimensions'] del sub_expr['metric_definition'] json_alarm_history_list.append(alarm_point) return json_alarm_history_list except Exception as ex: LOG.exception(ex) raise exceptions.RepositoryException(ex) def _get_millis_from_timestamp(self, dt): dt = dt.replace(tzinfo=None) return int((dt - datetime(1970, 1, 1)).total_seconds() * 1000) def list_dimension_values(self, tenant_id, region, metric_name, dimension_name): try: query = self._build_show_tag_values_query(metric_name, dimension_name, tenant_id, region) result = self._query_influxdb(query) json_dim_name_list = self._build_serie_dimension_values( result, dimension_name) return json_dim_name_list except Exception as ex: LOG.exception(ex) raise exceptions.RepositoryException(ex) def list_dimension_names(self, tenant_id, region, metric_name): try: query = self._build_show_tag_keys_query(metric_name, tenant_id, region) result = self._query_influxdb(query) json_dim_name_list = self._build_serie_dimension_names(result) return json_dim_name_list except Exception as ex: LOG.exception(ex) raise exceptions.RepositoryException(ex) @STATSD_TIMER.timed(INFLUXDB_QUERY_TIME, sample_rate=0.01) def _query_influxdb(self, query): try: result = self.influxdb_client.query(query) return result except Exception as ex: self._statsd_tsdb_error_count.increment(1) raise ex
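# Every query in the repository funnels through the same where-clause
# pattern: always filter on _tenant_id and _region, add one condition per
# dimension (a regex for empty or '|'-separated values, an exact match
# otherwise), and express timestamps as InfluxDB microsecond literals. A
# simplified, standalone sketch of that builder, assuming the same tag
# naming convention; it leaves out the quote-escaping and encoding details
# handled by _build_where_clause above.
def build_where_clause(name, tenant_id, region, dimensions=None,
                       start_timestamp=None, end_timestamp=None):
    clause = ''
    if name:
        clause += ' from "{0}" '.format(name.replace("'", "\\'"))
    clause += " where _tenant_id = '{0}' ".format(tenant_id)
    clause += " and _region = '{0}' ".format(region)
    for dim_name, dim_value in sorted((dimensions or {}).items()):
        if dim_value == '':
            clause += ' and "{0}" =~ /.+/ '.format(dim_name)  # any value
        elif '|' in dim_value:
            clause += ' and "{0}" =~ /^{1}$/ '.format(dim_name, dim_value)
        else:
            clause += ' and "{0}" = \'{1}\' '.format(dim_name, dim_value)
    if start_timestamp is not None:
        clause += ' and time > {0}u'.format(int(start_timestamp * 1000000))
    if end_timestamp is not None:
        clause += ' and time < {0}u'.format(int(end_timestamp * 1000000))
    return clause

print('select value, value_meta' +
      build_where_clause('cpu.idle_perc', 'tenant-1', 'region-1',
                         {'hostname': 'node-1|node-2'}, 1496400000))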
# -*- coding: utf-8 -*- from pyparsing import (Literal, Word, MatchFirst, CaselessKeyword, Regex, QuotedString as QString, Suppress, Optional, Group, FollowedBy, Combine, operatorPrecedence, opAssoc, ParseException, ParserElement, alphanums, And, OneOrMore) from ..util import load_module class PrimitiveFactoryError(Exception): pass class PrimitiveFactory(object): @staticmethod def build_from_conf(conf, parser): _cls = load_module(conf['class']) if 'parse_method' in conf: _kwargs = {'parse_method': getattr(parser, conf['parse_method'])} elif 'range_parse_method' in conf: _kwargs = { 'range_parse_method': getattr(parser, conf['range_parse_method']), 'item_parse_method': getattr(parser, conf['item_parse_method']) } else: raise PrimitiveFactoryError("Invalid Primitive definition, parsing method invalid or not defined.") return _cls(precedence=conf['precedence'], **_kwargs) def concatenate(elems, operator='OR', class_to_embed_elem=None): """ Receives a list of elements to be concatenated, to generate a type MatchFirst from pyParsing. Order is important given that it matches with the one found first :param elems: list of elements to concatenate :param operator: type of operator to concatenate with :param class_to_embed_elem: class to use to initialize each element in the list :return: MatchFirst object representing the optional matching with any of the elements in the list """ combined_elems = class_to_embed_elem(elems[0]) if class_to_embed_elem else elems[0] for e in elems[1:]: elem_to_concat = class_to_embed_elem(e) if class_to_embed_elem else e if operator == 'OR': combined_elems = combined_elems | elem_to_concat elif operator == 'AND': combined_elems = combined_elems & elem_to_concat elif operator == 'LONGEST_OR': # OR that matches the longest expression combined_elems = combined_elems ^ elem_to_concat return combined_elems class BaseType(object): name = 'base' def __init__(self, precedence): self.precedence = precedence class BaseWord(Word, BaseType): name = 'base_word' def __init__(self, chars, precendece): Word.__init__(self, chars) BaseType.__init__(self, precendece) class SimpleWord(BaseWord): name = 'simple_word' def __init__(self, parse_method=None, extra_chars=None, precedence=0): extra_chars = extra_chars if extra_chars else '' extra_chars += '_.-' super(SimpleWord, self).__init__(alphanums+extra_chars, precedence) self.addParseAction(lambda t: t[0].replace('\\\\', chr(127)).replace('\\', '').replace(chr(127), '\\')) if parse_method: self.addParseAction(parse_method) class FieldName(BaseWord): name = 'field_name' def __init__(self, parse_method=None, extra_chars=None, precedence=0): extra_chars = extra_chars if extra_chars else '' extra_chars += '_-' super(FieldName, self).__init__(alphanums+extra_chars, precedence) if parse_method: self.addParseAction(parse_method) class PartialString(SimpleWord): name = 'partial_string' def __init__(self, parse_method=None, precedence=3): super(PartialString, self).__init__(parse_method, precedence=precedence) class QuotedString(MatchFirst, BaseType): name = 'quoted_string' def __init__(self, parse_method=None, precedence=2): MatchFirst.__init__(self, [QString('"'), QString("'")]) BaseType.__init__(self, precedence) if parse_method: self.addParseAction(parse_method) class Phrase(OneOrMore, BaseType): name = 'phrase' def __init__(self, parse_method=None, precedence=2): OneOrMore.__init__(self, SimpleWord()+Optional(OneOrMore(Regex(r'[\b|\s]')))) BaseType.__init__(self, precedence) if parse_method: self.addParseAction(parse_method) class 
Any(MatchFirst, BaseType): name = 'any' def __init__(self, elems, precedence=4): MatchFirst.__init__(self, concatenate(elems)) BaseType.__init__(self, precedence) class Integer(Regex, BaseType): name = 'integer' def __init__(self, parse_method=None, precedence=6): Regex.__init__(self, r"\d+") BaseType.__init__(self, precedence) if parse_method: self.addParseAction(parse_method) class IntegerComparison(And, BaseType): name = 'integer_comparison' def __init__(self, parse_method=None, precedence=9): gt_lt_e = Literal('<') ^ Literal("<=") ^ Literal('>') ^ Literal(">=") And.__init__(self, [gt_lt_e + Integer()]) BaseType.__init__(self, precedence) if parse_method: self.addParseAction(parse_method) class IntegerRange(And, BaseType): name = 'integer_range' def __init__(self, range_parse_method=None, item_parse_method=None, range_symbol='..', precedence=10): And.__init__(self, [Integer(item_parse_method) + Literal(range_symbol) + Integer(item_parse_method)]) BaseType.__init__(self, precedence) if range_parse_method: self.addParseAction(range_parse_method) class Field(And, BaseType): name = 'field' def __init__(self, parse_method=None, field_separator=':', precedence=11): And.__init__(self, [FieldName() + Literal(field_separator)]) BaseType.__init__(self, precedence) if parse_method: self.addParseAction(parse_method) class MultiField(OneOrMore, BaseType): name = 'multi_field' def __init__(self, parse_method=None, field_separator=':', precedence=12): OneOrMore.__init__(self, Field(field_separator=field_separator)) BaseType.__init__(self, precedence) if parse_method: self.addParseAction(parse_method) class StringProximity(And, BaseType): name = 'string_proximity' def __init__(self, parse_method=None, precedence=11): And.__init__(self, [QuotedString() + Literal('~') + Integer()]) BaseType.__init__(self, precedence) if parse_method: self.addParseAction(parse_method)
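# The classes above are thin wrappers that attach a precedence (and often a
# parse action) to ordinary pyparsing elements. A short standalone usage
# sketch of the composition idea behind Integer and IntegerRange: two
# integers joined by '..', with a parse action converting each match to an
# int. The element names here are local to the example.
from pyparsing import Literal, Regex

integer = Regex(r"\d+").setParseAction(lambda t: int(t[0]))
integer_range = integer("low") + Literal('..').suppress() + integer("high")

parsed = integer_range.parseString('10..20')
print(parsed.asList())                                     # [10, 20]
print('low=%s high=%s' % (parsed['low'], parsed['high']))  # low=10 high=20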
# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.conf import paths NOVA_NET_API = 'nova.network.api.API' network_opts = [ cfg.StrOpt("flat_network_bridge", help=""" This option determines the bridge used for simple network interfaces when no bridge is specified in the VM creation request. Please note that this option is only used when using nova-network instead of Neutron in your deployment. Possible values: Any string representing a valid network bridge, such as 'br100' * Services that use this: ``nova-network`` * Related options: ``use_neutron`` """), cfg.StrOpt("flat_network_dns", default="8.8.4.4", help=""" This is the address of the DNS server for a simple network. If this option is not specified, the default of '8.8.4.4' is used. Please note that this option is only used when using nova-network instead of Neutron in your deployment. Possible values: Any valid IP address. * Services that use this: ``nova-network`` * Related options: ``use_neutron`` """), cfg.BoolOpt("flat_injected", default=False, help=""" This option determines whether the network setup information is injected into the VM before it is booted. Please note that this option is only used when using nova-network instead of Neutron in your deployment. Possible values: True, False (default) * Services that use this: ``nova-network`` * Related options: ``use_neutron`` """), cfg.StrOpt("flat_interface", help=""" This option is the name of the virtual interface of the VM on which the bridge will be built. Please note that this option is only used when using nova-network instead of Neutron in your deployment. Possible values: Any valid virtual interface name, such as 'eth0' * Services that use this: ``nova-network`` * Related options: ``use_neutron`` """), cfg.IntOpt("vlan_start", default=100, min=1, max=4094, help=""" This is the VLAN number used for private networks. Note that the when creating the networks, if the specified number has already been assigned, nova-network will increment this number until it finds an available VLAN. Please note that this option is only used when using nova-network instead of Neutron in your deployment. It also will be ignored if the configuration option for `network_manager` is not set to the default of 'nova.network.manager.VlanManager'. Possible values: Any integer between 1 and 4094. Values outside of that range will raise a ValueError exception. Default = 100. * Services that use this: ``nova-network`` * Related options: ``network_manager``, ``use_neutron`` """), cfg.StrOpt("vlan_interface", help=""" This option is the name of the virtual interface of the VM on which the VLAN bridge will be built. Please note that this option is only used when using nova-network instead of Neutron in your deployment. It also will be ignored if the configuration option for `network_manager` is not set to the default of 'nova.network.manager.VlanManager'. 
Possible values: Any valid virtual interface name, such as 'eth0' * Services that use this: ``nova-network`` * Related options: ``use_neutron`` """), cfg.IntOpt("num_networks", default=1, help="Number of networks to support"), cfg.StrOpt("vpn_ip", default="$my_ip", help="Public IP for the cloudpipe VPN servers"), cfg.IntOpt("vpn_start", default=1000, help="First Vpn port for private networks"), cfg.IntOpt("network_size", default=256, help="Number of addresses in each private subnet"), cfg.StrOpt("fixed_range_v6", default="fd00::/48", help="Fixed IPv6 address block"), cfg.StrOpt("gateway", help="Default IPv4 gateway"), cfg.StrOpt("gateway_v6", help="Default IPv6 gateway"), cfg.IntOpt("cnt_vpn_clients", default=0, help="Number of addresses reserved for vpn clients"), cfg.IntOpt("fixed_ip_disassociate_timeout", default=600, help="Seconds after which a deallocated IP is disassociated"), cfg.IntOpt("create_unique_mac_address_attempts", default=5, help="Number of attempts to create unique mac address"), cfg.BoolOpt("fake_call", default=False, help="If True, skip using the queue and make local calls"), cfg.BoolOpt("teardown_unused_network_gateway", default=False, help="If True, unused gateway devices (VLAN and bridge) are " "deleted in VLAN network mode with multi hosted " "networks"), cfg.BoolOpt("force_dhcp_release", default=True, help="If True, send a dhcp release on instance termination"), cfg.BoolOpt("update_dns_entries", default=False, help="If True, when a DNS entry must be updated, it sends a " "fanout cast to all network hosts to update their DNS " "entries in multi host mode"), cfg.IntOpt("dns_update_periodic_interval", default=-1, help="Number of seconds to wait between runs of updates to DNS " "entries."), cfg.StrOpt("dhcp_domain", default="novalocal", help="Domain to use for building the hostnames"), cfg.StrOpt("l3_lib", default="nova.network.l3.LinuxNetL3", help="Indicates underlying L3 management library"), cfg.BoolOpt("share_dhcp_address", default=False, deprecated_for_removal=True, help=""" DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK. If True in multi_host mode, all compute hosts share the same dhcp address. The same IP address used for DHCP will be added on each nova-network node which is only visible to the VMs on the same host. The use of this configuration has been deprecated and may be removed in any release after Mitaka. It is recommended that instead of relying on this option, an explicit value should be passed to 'create_networks()' as a keyword argument with the name 'share_address'. * Services that use this: ``nova-network`` * Related options: None """), # NOTE(mriedem): Remove network_device_mtu in Newton. cfg.IntOpt("network_device_mtu", deprecated_for_removal=True, help=""" DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK. MTU (Maximum Transmission Unit) setting for a network interface. The use of this configuration has been deprecated and may be removed in any release after Mitaka. It is recommended that instead of relying on this option, an explicit value should be passed to 'create_networks()' as a keyword argument with the name 'mtu'. * Services that use this: ``nova-network`` * Related options: None """), cfg.StrOpt('network_api_class', default=NOVA_NET_API, help='DEPRECATED: The full class name of the ' 'network API class to use. ``use_neutron`` ' 'should be used instead.', deprecated_for_removal=True), cfg.BoolOpt('use_neutron', default=False, help="Whether to use Neutron or Nova Network as the back end " "for networking. 
Defaults to False (indicating Nova " "network).Set to True to use neutron.") ] linux_net_opts = [ cfg.MultiStrOpt('dhcpbridge_flagfile', default=['/etc/nova/nova-dhcpbridge.conf'], help=""" This option is a list of full paths to one or more configuration files for dhcpbridge. In most cases the default path of '/etc/nova/nova-dhcpbridge.conf' should be sufficient, but if you have special needs for configuring dhcpbridge, you can change or add to this list. * Possible values A list of strings, where each string is the full path to a dhcpbridge configuration file. * Services that use this: ``nova-network`` * Related options: None """), cfg.StrOpt('networks_path', default=paths.state_path_def('networks'), help=""" The location where the network configuration files will be kept. The default is the 'networks' directory off of the location where nova's Python module is installed. * Possible values A string containing the full path to the desired configuration directory * Services that use this: ``nova-network`` * Related options: None """), cfg.StrOpt('public_interface', default='eth0', help=""" This is the name of the network interface for public IP addresses. The default is 'eth0'. * Possible values: Any string representing a network interface name * Services that use this: ``nova-network`` * Related options: None """), cfg.StrOpt('dhcpbridge', default=paths.bindir_def('nova-dhcpbridge'), help=""" The location of the binary nova-dhcpbridge. By default it is the binary named 'nova-dhcpbridge' that is installed with all the other nova binaries. * Possible values: Any string representing the full path to the binary for dhcpbridge * Services that use this: ``nova-network`` * Related options: None """), cfg.StrOpt('routing_source_ip', default='$my_ip', help=""" This is the public IP address of the network host. It is used when creating a SNAT rule. * Possible values: Any valid IP address * Services that use this: ``nova-network`` * Related options: force_snat_range """), cfg.IntOpt('dhcp_lease_time', default=86400, help=""" The lifetime of a DHCP lease, in seconds. The default is 86400 (one day). Possible values: Any positive integer value. * Services that use this: ``nova-network`` * Related options: None """), cfg.MultiStrOpt("dns_server", default=[], help=""" Despite the singular form of the name of this option, it is actually a list of zero or more server addresses that dnsmasq will use for DNS nameservers. If this is not empty, dnsmasq will not read /etc/resolv.conf, but will only use the servers specified in this option. If the option use_network_dns_servers is True, the dns1 and dns2 servers from the network will be appended to this list, and will be used as DNS servers, too. Possible values: A list of strings, where each string is etiher an IP address or a FQDN. * Services that use this: ``nova-network`` * Related options: use_network_dns_servers """), cfg.BoolOpt("use_network_dns_servers", default=False, help=""" When this option is set to True, the dns1 and dns2 servers for the network specified by the user on boot will be used for DNS, as well as any specified in the `dns_server` option. Possible values: True, False (default) * Services that use this: ``nova-network`` * Related options: dns_server """), cfg.ListOpt("dmz_cidr", default=[], help=""" This option is a list of zero or more IP address ranges in your network's DMZ that should be accepted. Possible values: A list of strings, each of which should be a valid CIDR. 
* Services that use this: ``nova-network`` * Related options: None """), cfg.MultiStrOpt("force_snat_range", default=[], help=""" This is a list of zero or more IP ranges that traffic from the `routing_source_ip` will be SNATted to. If the list is empty, then no SNAT rules are created. Possible values: A list of strings, each of which should be a valid CIDR. * Services that use this: ``nova-network`` * Related options: routing_source_ip """), cfg.StrOpt("dnsmasq_config_file", default="", help=""" The path to the custom dnsmasq configuration file, if any. Possible values: The full path to the configuration file, or an empty string if there is no custom dnsmasq configuration file. * Services that use this: ``nova-network`` * Related options: None """), cfg.StrOpt("linuxnet_interface_driver", default="nova.network.linux_net.LinuxBridgeInterfaceDriver", help=""" This is the class used as the ethernet device driver for linuxnet bridge operations. The default value should be all you need for most cases, but if you wish to use a customized class, set this option to the full dot-separated import path for that class. Possible values: Any string representing a dot-separated class path that Nova can import. * Services that use this: ``nova-network`` * Related options: None """), cfg.StrOpt("linuxnet_ovs_integration_bridge", default="br-int", help=""" The name of the Open vSwitch bridge that is used with linuxnet when connecting with Open vSwitch." Possible values: Any string representing a valid bridge name. * Services that use this: ``nova-network`` * Related options: None """), cfg.BoolOpt("send_arp_for_ha", default=False, help=""" When True, when a device starts up, and upon binding floating IP addresses, arp messages will be sent to ensure that the arp caches on the compute hosts are up-to-date. Possible values: True, False (default) * Services that use this: ``nova-network`` * Related options: send_arp_for_ha_count """), cfg.IntOpt("send_arp_for_ha_count", default=3, help=""" When arp messages are configured to be sent, they will be sent with the count set to the value of this option. Of course, if this is set to zero, no arp messages will be sent. Possible values: Any integer greater than or equal to 0 * Services that use this: ``nova-network`` * Related options: send_arp_for_ha """), cfg.BoolOpt("use_single_default_gateway", default=False, help=""" When set to True, only the firt nic of a VM will get its default gateway from the DHCP server. Possible values: True, False (default) * Services that use this: ``nova-network`` * Related options: None """), cfg.MultiStrOpt("forward_bridge_interface", default=["all"], help=""" One or more interfaces that bridges can forward traffic to. If any of the items in this list is the special keyword 'all', then all traffic will be forwarded. Possible values: A list of zero or more interface names, or the word 'all'. 
* Services that use this: ``nova-network`` * Related options: None """), cfg.StrOpt('metadata_host', default='$my_ip', help='The IP address for the metadata API server'), cfg.IntOpt('metadata_port', default=8775, min=1, max=65535, help='The port for the metadata API port'), cfg.StrOpt('iptables_top_regex', default='', help='Regular expression to match the iptables rule that ' 'should always be on the top.'), cfg.StrOpt('iptables_bottom_regex', default='', help='Regular expression to match the iptables rule that ' 'should always be on the bottom.'), cfg.StrOpt('iptables_drop_action', default='DROP', help='The table that iptables to jump to when a packet is ' 'to be dropped.'), cfg.IntOpt('ovs_vsctl_timeout', default=120, help='Amount of time, in seconds, that ovs_vsctl should wait ' 'for a response from the database. 0 is to wait forever.'), cfg.BoolOpt('fake_network', default=False, help='If passed, use fake network devices and addresses'), cfg.IntOpt('ebtables_exec_attempts', default=3, help='Number of times to retry ebtables commands on failure.'), cfg.FloatOpt('ebtables_retry_interval', default=1.0, help='Number of seconds to wait between ebtables retries.'), ] ldap_dns_opts = [ cfg.StrOpt('ldap_dns_url', default='ldap://ldap.example.com:389', help='URL for LDAP server which will store DNS entries'), cfg.StrOpt('ldap_dns_user', default='uid=admin,ou=people,dc=example,dc=org', help='User for LDAP DNS'), cfg.StrOpt('ldap_dns_password', default='password', help='Password for LDAP DNS', secret=True), cfg.StrOpt('ldap_dns_soa_hostmaster', default='hostmaster@example.org', help='Hostmaster for LDAP DNS driver Statement of Authority'), cfg.MultiStrOpt('ldap_dns_servers', default=['dns.example.org'], help='DNS Servers for LDAP DNS driver'), cfg.StrOpt('ldap_dns_base_dn', default='ou=hosts,dc=example,dc=org', help='Base DN for DNS entries in LDAP'), cfg.StrOpt('ldap_dns_soa_refresh', default='1800', help='Refresh interval (in seconds) for LDAP DNS driver ' 'Statement of Authority'), cfg.StrOpt('ldap_dns_soa_retry', default='3600', help='Retry interval (in seconds) for LDAP DNS driver ' 'Statement of Authority'), cfg.StrOpt('ldap_dns_soa_expiry', default='86400', help='Expiry interval (in seconds) for LDAP DNS driver ' 'Statement of Authority'), cfg.StrOpt('ldap_dns_soa_minimum', default='7200', help='Minimum interval (in seconds) for LDAP DNS driver ' 'Statement of Authority'), ] security_group_opts = [ cfg.StrOpt('security_group_api', default='nova', help='DEPRECATED: Full class name of the security API class', deprecated_for_removal=True), ] driver_opts = [ cfg.StrOpt('network_driver', default='nova.network.linux_net', help='Driver to use for network creation'), ] rpcapi_opts = [ cfg.StrOpt('network_topic', default='network', help='The topic network nodes listen on'), cfg.BoolOpt('multi_host', default=False, help='Default value for multi_host in networks. Also, if set, ' 'some rpc network calls will be sent directly to host.'), ] ALL_DEFAULT_OPTS = (linux_net_opts + network_opts + ldap_dns_opts + security_group_opts + rpcapi_opts + driver_opts) def register_opts(conf): conf.register_opts(linux_net_opts) conf.register_opts(network_opts) conf.register_opts(ldap_dns_opts) conf.register_opts(security_group_opts) conf.register_opts(driver_opts) conf.register_opts(rpcapi_opts) def list_opts(): return {"DEFAULT": ALL_DEFAULT_OPTS}
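# A minimal sketch (assuming oslo.config is installed) of how the option lists
# above are typically consumed: register them on a ConfigOpts instance via the
# register_opts() helper defined in this module, then read values back as
# attributes. The option names come from the lists above; the empty-args call
# just applies defaults.
from oslo_config import cfg

conf = cfg.ConfigOpts()
register_opts(conf)
conf([])                       # parse with no CLI args; defaults apply
print(conf.network_size)       # 256 unless overridden in nova.conf
print(conf.use_neutron)        # False by default (nova-network back end)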
from __future__ import print_function import argparse import gzip import os import shutil import struct import tarfile import tempfile import h5py import numpy import six from numpy.testing import assert_equal, assert_raises from six.moves import range, zip, cPickle from fuel.converters.base import (fill_hdf5_file, check_exists, MissingInputFiles) from fuel.converters import binarized_mnist, cifar10, mnist if six.PY3: getbuffer = memoryview else: getbuffer = numpy.getbuffer class TestFillHDF5File(object): def setUp(self): self.h5file = h5py.File( 'file.hdf5', mode='w', driver='core', backing_store=False) self.train_features = numpy.arange( 16, dtype='uint8').reshape((4, 2, 2)) self.test_features = numpy.arange( 8, dtype='uint8').reshape((2, 2, 2)) + 3 self.train_targets = numpy.arange( 4, dtype='float32').reshape((4, 1)) self.test_targets = numpy.arange( 2, dtype='float32').reshape((2, 1)) + 3 def tearDown(self): self.h5file.close() def test_data(self): fill_hdf5_file( self.h5file, (('train', 'features', self.train_features, '.'), ('train', 'targets', self.train_targets), ('test', 'features', self.test_features), ('test', 'targets', self.test_targets))) assert_equal(self.h5file['features'], numpy.vstack([self.train_features, self.test_features])) assert_equal(self.h5file['targets'], numpy.vstack([self.train_targets, self.test_targets])) def test_dtype(self): fill_hdf5_file( self.h5file, (('train', 'features', self.train_features), ('train', 'targets', self.train_targets), ('test', 'features', self.test_features), ('test', 'targets', self.test_targets))) assert_equal(str(self.h5file['features'].dtype), 'uint8') assert_equal(str(self.h5file['targets'].dtype), 'float32') def test_multiple_length_error(self): train_targets = numpy.arange(8, dtype='float32').reshape((8, 1)) assert_raises(ValueError, fill_hdf5_file, self.h5file, (('train', 'features', self.train_features), ('train', 'targets', train_targets))) def test_multiple_dtype_error(self): test_features = numpy.arange( 8, dtype='float32').reshape((2, 2, 2)) + 3 assert_raises( ValueError, fill_hdf5_file, self.h5file, (('train', 'features', self.train_features), ('test', 'features', test_features))) def test_multiple_shape_error(self): test_features = numpy.arange( 16, dtype='uint8').reshape((2, 4, 2)) + 3 assert_raises( ValueError, fill_hdf5_file, self.h5file, (('train', 'features', self.train_features), ('test', 'features', test_features))) class TestMNIST(object): def setUp(self): MNIST_IMAGE_MAGIC = 2051 MNIST_LABEL_MAGIC = 2049 numpy.random.seed(9 + 5 + 2015) self.train_features_mock = numpy.random.randint( 0, 256, (10, 1, 28, 28)).astype('uint8') self.train_targets_mock = numpy.random.randint( 0, 10, (10, 1)).astype('uint8') self.test_features_mock = numpy.random.randint( 0, 256, (10, 1, 28, 28)).astype('uint8') self.test_targets_mock = numpy.random.randint( 0, 10, (10, 1)).astype('uint8') self.tempdir = tempfile.mkdtemp() self.train_images_path = os.path.join( self.tempdir, 'train-images-idx3-ubyte.gz') self.train_labels_path = os.path.join( self.tempdir, 'train-labels-idx1-ubyte.gz') self.test_images_path = os.path.join( self.tempdir, 't10k-images-idx3-ubyte.gz') self.test_labels_path = os.path.join( self.tempdir, 't10k-labels-idx1-ubyte.gz') self.wrong_images_path = os.path.join(self.tempdir, 'wrong_images.gz') self.wrong_labels_path = os.path.join(self.tempdir, 'wrong_labels.gz') with gzip.open(self.train_images_path, 'wb') as f: f.write(struct.pack('>iiii', *(MNIST_IMAGE_MAGIC, 10, 28, 28))) 
f.write(getbuffer(self.train_features_mock.flatten())) with gzip.open(self.train_labels_path, 'wb') as f: f.write(struct.pack('>ii', *(MNIST_LABEL_MAGIC, 10))) f.write(getbuffer(self.train_targets_mock.flatten())) with gzip.open(self.test_images_path, 'wb') as f: f.write(struct.pack('>iiii', *(MNIST_IMAGE_MAGIC, 10, 28, 28))) f.write(getbuffer(self.test_features_mock.flatten())) with gzip.open(self.test_labels_path, 'wb') as f: f.write(struct.pack('>ii', *(MNIST_LABEL_MAGIC, 10))) f.write(getbuffer(self.test_targets_mock.flatten())) with gzip.open(self.wrong_images_path, 'wb') as f: f.write(struct.pack('>iiii', *(2000, 10, 28, 28))) with gzip.open(self.wrong_labels_path, 'wb') as f: f.write(struct.pack('>ii', *(2000, 10))) def tearDown(self): shutil.rmtree(self.tempdir) def test_converter(self): filename = os.path.join(self.tempdir, 'mock_mnist.hdf5') parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser = subparsers.add_parser('mnist') subparser.set_defaults( directory=self.tempdir, output_file=filename) mnist.fill_subparser(subparser) args = parser.parse_args(['mnist']) args_dict = vars(args) func = args_dict.pop('func') func(**args_dict) h5file = h5py.File(filename, mode='r') assert_equal( h5file['features'][...], numpy.vstack( [self.train_features_mock, self.test_features_mock])) assert_equal( h5file['targets'][...], numpy.vstack([self.train_targets_mock, self.test_targets_mock])) assert_equal(str(h5file['features'].dtype), 'uint8') assert_equal(str(h5file['targets'].dtype), 'uint8') assert_equal(tuple(dim.label for dim in h5file['features'].dims), ('batch', 'channel', 'height', 'width')) assert_equal(tuple(dim.label for dim in h5file['targets'].dims), ('batch', 'index')) def test_wrong_image_magic(self): assert_raises( ValueError, mnist.read_mnist_images, self.wrong_images_path) def test_wrong_label_magic(self): assert_raises( ValueError, mnist.read_mnist_labels, self.wrong_labels_path) def test_read_image_bool(self): assert_equal(mnist.read_mnist_images(self.train_images_path, 'bool'), self.train_features_mock >= 128) def test_read_image_float(self): rval = mnist.read_mnist_images(self.train_images_path, 'float32') assert_equal(rval, self.train_features_mock.astype('float32') / 255.) 
assert_equal(str(rval.dtype), 'float32') def test_read_image_value_error(self): assert_raises(ValueError, mnist.read_mnist_images, self.train_images_path, 'int32') class TestBinarizedMNIST(object): def setUp(self): numpy.random.seed(9 + 5 + 2015) self.train_mock = numpy.random.randint(0, 2, (5, 784)) self.valid_mock = numpy.random.randint(0, 2, (5, 784)) self.test_mock = numpy.random.randint(0, 2, (5, 784)) self.tempdir = tempfile.mkdtemp() numpy.savetxt( os.path.join(self.tempdir, 'binarized_mnist_train.amat'), self.train_mock) numpy.savetxt( os.path.join(self.tempdir, 'binarized_mnist_valid.amat'), self.valid_mock) numpy.savetxt( os.path.join(self.tempdir, 'binarized_mnist_test.amat'), self.test_mock) def tearDown(self): shutil.rmtree(self.tempdir) def test_converter(self): filename = os.path.join(self.tempdir, 'mock_binarized_mnist.hdf5') parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser = subparsers.add_parser('binarized_mnist') subparser.set_defaults(directory=self.tempdir, output_file=filename) binarized_mnist.fill_subparser(subparser) args = parser.parse_args(['binarized_mnist']) args_dict = vars(args) func = args_dict.pop('func') func(**args_dict) h5file = h5py.File(filename, mode='r') assert_equal(h5file['features'][...], numpy.vstack([self.train_mock, self.valid_mock, self.test_mock]).reshape((-1, 1, 28, 28))) assert_equal(str(h5file['features'].dtype), 'uint8') assert_equal(tuple(dim.label for dim in h5file['features'].dims), ('batch', 'channel', 'height', 'width')) class TestCIFAR10(object): def setUp(self): numpy.random.seed(9 + 5 + 2015) self.train_features_mock = [ numpy.random.randint(0, 256, (10, 3, 32, 32)).astype('uint8') for i in range(5)] self.train_targets_mock = [ numpy.random.randint(0, 10, (10,)).astype('uint8') for i in range(5)] self.test_features_mock = numpy.random.randint( 0, 256, (10, 3, 32, 32)).astype('uint8') self.test_targets_mock = numpy.random.randint( 0, 10, (10,)).astype('uint8') self.tempdir = tempfile.mkdtemp() cwd = os.getcwd() os.chdir(self.tempdir) os.mkdir('cifar-10-batches-py') for i, (x, y) in enumerate(zip(self.train_features_mock, self.train_targets_mock)): filename = os.path.join( 'cifar-10-batches-py', 'data_batch_{}'.format(i + 1)) with open(filename, 'wb') as f: cPickle.dump({'data': x, 'labels': y}, f) filename = os.path.join('cifar-10-batches-py', 'test_batch') with open(filename, 'wb') as f: cPickle.dump({'data': self.test_features_mock, 'labels': self.test_targets_mock}, f) with tarfile.open('cifar-10-python.tar.gz', 'w:gz') as tar_file: tar_file.add('cifar-10-batches-py') os.chdir(cwd) def tearDown(self): shutil.rmtree(self.tempdir) def test_converter(self): filename = os.path.join(self.tempdir, 'mock_cifar10.hdf5') parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser = subparsers.add_parser('cifar10') subparser.set_defaults(directory=self.tempdir, output_file=filename) cifar10.fill_subparser(subparser) args = parser.parse_args(['cifar10']) args_dict = vars(args) func = args_dict.pop('func') func(**args_dict) h5file = h5py.File(filename, mode='r') assert_equal( h5file['features'][...], numpy.vstack( self.train_features_mock + [self.test_features_mock])) assert_equal( h5file['targets'][...], numpy.hstack(self.train_targets_mock + [self.test_targets_mock])) assert_equal(str(h5file['features'].dtype), 'uint8') assert_equal(str(h5file['targets'].dtype), 'uint8') assert_equal(tuple(dim.label for dim in h5file['features'].dims), ('batch', 'channel', 'height', 'width')) 
assert_equal(h5file['targets'].dims[0].label, 'batch') def test_check_exists(): try: directory = tempfile.mkdtemp() with open(os.path.join(directory, 'abcdef.txt'), 'w') as f: print('\n', file=f) @check_exists(required_files=['abcdef.txt']) def foo(directory, a=None, b=None): pass try: foo(directory) except MissingInputFiles: assert False, "MissingInputFiles raised when files present" @check_exists(required_files=['ghijkl.txt']) def bar(directory, c=None, d=None): pass assert_raises(MissingInputFiles, bar, directory) @check_exists(required_files=['abcdef.txt', 'ghijkl.txt']) def baz(directory, x, y=None): pass assert_raises(MissingInputFiles, baz, directory, 9) try: baz(directory, 9) except MissingInputFiles as e: assert e.filenames == ['ghijkl.txt'] with open(os.path.join(directory, 'ghijkl.txt'), 'w') as f: print('\n\n', file=f) try: bar(directory) baz(directory, 44) except MissingInputFiles: assert False, "MissingInputFiles raised when files present" finally: os.remove(os.path.join(directory, 'abcdef.txt')) os.remove(os.path.join(directory, 'ghijkl.txt')) os.rmdir(directory)
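# A hedged sketch, mirroring the tests above, of how fill_hdf5_file is driven:
# it takes an open h5py file plus (split, source, array) tuples and writes one
# concatenated dataset per source name. File name and arrays are illustrative.
import h5py
import numpy
from fuel.converters.base import fill_hdf5_file

h5file = h5py.File('example.hdf5', mode='w', driver='core', backing_store=False)
train_x = numpy.arange(16, dtype='uint8').reshape((4, 2, 2))
test_x = numpy.arange(8, dtype='uint8').reshape((2, 2, 2))
fill_hdf5_file(h5file, (('train', 'features', train_x),
                        ('test', 'features', test_x)))
print(h5file['features'].shape)   # (6, 2, 2): train rows followed by test rows
h5file.close()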
# -*- coding:utf-8 -*- # Copyright 2015 NEC Corporation. # # # # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # import copy import signal import sys import threading import time from functools import partial from org.o3project.odenos.core.component.network.flow.ofpflow.ofp_flow_match import OFPFlowMatch from org.o3project.odenos.core.component.network.flow.basic.basic_flow import BasicFlow from org.o3project.odenos.core.component.network.flow.basic.basic_flow_match import BasicFlowMatch from org.o3project.odenos.core.component.network.flow.basic.flow_action_output import FlowActionOutput from org.o3project.odenos.core.component.network.flow.flow import Flow from org.o3project.odenos.core.component.network.flow.flow_set import FlowSet from org.o3project.odenos.core.component.network.flow.ofpflow.ofp_flow import OFPFlow from org.o3project.odenos.core.component.network.packet.in_packet import InPacket from org.o3project.odenos.core.component.network.packet.ofp_in_packet import OFPInPacket from org.o3project.odenos.core.component.network.packet.ofp_out_packet import OFPOutPacket from org.o3project.odenos.core.component.network.packet.out_packet import OutPacket from org.o3project.odenos.core.component.network.packet.packet import Packet from org.o3project.odenos.core.component.network.packet.packet_status import PacketStatus from org.o3project.odenos.core.component.network.topology.link import Link from org.o3project.odenos.core.component.network.topology.node import Node from org.o3project.odenos.core.component.network.topology.port import Port from org.o3project.odenos.core.component.network.topology.topology import Topology from org.o3project.odenos.core.util.network_interface import NetworkInterface from org.o3project.odenos.core.util.remote_object_interface import RemoteObjectInterface from org.o3project.odenos.core.util.system_manager_interface import SystemManagerInterface from org.o3project.odenos.remoteobject.manager.system.component_connection import ComponentConnection from org.o3project.odenos.remoteobject.manager.system.component_connection_logic_and_network import ComponentConnectionLogicAndNetwork from org.o3project.odenos.remoteobject.message.request import Request from org.o3project.odenos.remoteobject.message.response import Response from org.o3project.odenos.remoteobject.object_property import ObjectProperty from org.o3project.odenos.remoteobject.transport.message_dispatcher import MessageDispatcher def signal_handler(num, stack, obj): print 'Received signal %d' % num del obj sys.exit() class ServerThread(threading.Thread): def __init__(self, dispatcher): threading.Thread.__init__(self) self.disp = dispatcher def run(self): self.disp.start() class OdenosConfigurator(object): CM1 = "romgr1" CM2 = "romgr2" CM3 = "romgr3" DEF_ATTR = {"oper_status": "UP"} DEF_VENDOR = "VENDOR1" def __init__(self, dispatcher=None): if dispatcher is None: bound_func = partial(signal_handler, obj=self) signal.signal(signal.SIGINT, bound_func) signal.signal(signal.SIGTERM, bound_func) self.disp = 
dispatcher if dispatcher is None: self.disp = MessageDispatcher() self.thread = ServerThread(self.disp) self.thread.start() self.sysmgr = SystemManagerInterface(self.disp) self.stations = {} self.packet_id = 0; def __del__(self): self.thread.join() self.disp.stop() def create_component(self, type, name, cm_id): obj = ObjectProperty(type, name) obj.set_property("version", "1") obj.set_property(ObjectProperty.CM_ID, cm_id) ret = self.sysmgr.put_components(obj).status_code if ret != 201: print "failed to create(ret): " + type + " " + name + " @ " + cm_id return RemoteObjectInterface(self.disp, name) def create_aggregator(self, name, cm_id=CM1): return self.create_component("Aggregator", name, cm_id) def create_federator(self, name, cm_id=CM1): return self.create_component("Federator", name, cm_id) def get_fed_boundaries(self, federator): resp = federator._get_object_to_remote_object("settings/boundaries") if resp.is_error(Request.Method.GET): return None return resp.body def get_ll_boundaries(self, linklayerizer): return self.get_fed_boundaries(linklayerizer) def set_fed_boundaries(self, federator, boundaries): n = 0 for ports in boundaries: net1 = ports[0][0] net2 = ports[1][0] phy_port1 = ports[0][1] phy_port2 = ports[1][1] port1 = net1.get_physical_port(phy_port1) port2 = net2.get_physical_port(phy_port2) while not port1: print "cannot get port by %s from %s" % (phy_port1, net1.network_id) port1 = net1.get_physical_port(phy_port1) time.sleep(2) while not port2: print "cannot get port by %s from %s" % (phy_port2, net2.network_id) port2 = net2.get_physical_port(phy_port2) time.sleep(2) bond_id = "bond_%s" % str(n) bond = {"id": bond_id, "type": "Federator", "network1": net1.network_id, "node1": port1.node_id, "port1": port1.port_id, "network2": net2.network_id, "node2": port2.node_id, "port2": port2.port_id } n = n + 1 federator._put_object_to_remote_object("settings/boundaries/%s" % bond_id, bond) def set_ll_boundaries(self, linklayerizer, boundaries): n = 0 for ports in boundaries: net1 = ports[0][0] net2 = ports[1][0] phy_port1 = ports[0][1] phy_port2 = ports[1][1] port1 = net1.get_physical_port(phy_port1) port2 = net2.get_physical_port(phy_port2) while not port1: print "cannot get port by %s from %s" % (phy_port1, net1.network_id) port1 = net1.get_physical_port(phy_port1) time.sleep(2) while not port2: print "cannot get port by %s from %s" % (phy_port2, net2.network_id) port2 = net2.get_physical_port(phy_port2) time.sleep(2) bond_id = "bond_" + str(n) + "_low_" + port1.port_id + "_up_" + port2.port_id bond = {"id": bond_id, "type": "LinkLayerizer", "lower_nw": net1.network_id, "lower_nw_node": port1.node_id, "lower_nw_port": port1.port_id, "upper_nw": net2.network_id, "upper_nw_node": port2.node_id, "upper_nw_port": port2.port_id } n = n + 1 linklayerizer._put_object_to_remote_object("settings/boundaries/%s" % bond_id, bond) def create_l2switch(self, name, cm_id=CM1): return self.create_component("LearningSwitch", name, cm_id) def create_linklayerizer(self, name, cm_id=CM1): return self.create_component("LinkLayerizer", name, cm_id) def create_dummydriver(self, name, cm_id=CM1): return self.create_component("DummyDriver", name, cm_id) def create_ofdriver(self, name, cm_id=CM3): return self.create_component("OpenFlowDriver", name, cm_id) def create_network(self, name, cm_id=CM1): self.create_component("Network", name, cm_id) return NetworkInterface(self.disp, name) def get_network(self, name): return NetworkInterface(self.disp, name) def create_node(self, network, node_id, attr=DEF_ATTR): 
attr = copy.deepcopy(attr) attr.update({"physical_id": node_id}) if not attr.has_key("vendor"): attr.update({"vendor": self.DEF_VENDOR}) return network.put_node(Node("Node", "0", node_id, {}, attr)) def create_port(self, network, node_id, port_id, attr=DEF_ATTR): attr = copy.deepcopy(attr) attr.update({"physical_id": "%s@%s" % (port_id, node_id)}) attr.update({"max_bandwidth": "10000000"}) attr.update({"unreserved_bandwidth": "10000000"}) if not attr.has_key("vendor"): attr.update({"vendor": self.DEF_VENDOR}) return network.put_port(Port("Port", "0", port_id, node_id, "", "", attr)) def create_link(self, network, link_id, snode, sport, dnode, dport, attr=DEF_ATTR): attr = copy.deepcopy(attr) attr.update({"max_bandwidth": "10000000"}) attr.update({"unreserved_bandwidth": "10000000"}) return network.put_link(Link("Link", "0", link_id, snode, sport, dnode, dport, attr)) def create_simple_basicFlow(self, network, flow_id, in_node, in_port, path, out_node, out_port, attr={}): matches = [] matches.append(BasicFlowMatch("BasicFlowMatch", in_node, in_port)) edge_actions = {} edge_actions[out_node] = [FlowActionOutput("FlowActionOutput", out_port)] attributes = attr flow = BasicFlow("BasicFlow", "0", flow_id, "simple_basicFlow", True, None, None, attributes, matches, path, edge_actions) return network.put_flow(flow) def create_ofp_flow(self, network, flow_id, matches, path, edge_actions): attributes = {} flow = OFPFlow("OFPFlow", "0", flow_id, "flowSetter", True, None, None, attributes, matches, None, None, path, edge_actions) return network.put_flow(flow) def add_ofp_inpacket(self, network, node_id, port_id, header, data, attr={}): self.packet_id = self.packet_id + 1 attr = copy.deepcopy(attr) network.post_in_packet( OFPInPacket(str(self.packet_id), "OFPInPacket", attr, node_id, port_id, header, data)) def add_station(self, network, sid, node_id, port_id, mac): self.stations.update( {sid:{"network": network, "node_id": node_id, "port_id": port_id, "mac": mac}}) def ping(self, src_sid, dst_sid): src = self.stations[src_sid] dst = self.stations[dst_sid] data = "deadbeef" header = OFPFlowMatch("OFPFlowMatch", src["node_id"], src["port_id"]) header.eth_src = src["mac"] header.eth_dst = dst["mac"] self.add_ofp_inpacket(src["network"], src["node_id"], src["port_id"], header, data) def create_slicer(self, name, cm_id=CM1): return self.create_component("Slicer", name, cm_id) def set_slice_condition(self, slicer, priority, cond_id, conn_id, match): path = "settings/slice_condition_table/%s/conditions/%s" % (priority, cond_id) body = {"id": cond_id, "type":"BasicSliceCondition", "connection": conn_id} body.update(match) slicer._put_object_to_remote_object(path, body) def create_ofpslicer(self, name, cm_id=CM1): return self.create_component("OpenFlowSlicer", name, cm_id) def set_ofpslice_condition(self, slicer, priority, cond_id, conn_id, match): path = "settings/slice_condition_table/%s/conditions/%s" % (priority, cond_id) body = {"id": cond_id, "type":"OpenFlowSliceCondition", "connection": conn_id} body.update(match) slicer._put_object_to_remote_object(path, body) def connect(self, logic, network, type): conn_id = logic.object_id + "-" + network.object_id conn = ComponentConnectionLogicAndNetwork( conn_id, type, ComponentConnection.State.INITIALIZING, logic.object_id, network.object_id) if self.sysmgr.put_connection(conn).status_code != 201: print "failed to connect(ret): " + conn_id + " as " + type return conn_id
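# A rough usage sketch showing how the configurator helpers above fit
# together: build a network, attach a learning switch, and populate a node
# with ports. The component/network names and the "original" connection type
# are assumed values for illustration, not taken from this file.
oc = OdenosConfigurator()
net = oc.create_network("network0")
lsw = oc.create_l2switch("lsw0")
oc.connect(lsw, net, "original")
oc.create_node(net, "node001")
oc.create_port(net, "node001", "port0011")
oc.create_port(net, "node001", "port0012")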
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin from maskrcnn_benchmark import NHWC from apex import amp from torch.autograd.function import once_differentiable import collections from itertools import repeat def _ntuple(n): def parse(x): if isinstance(x, collections.Iterable): return x return tuple(repeat(x, n)) return parse _single = _ntuple(1) _pair = _ntuple(2) _triple = _ntuple(3) _quadruple = _ntuple(4) class conv2d_NHWC_impl(torch.autograd.Function): @staticmethod def forward(ctx, x, w, bias=None, stride=(1,1), padding=(0,0), dilation=(1,1), groups=1): # Save constants for bprop ctx.stride = stride ctx.padding = padding ctx.dilation = dilation ctx.groups = groups ctx.need_bias_grad = bias is not None ## pad K-dimension to multiple of 8 for better perf K = w.shape[0] is_padded = (K % 8) != 0 if is_padded: K_padded = 8 * ((K + 7) // 8) padded_filter_shape = [K_padded, w.shape[1], w.shape[2], w.shape[3]] padded_w = torch.zeros(padded_filter_shape, dtype = w.dtype, device = w.device) padded_w[:K,:,:,:] = w ctx.save_for_backward(x, padded_w)##debug##, only use padding in fprop # ctx.save_for_backward(x, w) if bias is not None: padded_bias = torch.zeros([K_padded], dtype = bias.dtype, device = bias.device) padded_bias[:K] = bias else: ctx.save_for_backward(x, w) # try padding only in grad if bias is None: if is_padded: output = NHWC.cudnn_convolution_nhwc(x, padded_w, padding, stride, dilation, groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic) return output[:,:,:,:K].contiguous() else: return NHWC.cudnn_convolution_nhwc(x, w, padding, stride, dilation, groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic) else: if is_padded: output = NHWC.cudnn_convolution_with_bias_nhwc(x, padded_w, padded_bias, padding, stride, dilation, groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic) return output[:,:,:,:K].contiguous() else: return NHWC.cudnn_convolution_with_bias_nhwc(x, w, bias, padding, stride, dilation, groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic) @staticmethod @once_differentiable def backward(ctx, grad_y): x, w = ctx.saved_variables ## if padding is used in fprop, we should pad grad_y here also for better perf K = grad_y.shape[3] is_padded = (K % 8) != 0 if is_padded: K_padded = 8 * ((K + 7) // 8) padded_grad_shape = [grad_y.shape[0], grad_y.shape[1], grad_y.shape[2], K_padded] padded_grad = torch.zeros(padded_grad_shape, dtype = grad_y.dtype, device = grad_y.device) padded_grad[:,:,:,:K] = grad_y # print("padded grad shape", padded_grad.shape) #grad_y = padded_grad if ctx.need_bias_grad: if not is_padded: dx, dw, db = NHWC.cudnn_convolution_backward_with_bias_nhwc(x, grad_y, w, ctx.padding, ctx.stride, ctx.dilation, ctx.groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, list(ctx.needs_input_grad[0:3])) else: dx, dw, db = 
NHWC.cudnn_convolution_backward_with_bias_nhwc(x, padded_grad, w, ctx.padding, ctx.stride, ctx.dilation, ctx.groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, list(ctx.needs_input_grad[0:3])) if ctx.needs_input_grad[0]: if not is_padded: return dx, dw, db, None, None, None, None else: return dx, dw[:K,:,:,:].contiguous(), db[:K].contiguous(), None, None, None, None else: if not is_padded: return None, dw, db, None, None, None, None else: return None, dw[:K,:,:,:].contiguous(), db[:K].contiguous(), None, None, None, None else: if (not ctx.needs_input_grad[1] ): return None, None, None, None, None, None, None if not is_padded: dx, dw = NHWC.cudnn_convolution_backward_nhwc(x, grad_y, w, ctx.padding, ctx.stride, ctx.dilation, ctx.groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, list(ctx.needs_input_grad[0:2])) else: dx, dw = NHWC.cudnn_convolution_backward_nhwc(x, padded_grad, w, ctx.padding, ctx.stride, ctx.dilation, ctx.groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, list(ctx.needs_input_grad[0:2])) if (not ctx.needs_input_grad[1]): return None, None, None, None, None, None, None elif ctx.needs_input_grad[0]: if not is_padded: return dx, dw, None, None, None, None, None else: return dx, dw[:K,:,:,:].contiguous(), None, None, None, None, None else: if not is_padded: return None, dw, None, None, None, None, None else: return None, dw[:K,:,:,:].contiguous(), None, None, None, None, None class conv2d_transpose_NHWC_impl(torch.autograd.Function): @staticmethod def forward(ctx, x, w, bias=None, stride=(1,1), padding=(0,0), output_padding=(0,0), dilation=(1,1), groups=1): # Save constants for bprop ctx.stride = stride ctx.padding = padding ctx.output_padding = output_padding ctx.dilation = dilation ctx.groups = groups ctx.need_bias_grad = bias is not None ctx.save_for_backward(x, w) if bias is None: return NHWC.cudnn_convolution_transpose_nhwc(x, w, padding, output_padding, stride, dilation, groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic) else: return NHWC.cudnn_convolution_transpose_with_bias_nhwc(x, w, bias, padding, output_padding, stride, dilation, groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic) @staticmethod @once_differentiable def backward(ctx, grad_y): x, w = ctx.saved_variables if ctx.need_bias_grad: dx, dw, db = NHWC.cudnn_convolution_transpose_backward_with_bias_nhwc(x, grad_y, w, ctx.padding, ctx.output_padding, ctx.stride, ctx.dilation, ctx.groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, list(ctx.needs_input_grad[0:3])) if ctx.needs_input_grad[0]: return dx, dw, db, None, None, None, None, None else: return None, dw, db, None, None, None, None, None else: if (not ctx.needs_input_grad[1] and not ctx.needs_input_grad[0]): return None, None, None, None, None, None, None dx, dw = NHWC.cudnn_convolution_transpose_backward_nhwc(x, grad_y, w, ctx.padding, ctx.output_padding, ctx.stride, ctx.dilation, ctx.groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, list(ctx.needs_input_grad[0:2])) if (not ctx.needs_input_grad[1]): return None, None, None, None, None, None, None, None elif ctx.needs_input_grad[0]: return dx, dw, None, None, None, None, None, None else: return None, dw, None, None, None, None, None, None amp.register_half_function(conv2d_NHWC_impl,'apply') amp.register_half_function(conv2d_transpose_NHWC_impl,'apply') class Conv2d_NHWC(_ConvNd): def __init__(self, in_channels, out_channels, 
kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) super(Conv2d_NHWC, self).__init__( in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias=bias, padding_mode='zeros') # permute filters self.weight = torch.nn.Parameter(self.weight.permute(0, 2, 3, 1).contiguous()) def forward(self, x): if self.bias is None: return conv2d_NHWC_impl.apply(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) # Use pytorch + operator instead of cudnn bias else: result = conv2d_NHWC_impl.apply(x, self.weight, None, self.stride, self.padding, self.dilation, self.groups) return result + self.bias class ConvTranspose2d_NHWC(_ConvTransposeMixin, _ConvNd): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1, padding_mode='zeros'): kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) output_padding = _pair(output_padding) super(ConvTranspose2d_NHWC, self).__init__( in_channels, out_channels, kernel_size, stride, padding, dilation, True, output_padding, groups, bias, padding_mode) # permute filters self.weight = torch.nn.Parameter(self.weight.permute(0, 2, 3, 1).contiguous()) def forward(self, input, output_size=None): if self.padding_mode != 'zeros': raise ValueError('Only `zeros` padding mode is supported for ConvTranspose2d') output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size) if self.bias is None: return conv2d_transpose_NHWC_impl.apply( input, self.weight, self.bias, self.stride, self.padding, output_padding, self.dilation, self.groups) # Use pytorch + operator instead of cudnn bias else: result = conv2d_transpose_NHWC_impl.apply( input, self.weight, None, self.stride, self.padding, output_padding, self.dilation, self.groups) return result + self.bias
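# A hedged sketch of driving Conv2d_NHWC (assumes the NHWC CUDA extension is
# built and a GPU is available): inputs are laid out as N,H,W,C, and the
# filters were permuted to K,R,S,C by __init__ above. Shapes and dtypes here
# are illustrative.
conv = Conv2d_NHWC(in_channels=64, out_channels=128, kernel_size=3,
                   padding=1, bias=False).cuda().half()
x = torch.randn(8, 56, 56, 64, device='cuda', dtype=torch.half)  # NHWC input
y = conv(x)   # NHWC output, expected shape (8, 56, 56, 128)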
import pprint, re import lacuna.bc class Empire(lacuna.bc.LacunaObject): """ A generic Empire object that can be used with a non-logged-in Guest.""" path = 'empire' ### All of these relate to creating a new empire. I don't see any burning ### need to create a new empire with Python, and the thought of testing ### this out by creating a bunch of new empires is making me sad. So I'm ### punting. def create(self, *args, **kwargs): """ Unimplemented. It's assumed nobody is going to use MontyLacuna to create a new empire. """ raise NotImplementedError( "Creating an empire is not implemented." ) def found(self, *args, **kwargs): """ Unimplemented. It's assumed nobody is going to use MontyLacuna to create a new empire. """ raise NotImplementedError( "Founding an empire is not implemented." ) def update_species(self, *args, **kwargs): """ Unimplemented. It's assumed nobody is going to use MontyLacuna to create a new empire. """ raise NotImplementedError( "Updating a species is not implemented." ) @lacuna.bc.LacunaObject.call_guest_meth def fetch_captcha( self, *args, **kwargs ): """ Get location of a captcha puzzle to solve. There also exists a Captcha class, which requires the user to already be logged in. This fetch_captcha method exists to allow a brand new, not-yet-logged-in user to get a captcha, the solution to which they can pass to :meth:`create`. Since :meth:`create` is unimplemented, you should probably never use this for anything. If you're looking for a captcha for anything other than new user creation, go use the Captcha class. Returns dict containing 'guid' and 'url' keys. """ return kwargs['rslt'] class MyEmpire( Empire ): """ The Empire object belonging to the current Member's empire. Object Attributes:: id "xxxx", rpc_count 321, # the number of calls made to the server is_isolationist 1, # hasn't sent out probes or colony ships name "The Syndicate", status_message "A spy's work is never done.", home_planet_id "id-goes-here", has_new_messages 4, latest_message_id 1234, essentia 0, planets Dict of all of the bodies that belong to you, keyed off ID. This includes both colonies and space stations. { "id-goes-here" : "Earth", "id-goes-here" : "Mars", "id-goes-here" : "ISS", "id-goes-here" : "Halo", }, planet_names The reverse of planets; a dict keyed on the name. colonies Dict of just your colonies, keyed off ID. { "id-goes-here" : "Earth", "id-goes-here" : "Mars", }, colony_names The reverse of colonies; a dict keyed on the name. stations Dict of just your space stations, keyed off ID. { "id-goes-here" : "ISS", "id-goes-here" : "Halo", }, station_names The reverse of stations; a dict keyed on the name. tech_level" 20, # Highest level university has gotten to. self_destruct_active 0, self_destruct_date "" """ pp = pprint.PrettyPrinter( indent = 4 ) ### These appear in clients.py, not here: ### login() ### send_password_reset_message() ### ### Involves email, which the server isn't producing, so skipping: ### reset_password() @lacuna.bc.LacunaObject.call_member_meth def logout( self, *args, **kwargs ): pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def get_status( self, *args, **kwargs ): """ Gets your empire's current status information. There shouldn't ever be a need to call this. Empire status data gets returned with every empire method call, and the current :class:`MyEmpire` object's attributes get updated each time as a result. 
""" pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def get_invite_friend_url( self, *args, **kwargs ): """ Get a unique URL you can send to a friend to use to sign up. Your empire will receive rewards for each friend who joins using such a URL. Returns a dict including the key ``referral_url``. """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def invite_friend( self, email:str, message:str = '', *args, **kwargs ): """ Sends email to a friend inviting them to join the game. Args: email (str): The email address to send the invitation to message (str):An optional personal message to include. Raises: lacuna.exceptions.ServerError: 1010 if you have not set up an email address of your own in your profile. """ pass @lacuna.bc.LacunaObject.call_returning_meth def view_profile( self, *args, **kwargs ): """ View your own empire's profile. Requires login with your real, not sitter, password, Returns: lacuna.empire.OwnProfile: Your profile Raises: lacuna.exceptions.ServerError: 1015 ("Sitters cannot modify preferences") if the user is logged in with their sitter. """ return OwnProfile(self.client, kwargs['rslt']['profile']) @lacuna.bc.LacunaObject.call_returning_meth def view_public_profile( self, empire_id:int, *args, **kwargs ): """ View public profile info on any empire. Args: empire_id (int): ID of the empire to view. Returns: lacuna.empire.PublicProfile: Requested empire's profile """ return PublicProfile(self.client, kwargs['rslt']['profile']) @lacuna.bc.LacunaObject.call_member_meth def edit_profile( self, profile:dict, *args, **kwargs ): ### The rv does contain a 'status' dict, but it's in a different ### format from what's expected, so skip the set_empire_status ### decorator. """ Edit your empire's profile. Requires that you're logged in with your real, not sitter, password. Args: profile (dict): With the keys: - description - email - sitter_password - status_message - city - country - notes - skype - player_name - public_medals (list of medal IDs to display) - skip_happiness_warnings - skip_resource_warnings - skip_pollution_warnings - skip_medal_messages - skip_facebook_wall_posts - skip_found_nothing - skip_excavator_resources - skip_excavator_glyph - skip_excavator_plan - skip_spy_recovery - skip_probe_detected - skip_attack_messages - skip_incoming_ships The skip_* keys are booleans, and require 1 for "on" or 0 for "off". """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def change_password( self, password:str, confirm:str, *args, **kwargs ): """ Changes your full password. Args: password (str): the desired new password confirm (str): the same string as 'password' """ self.client.password = password @lacuna.bc.LacunaObject.call_returning_meth def find( self, name_segment:str, *args, **kwargs ): """ Find an empire by name. Args:: name_segment (str): :ref:`Standard TLE search string <gloss_std_search_string>`. Returns: lacuna.empire.FoundEmpire: list """ mylist = [] for i in kwargs['rslt']['empires']: mylist.append( FoundEmpire(self.client, i) ) return mylist @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def set_status_message( self, message:str, *args, **kwargs ): """ Sets your empire status message. Args: message (str): The message to set as your empire's profile's status. 
""" self.status_message = message pass @lacuna.bc.LacunaObject.call_returning_meth def view_boosts( self, *args, **kwargs ): """ Shows your current boosts and their expiration dates. Returns: lacuna.empire.Boosts: Current boosts """ return Boosts( self.client, kwargs['rslt']['boosts'] ) @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def boost_storage( self, *args, **kwargs ): """ Spends 5 E to set a +25% storage boost for one week. """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def boost_food( self, *args, **kwargs ): """ Spends 5 E to set a +25% food boost for one week. """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def boost_water( self, *args, **kwargs ): """ Spends 5 E to set a +25% water boost for one week. """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def boost_energy( self, *args, **kwargs ): """ Spends 5 E to set a +25% energy boost for one week. """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def boost_ore( self, *args, **kwargs ): """ Spends 5 E to set a +25% ore boost for one week. """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def boost_happiness( self, *args, **kwargs ): """ Spends 5 E to set a +25% happiness boost for one week. """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def boost_building( self, *args, **kwargs ): """ Spends 5 E to set a +25% building speed boost for one week. """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def boost_spy_training( self, *args, **kwargs ): """ Spends 5 E to set a +50% spy training boost for one week. """ pass #@lacuna.bc.LacunaObject.set_empire_status #@lacuna.bc.LacunaObject.call_member_meth def enable_self_destruct( self, *args, **kwargs ): """ Sets the self destruct timer on your empire. After 24 hours, your empire will be deleted. Such a deletion is *not recoverable*. While testing, it was discovered that the :meth:`disable_self_destruct` method does not function, server-side. So if you enable self destruct on your empire, you cannot turn it off again yourself; you'd need to find an admin to do it for you. Admins are usually around, but it's not unthinkable that a 24-hour period could pass during which you could not find an admin. If that were to happen, your empire would be permanently deleted. :meth:`disable_self_destruct` appears to be working again, but I'm leaving this method disabled anyway. If you're making a complete client using this module, see me about getting this fixed, but I tend to doubt that's going to happen. """ print( "Please don't ever call this." ) quit() @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def disable_self_destruct( self, *args, **kwargs ): """ Simply does not work on the server side. This is a known problem, and the reason :meth:`enable_self_destruct` has been disabled. """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def redeem_essentia_code( self, code: str, *args, **kwargs ): """ Redeems an essentia code. Args: code (str): An essentia code uuid, eg ``56cc359e-8ba7-4db7-b608-8cb861c65510`` Raises: lacuna.exception.ServerError: 1010 if you try to redeem a code that's already been redeemed. 
The example code above is such an already-redeemed code; you can use that to test the exception. Essentia codes can be obtained by purchasing essentia, or sometimes by admins during contests. Each code can only be used once, so if you have one, don't share it with anybody; if they use it, the E represented by that code will be gone. """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def redefine_species_limits( self, *args, **kwargs ): """ Returns the limits to be imposed if you redefine your species. Returns: dict: species limits:: 'can': 1, 'essentia_cost': 100, 'max_orbit': 3, 'min_growth': 1, 'min_orbit': 3, 'reason': None, ``can`` will be 0 if the user currently cannot redefine. If ``can`` is 0, ``reason`` will contain a string explaining why not. eg "You have already redefined in the past 30 days", etc. """ pass @lacuna.bc.LacunaObject.set_empire_status @lacuna.bc.LacunaObject.call_member_meth def redefine_species( self, params: dict, *args, **kwargs ): """ Actually does the deed of redefining a player's species for 100 E. Args: params (dict): species settings:: 'name': String name of the species (required) 'description': String 'min_orbit': Integer 1-7 inclusive 'max_orbit': Integer 1-7 inclusive. Must be >= min_orbit. 'manufacturing_affinity': Integer 1-7 inclusive. 'deception_affinity': Integer 1-7 inclusive. 'research_affinity': Integer 1-7 inclusive. 'management_affinity': Integer 1-7 inclusive. 'farming_affinity': Integer 1-7 inclusive. 'mining_affinity': Integer 1-7 inclusive. 'science_affinity': Integer 1-7 inclusive. 'environmental_affinity': Integer 1-7 inclusive. 'political_affinity': Integer 1-7 inclusive. 'trade_affinity': Integer 1-7 inclusive. 'growth_affinity': Integer 1-7 inclusive. """ pass @lacuna.bc.LacunaObject.call_returning_meth def view_species_stats( self, *args, **kwargs ): """ Get information about your empire's species. Returns: lacuna.empire.Species: Your species' info """ return Species(self.client, kwargs['rslt']['species']) @lacuna.bc.LacunaObject.call_returning_meth def get_species_templates( self, *args, **kwargs ): """ Get the species templates that are presented to a new player upon initial species creation (Average, Warmonger, Resilient, Viral, etc). Returns: lacuna.empire.SpeciesTemplate: list of templates """ mylist = [] for i in kwargs['rslt']: mylist.append( SpeciesTemplate(self.client, i) ) return mylist class Species(lacuna.bc.SubClass): """ The attributes associated with an empire's species. Object Attributes:: name "Human", description "The descendants of Earth.", min_orbit 3, max_orbit 3, manufacturing_affinity 4, deception_affinity 4, research_affinity 4, management_affinity 4, farming_affinity 4, mining_affinity 4, science_affinity 4, environmental_affinity 4, political_affinity 4, trade_affinity 4, growth_affinity 4 """ ### This will usually be accessed from something like the Library of 10 ### colonies, and then 10 stations, just to show that they're separate. ### the idea of an empire that this seemed the best place for it. def to_dict(self): """ Returns the object's attributes as a dict, suitable for passing to :class:`lacuna.empire.redefine_species`. 
""" mydict = {} attribs = ( 'name', 'description', 'min_orbit', 'max_orbit', 'manufacturing_affinity', 'deception_affinity', 'research_affinity', 'management_affinity', 'farming_affinity', 'mining_affinity', 'science_affinity', 'environmental_affinity', 'political_affinity', 'trade_affinity', 'growth_affinity' ) for i in attribs: mydict[i] = getattr(self, i) return mydict class SpeciesTemplate(lacuna.bc.SubClass): """ These are the presets presented to a new player in the process of setting up a new empire. Object Attributes:: name "Average", description "A race of average intellect, and weak constitution.', min_orbit 3, max_orbit 3, manufacturing_affinity 4, deception_affinity 4, research_affinity 4, management_affinity 4, farming_affinity 4, mining_affinity 4, science_affinity 4, environmental_affinity 4, political_affinity 4, trade_affinity 4, growth_affinity 4 """ class OwnProfile(lacuna.bc.SubClass): """ This is the user's own profile info. Another empire's public profile will contain less data. Object Attributes:: description "description goes here", status_message "status message goes here", medals Dict: { "Integer Medal ID" : { "name" : "Built Level 1 Building", "image" : "building1", "date" : "01 31 2010 13:09:05 +0600", "public" : 1, "times_earned" : 4 }, ... }, city "Madison", country "USA", notes "notes go here", skype "joeuser47", player_name "Joe User", skip_happiness_warnings 0, skip_resource_warnings 0, skip_pollution_warnings 0, skip_medal_messages 0, skip_facebook_wall_posts 0, skip_found_nothing 0, skip_excavator_resources 0, skip_excavator_glyph 0, skip_excavator_plan 0, skip_spy_recovery 0, skip_probe_detected 0, skip_attack_messages 0, skip_incoming_ships 0, email "joe@example.com", sitter_password "abcdefgh" """ class PublicProfile(lacuna.bc.SubClass): """ This is the public profile of any empire. Object Attributes:: id "empire-id-goes-here", name "Lacuna Expanse Corp", colony_count 1, status_message "Looking for Essentia." description "We are the original inhabitants of the Lacuna Expanse.", city "Madison", country "USA", skype "joeuser47", player_name "Joe User", medals { "id-goes-here" : { "name" : "Built Level 1 Building", "image" : "building1", "date" : "01 31 2010 13:09:05 +0600", "times_earned" : 4 }, }, last_login "01 31 2010 13:09:05 +0600", date_founded "01 31 2010 13:09:05 +0600", "Lacunan", alliance { "id" : "id-goes-here", "name" : "The Confederacy" }, known_colonies [ { "id" : "id-goes-here", "x" : "1", "y" : "-543", "name" : "Earth", "image" : "p12-3" }, { more of the same }, ] """ class FoundEmpire(lacuna.bc.SubClass): """ Object Attributes:: id Integer ID of the empire name String name of the empire """ class OwningEmpire(lacuna.bc.SubClass): """ An empire that owns a given body. Object Attributes:: id Integer ID of the empire name String name of the empire alignment 'ally' # 'ally', 'self', or 'hostile' is_isolationist 1 or 0 """ class Boosts(lacuna.bc.SubClass): """ Object Attributes:: food "01 31 2010 13:09:05 +0600", ore "01 31 2010 13:09:05 +0600", energy "01 31 2010 13:09:05 +0600", water "01 31 2010 13:09:05 +0600", happiness "01 31 2010 13:09:05 +0600", storage "01 31 2010 13:09:05 +0600", building "01 31 2010 13:09:05 +0600", spy_training "01 31 2010 13:09:05 +0600" """
# -*- coding: utf-8 -*- """ Copyright [2009-2018] EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import pytest from rnacentral_pipeline.rnacentral.precompute.rna_type import rna_type_of from .helpers import load_data @pytest.mark.parametrize( "rna_id,rna_type", [ # pylint: disable=no-member ("URS0000000DBF_6239", "piRNA"), ("URS0000005270_9606", "rRNA_5S"), ("URS0000005270_9606", "rRNA_5_8S"), ("URS00000081EA_9606", "U1_snRNA"), ("URS00000101E5_9606", "lnc_RNA"), ("URS0000016972_6239", "miRNA"), ("URS000001E7BA_559292", "glutaminyl_tRNA"), ("URS00000478B7_9606", "SRP_RNA"), ("URS00000490A3_6239", "miRNA"), ("URS000004F67F_109352", "rRNA_primary_transcript"), ("URS0000050E80_67003", "rRNA_primary_transcript"), ("URS000006F31F_4932", "RNase_MRP_RNA"), ("URS0000086133_9606", "transcript"), ("URS00000AEE53_380749", "tmRNA"), ("URS00000B3045_7227", "guide_RNA"), ("URS00000CF490_2786", "small_subunit_rRNA"), ("URS00000DA486_3702", "siRNA"), ("URS00000F9D45_9606", "rRNA_5S"), ("URS00000F9D45_9606", "rRNA_5S"), ("URS00000FFD14_109352", "rRNA_primary_transcript"), ("URS0000118C49_9606", "U5_snRNA"), ("URS00001232FE_175275", "rRNA_18S"), ("URS0000127C85_175245", "rRNA_primary_transcript"), ("URS000012DE89_9606", "autocatalytically_spliced_intron"), ("URS0000130A6B_3702", "pre_miRNA"), ("URS00001380AF_9606", "U5_snRNA"), ("URS000013F331_9606", "RNase_P_RNA"), ("URS0000149178_9606", "U4_snRNA"), ("URS0000157BA2_4896", "antisense_RNA"), ("URS000015995E_4615", "miRNA"), ("URS000015F227_9606", "scaRNA"), ("URS0000162C88_9606", "rRNA_5_8S"), ("URS00001872A7_9606", "U6_snRNA"), ("URS000018B855_1270", "small_subunit_rRNA"), ("URS000018EB2E_3702", "lnc_RNA"), ("URS0000193C7E_9606", "lnc_RNA"), ("URS000019E0CD_9606", "lnc_RNA"), ("URS00001B2620_9606", "scaRNA"), ("URS00001D1383_1299", "Y_RNA"), ("URS00001DEEBE_562", "prolyl_tRNA"), ("URS00001E2C22_3702", "rRNA_5_8S"), ("URS00001F11B5_6239", "spliced_leader_RNA"), ("URS00002078D9_216597", "Y_RNA"), ("URS000020CEFD_9606", "pre_miRNA"), ("URS0000210B3E_216597", "Y_RNA"), ("URS000021515D_322710", "group_II_intron"), ("URS000021BC29_9606", "scaRNA"), ("URS000024083D_9606", "SRP_RNA"), ("URS000024F0F7_4932", "H_ACA_box_snoRNA"), ("URS000025C52E_9606", "ncRNA"), ("URS00002963C4_4565", "SRP_RNA"), ("URS00002985F4_3094", "rRNA_large_subunit_primary_transcript"), ("URS00002AE808_10090", "miRNA"), ("URS00002B64E6_7227", "scaRNA"), # Why flybase disagree? ("URS00002BF60E_7227", "scaRNA"), # Why flybase disagree? 
("URS00002F216C_36329", "antisense_RNA"), ("URS00002F21DA_7227", "pre_miRNA"), ("URS00003054F4_6239", "piRNA"), ("URS000032B6B6_9606", "U1_snRNA"), ("URS000034C5CB_7227", "SRP_RNA"), ("URS000036E62B_9593", "pre_miRNA"), ("URS000037602E_9606", "tmRNA"), ("URS00003936E9_7227", "pre_miRNA"), ("URS00003A60A1_10090", "rRNA_primary_transcript"), ("URS00003AC4AA_3702", "siRNA"), ("URS00003BECAC_9606", "lnc_RNA"), ("URS00003CE153_9606", "lnc_RNA"), ("URS00003EBD9A_9913", "telomerase_RNA"), ("URS00003EC772_10092", "autocatalytically_spliced_intron"), ("URS00003EE18C_9544", "vault_RNA"), ("URS00003EE995_9606", "U2_snRNA"), ("URS00003F07BD_9606", "U4_snRNA"), ("URS0000409697_3702", "tRNA"), ("URS000040F7EF_4577", "siRNA"), ("URS00004151E2_5693", "SRP_RNA"), ("URS0000443498_9606", "U6atac_snRNA"), ("URS000045EBF2_9606", "lnc_RNA"), ("URS0000466DE6_6239", "miRNA"), ("URS000048471C_9606", "antisense_lncRNA"), ("URS000048B30C_3702", "tRNA"), ("URS000049E122_9606", "ncRNA"), ("URS000049EC81_7227", "scaRNA"), ("URS00004A2461_9606", "Y_RNA"), ("URS00004B11CA_223283", "ncRNA"), ("URS00004E52D3_10090", "H_ACA_box_snoRNA"), ("URS00004E9E38_7227", "miRNA"), ("URS00004FB44B_6239", "rRNA_25S"), ("URS0000508014_7227", "scaRNA"), # Why flybase disagree? ("URS000051DCEC_10090", "C_D_box_snoRNA"), ("URS000053F313_559292", "U5_snRNA"), ("URS00005511ED_6239", "snoRNA"), ("URS000055786A_7227", "miRNA"), ("URS0000563A36_7227", "snoRNA"), ("URS0000564CC6_224308", "tmRNA"), ("URS0000569A4A_9606", "scaRNA"), ("URS000059A8B2_7227", "rasiRNA"), ("URS000059EA49_32644", "tmRNA"), ("URS00005A245E_10090", "tRNA"), ("URS00005A2612_9606", "H_ACA_box_snoRNA"), ("URS00005CA2D5_6239", "rRNA_primary_transcript"), ("URS00005CDD41_352472", "RNase_P_RNA"), ("URS00005CF03F_9606", "Y_RNA"), ("URS00005D0BAB_9606", "piRNA"), ("URS00005EB5B7_78454", "pre_miRNA"), ("URS00005EB5B7_9447", "pre_miRNA"), ("URS00005EB5B7_9509", "pre_miRNA"), ("URS00005EB5B7_9519", "pre_miRNA"), ("URS00005EB5B7_9593", "pre_miRNA"), ("URS00005EF0FF_4577", "siRNA"), ("URS00005F2C2D_4932", "rRNA_18S"), ("URS00005F4CAF_3702", "methionyl_tRNA"), ("URS00005FCB94_9606", "U4_snRNA"), ("URS000060B496_10090", "H_ACA_box_snoRNA"), ("URS000060F99B_559292", "mt_rRNA"), ("URS000061DECF_1235461", "group_II_intron"), ("URS000061F377_559292", "rRNA_25S"), ("URS00006233F9_9606", "ribozyme"), ("URS000063164F_9606", "U2_snRNA"), ("URS0000631BD4_9606", "U5_snRNA"), ("URS000064A09E_13616", "vault_RNA"), ("URS00006550DA_10090", "scaRNA"), ("URS000065BB41_7955", "scRNA"), ("URS0000661037_7955", "seryl_tRNA"), ("URS0000675C1B_9796", "U6atac_snRNA"), ("URS000069D7FA_6239", "histidyl_tRNA"), ("URS00006A938C_10090", "RNase_P_RNA"), ("URS00006B14E9_6183", "hammerhead_ribozyme"), ("URS00006B3271_10090", "scaRNA"), ("URS00006BA413_9606", "C_D_box_snoRNA"), ("URS00006C1AB2_9606", "U6atac_snRNA"), ("URS00006C670E_30608", "hammerhead_ribozyme"), ("URS00006CE02F_9606", "C_D_box_snoRNA"), ("URS00006D80BC_9913", "pre_miRNA"), ("URS00006DC8B9_6239", "prolyl_tRNA"), ("URS00006F5B4D_9606", "U4atac_snRNA"), ("URS000070D8C8_9606", "U6atac_snRNA"), ("URS0000714027_9031", "scRNA"), ("URS00007150F8_9913", "pre_miRNA"), ("URS0000715A86_9606", "U4_snRNA"), ("URS000072A167_10141", "Y_RNA"), ("URS0000732D5D_9606", "lincRNA"), ("URS0000734D8F_9606", "snRNA"), ("URS0000759BEC_9606", "lnc_RNA"), ("URS000075A336_9606", "miRNA"), ("URS000075A546_9606", "pre_miRNA"), ("URS000075AD80_9606", "U1_snRNA"), ("URS000075ADBA_9606", "U4atac_snRNA"), ("URS000075BAAE_9606", "U5_snRNA"), ("URS000075C290_9606", "pre_miRNA"), 
("URS000075C808_9606", "lincRNA"), ("URS000075CC93_9606", "pre_miRNA"), ("URS000075CD30_9606", "U1_snRNA"), ("URS000075CEC3_9606", "pre_miRNA"), ("URS000075CF25_9913", "pre_miRNA"), ("URS000075D341_9606", "rRNA_5_8S"), ("URS000075D95B_9606", "lnc_RNA"), ("URS000075EF5D_9606", "U12_snRNA"), ("URS0000764CCC_1415657", "RNase_P_RNA"), ("URS000077FBEB_9606", "lnc_RNA"), ("URS00007A9FDC_6239", "transcript"), ("URS00007CD270_1872691", "rRNA_18S"), ("URS00007FD8A3_7227", "lnc_RNA"), ("URS0000808D19_644", "hammerhead_ribozyme"), ("URS0000808D70_1478174", "tmRNA"), ("URS000080DD33_32630", "group_I_intron"), ("URS000080DE76_9606", "rRNA_5_8S"), ("URS000080DFDA_32630", "hammerhead_ribozyme"), ("URS000082AF7D_5699", "guide_RNA"), ("URS000083F182_242161", "seryl_tRNA"), ("URS000086852D_32630", "hammerhead_ribozyme"), ("URS00008B56F3_9606", "lnc_RNA"), ("URS00008E398A_9606", "H_ACA_box_snoRNA"), ("URS00008E39F3_7227", "scaRNA"), # Why flybase disagree? ("URS00008E3A1B_10090", "lnc_RNA"), ("URS000091C11A_9606", "rRNA_28S"), ("URS000092FF0A_9371", "H_ACA_box_snoRNA"), ("URS00009AD661_10090", "lnc_RNA"), ("URS00009BB4DD_10090", "lnc_RNA"), ("URS00009E8F92_885695", "rRNA_16S"), ("URS0000A17B82_640938", "sequence_feature"), ("URS0000A767C0_3702", "lnc_RNA"), ("URS0000A770BD_3702", "U6atac_snRNA"), ("URS0000A7BB1C_9606", "U4_snRNA"), ("URS0000A85A32_10090", "seryl_tRNA"), ("URS0000A86584_10090", "lnc_RNA"), ("URS0000A8F612_9371", "scaRNA"), ("URS0000A96391_9606", "U4_snRNA"), ("URS0000A994FE_9606", "ncRNA"), ("URS0000ABD7E8_9606", "rRNA_primary_transcript"), ("URS0000ABD7E9_9606", "snoRNA_gene"), ("URS0000ABD7EF_9606", "rRNA_28S"), ("URS0000ABD87F_9606", "rRNA_primary_transcript"), ("URS0000ABD8C6_9606", "rRNA_primary_transcript"), ("URS0000BC44FE_9606", "lnc_RNA"), ("URS0000CC4B4C_10090", "lnc_RNA"), ("URS0000D56C46_9606", "pre_miRNA"), ("URS0000D570F5_9606", "lincRNA"), ("URS0000D5C855_9606", "lnc_RNA"), ("URS0000E305C6_573", "ncRNA"), ("URS0000EE1237_5661", "rRNA_18S"), ("URS0000EEE7A7_9606", "SRP_RNA"), ("URS0000EF7A6A_6945", "U12_snRNA"), ("URS0001980FDE_6239", "circular_ncRNA"), ("URS00019AF356_9606", "rRNA_primary_transcript"), ("URS00019BDBE3_9606", "rRNA_primary_transcript"), ("URS0001A7BF9C_85057", "rRNA_18S"), ("URS0001BBAB5E_7227", "rRNA_18S"), ("URS0001BBF240_224308", "tmRNA"), # Known failing # pytest.param('URS0000175007_7227', 'miRNA', marks=pytest.mark.xfail(reason="Inactive sequence")), # pytest.param('URS000060C682_9606', 'vault_RNA', marks=pytest.mark.xfail(reason="Inactive sequence")), ], ) def test_computes_correct_rna_types(rna_id, rna_type): context, data = load_data(rna_id) assert rna_type_of(context, data).so_term.name == rna_type
# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from oslo.config import cfg from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.v2 import attributes from neutron.common import constants as q_const from neutron.common import exceptions as n_exc from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 from neutron.db import dhcp_rpc_base from neutron.db import external_net_db from neutron.db import extraroute_db from neutron.db import l3_agentschedulers_db from neutron.db import l3_gwmode_db from neutron.db import l3_rpc_base from neutron.db import portbindings_db from neutron.db import quota_db # noqa from neutron.db import securitygroups_rpc_base as sg_db_rpc from neutron.extensions import portbindings from neutron.extensions import providernet as provider from neutron import manager from neutron.openstack.common import importutils from neutron.openstack.common import log as logging from neutron.plugins.common import constants as svc_constants from neutron.plugins.common import utils as plugin_utils from neutron.plugins.linuxbridge.common import constants from neutron.plugins.linuxbridge.db import l2network_db_v2 as db LOG = logging.getLogger(__name__) class LinuxBridgeRpcCallbacks(n_rpc.RpcCallback, dhcp_rpc_base.DhcpRpcCallbackMixin, l3_rpc_base.L3RpcCallbackMixin, sg_db_rpc.SecurityGroupServerRpcCallbackMixin ): # history # 1.1 Support Security Group RPC RPC_API_VERSION = '1.1' # Device names start with "tap" TAP_PREFIX_LEN = 3 @classmethod def get_port_from_device(cls, device): port = db.get_port_from_device(device[cls.TAP_PREFIX_LEN:]) if port: port['device'] = device return port def get_device_details(self, rpc_context, **kwargs): """Agent requests device details.""" agent_id = kwargs.get('agent_id') device = kwargs.get('device') LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), {'device': device, 'agent_id': agent_id}) port = self.get_port_from_device(device) if port: binding = db.get_network_binding(db_api.get_session(), port['network_id']) (network_type, segmentation_id) = constants.interpret_vlan_id(binding.vlan_id) entry = {'device': device, 'network_type': network_type, 'physical_network': binding.physical_network, 'segmentation_id': segmentation_id, 'network_id': port['network_id'], 'port_id': port['id'], 'admin_state_up': port['admin_state_up']} if cfg.CONF.AGENT.rpc_support_old_agents: entry['vlan_id'] = binding.vlan_id new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up'] else q_const.PORT_STATUS_DOWN) if port['status'] != new_status: db.set_port_status(port['id'], new_status) else: entry = {'device': device} LOG.debug(_("%s can not be found in database"), device) 
return entry def update_device_down(self, rpc_context, **kwargs): """Device no longer exists on agent.""" # TODO(garyk) - live migration and port status agent_id = kwargs.get('agent_id') device = kwargs.get('device') host = kwargs.get('host') port = self.get_port_from_device(device) LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), {'device': device, 'agent_id': agent_id}) plugin = manager.NeutronManager.get_plugin() if port: entry = {'device': device, 'exists': True} if (host and not plugin.get_port_host(rpc_context, port['id']) == host): LOG.debug(_("Device %(device)s not bound to the" " agent host %(host)s"), {'device': device, 'host': host}) elif port['status'] != q_const.PORT_STATUS_DOWN: # Set port status to DOWN db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN) else: entry = {'device': device, 'exists': False} LOG.debug(_("%s can not be found in database"), device) return entry def update_device_up(self, rpc_context, **kwargs): """Device is up on agent.""" agent_id = kwargs.get('agent_id') device = kwargs.get('device') host = kwargs.get('host') port = self.get_port_from_device(device) LOG.debug(_("Device %(device)s up on %(agent_id)s"), {'device': device, 'agent_id': agent_id}) plugin = manager.NeutronManager.get_plugin() if port: if (host and not plugin.get_port_host(rpc_context, port['id']) == host): LOG.debug(_("Device %(device)s not bound to the" " agent host %(host)s"), {'device': device, 'host': host}) return elif port['status'] != q_const.PORT_STATUS_ACTIVE: db.set_port_status(port['id'], q_const.PORT_STATUS_ACTIVE) else: LOG.debug(_("%s can not be found in database"), device) class AgentNotifierApi(n_rpc.RpcProxy, sg_rpc.SecurityGroupAgentRpcApiMixin): '''Agent side of the linux bridge rpc API. API version history: 1.0 - Initial version. 1.1 - Added get_active_networks_info, create_dhcp_port, and update_dhcp_port methods. ''' BASE_RPC_API_VERSION = '1.1' def __init__(self, topic): super(AgentNotifierApi, self).__init__( topic=topic, default_version=self.BASE_RPC_API_VERSION) self.topic = topic self.topic_network_delete = topics.get_topic_name(topic, topics.NETWORK, topics.DELETE) self.topic_port_update = topics.get_topic_name(topic, topics.PORT, topics.UPDATE) def network_delete(self, context, network_id): self.fanout_cast(context, self.make_msg('network_delete', network_id=network_id), topic=self.topic_network_delete) def port_update(self, context, port, physical_network, vlan_id): network_type, segmentation_id = constants.interpret_vlan_id(vlan_id) kwargs = {'port': port, 'network_type': network_type, 'physical_network': physical_network, 'segmentation_id': segmentation_id} if cfg.CONF.AGENT.rpc_support_old_agents: kwargs['vlan_id'] = vlan_id msg = self.make_msg('port_update', **kwargs) self.fanout_cast(context, msg, topic=self.topic_port_update) class LinuxBridgePluginV2(db_base_plugin_v2.NeutronDbPluginV2, external_net_db.External_net_db_mixin, extraroute_db.ExtraRoute_db_mixin, l3_gwmode_db.L3_NAT_db_mixin, sg_db_rpc.SecurityGroupServerRpcMixin, l3_agentschedulers_db.L3AgentSchedulerDbMixin, agentschedulers_db.DhcpAgentSchedulerDbMixin, portbindings_db.PortBindingMixin): """Implement the Neutron abstractions using Linux bridging. A new VLAN is created for each network. An agent is relied upon to perform the actual Linux bridge configuration on each host. The provider extension is also supported. 
As discussed in https://bugs.launchpad.net/neutron/+bug/1023156, this class could be simplified, and filtering on extended attributes could be handled, by adding support for extended attributes to the NeutronDbPluginV2 base class. When that occurs, this class should be updated to take advantage of it. The port binding extension enables an external application relay information to and from the plugin. """ # This attribute specifies whether the plugin supports or not # bulk/pagination/sorting operations. Name mangling is used in # order to ensure it is qualified by class __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True _supported_extension_aliases = ["provider", "external-net", "router", "ext-gw-mode", "binding", "quotas", "security-group", "agent", "extraroute", "l3_agent_scheduler", "dhcp_agent_scheduler"] @property def supported_extension_aliases(self): if not hasattr(self, '_aliases'): aliases = self._supported_extension_aliases[:] sg_rpc.disable_security_group_extension_by_config(aliases) self._aliases = aliases return self._aliases def __init__(self): super(LinuxBridgePluginV2, self).__init__() self.base_binding_dict = { portbindings.VIF_TYPE: portbindings.VIF_TYPE_BRIDGE, portbindings.VIF_DETAILS: { # TODO(rkukura): Replace with new VIF security details portbindings.CAP_PORT_FILTER: 'security-group' in self.supported_extension_aliases}} self._parse_network_vlan_ranges() db.sync_network_states(self.network_vlan_ranges) self.tenant_network_type = cfg.CONF.VLANS.tenant_network_type if self.tenant_network_type not in [svc_constants.TYPE_LOCAL, svc_constants.TYPE_VLAN, svc_constants.TYPE_NONE]: LOG.error(_("Invalid tenant_network_type: %s. " "Service terminated!"), self.tenant_network_type) sys.exit(1) self._setup_rpc() self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver ) self.router_scheduler = importutils.import_object( cfg.CONF.router_scheduler_driver ) LOG.debug(_("Linux Bridge Plugin initialization complete")) def _setup_rpc(self): # RPC support self.service_topics = {svc_constants.CORE: topics.PLUGIN, svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} self.conn = n_rpc.create_connection(new=True) self.endpoints = [LinuxBridgeRpcCallbacks(), agents_db.AgentExtRpcCallback()] for svc_topic in self.service_topics.values(): self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) # Consume from all consumers in threads self.conn.consume_in_threads() self.notifier = AgentNotifierApi(topics.AGENT) self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( l3_rpc_agent_api.L3AgentNotifyAPI() ) def _parse_network_vlan_ranges(self): try: self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( cfg.CONF.VLANS.network_vlan_ranges) except Exception as ex: LOG.error(_("%s. 
Agent terminated!"), ex) sys.exit(1) LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) def _add_network_vlan_range(self, physical_network, vlan_min, vlan_max): self._add_network(physical_network) self.network_vlan_ranges[physical_network].append((vlan_min, vlan_max)) def _add_network(self, physical_network): if physical_network not in self.network_vlan_ranges: self.network_vlan_ranges[physical_network] = [] def _extend_network_dict_provider(self, context, network): binding = db.get_network_binding(context.session, network['id']) if binding.vlan_id == constants.FLAT_VLAN_ID: network[provider.NETWORK_TYPE] = svc_constants.TYPE_FLAT network[provider.PHYSICAL_NETWORK] = binding.physical_network network[provider.SEGMENTATION_ID] = None elif binding.vlan_id == constants.LOCAL_VLAN_ID: network[provider.NETWORK_TYPE] = svc_constants.TYPE_LOCAL network[provider.PHYSICAL_NETWORK] = None network[provider.SEGMENTATION_ID] = None else: network[provider.NETWORK_TYPE] = svc_constants.TYPE_VLAN network[provider.PHYSICAL_NETWORK] = binding.physical_network network[provider.SEGMENTATION_ID] = binding.vlan_id def _process_provider_create(self, context, attrs): network_type = attrs.get(provider.NETWORK_TYPE) physical_network = attrs.get(provider.PHYSICAL_NETWORK) segmentation_id = attrs.get(provider.SEGMENTATION_ID) network_type_set = attributes.is_attr_set(network_type) physical_network_set = attributes.is_attr_set(physical_network) segmentation_id_set = attributes.is_attr_set(segmentation_id) if not (network_type_set or physical_network_set or segmentation_id_set): return (None, None, None) if not network_type_set: msg = _("provider:network_type required") raise n_exc.InvalidInput(error_message=msg) elif network_type == svc_constants.TYPE_FLAT: if segmentation_id_set: msg = _("provider:segmentation_id specified for flat network") raise n_exc.InvalidInput(error_message=msg) else: segmentation_id = constants.FLAT_VLAN_ID elif network_type == svc_constants.TYPE_VLAN: if not segmentation_id_set: msg = _("provider:segmentation_id required") raise n_exc.InvalidInput(error_message=msg) if not utils.is_valid_vlan_tag(segmentation_id): msg = (_("provider:segmentation_id out of range " "(%(min_id)s through %(max_id)s)") % {'min_id': q_const.MIN_VLAN_TAG, 'max_id': q_const.MAX_VLAN_TAG}) raise n_exc.InvalidInput(error_message=msg) elif network_type == svc_constants.TYPE_LOCAL: if physical_network_set: msg = _("provider:physical_network specified for local " "network") raise n_exc.InvalidInput(error_message=msg) else: physical_network = None if segmentation_id_set: msg = _("provider:segmentation_id specified for local " "network") raise n_exc.InvalidInput(error_message=msg) else: segmentation_id = constants.LOCAL_VLAN_ID else: msg = _("provider:network_type %s not supported") % network_type raise n_exc.InvalidInput(error_message=msg) if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]: if physical_network_set: if physical_network not in self.network_vlan_ranges: msg = (_("Unknown provider:physical_network %s") % physical_network) raise n_exc.InvalidInput(error_message=msg) elif 'default' in self.network_vlan_ranges: physical_network = 'default' else: msg = _("provider:physical_network required") raise n_exc.InvalidInput(error_message=msg) return (network_type, physical_network, segmentation_id) def create_network(self, context, network): (network_type, physical_network, vlan_id) = self._process_provider_create(context, network['network']) session = context.session with 
session.begin(subtransactions=True): #set up default security groups tenant_id = self._get_tenant_id_for_create( context, network['network']) self._ensure_default_security_group(context, tenant_id) if not network_type: # tenant network network_type = self.tenant_network_type if network_type == svc_constants.TYPE_NONE: raise n_exc.TenantNetworksDisabled() elif network_type == svc_constants.TYPE_VLAN: physical_network, vlan_id = db.reserve_network(session) else: # TYPE_LOCAL vlan_id = constants.LOCAL_VLAN_ID else: # provider network if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]: db.reserve_specific_network(session, physical_network, vlan_id) # no reservation needed for TYPE_LOCAL net = super(LinuxBridgePluginV2, self).create_network(context, network) db.add_network_binding(session, net['id'], physical_network, vlan_id) self._process_l3_create(context, net, network['network']) self._extend_network_dict_provider(context, net) # note - exception will rollback entire transaction return net def update_network(self, context, id, network): provider._raise_if_updates_provider_attributes(network['network']) session = context.session with session.begin(subtransactions=True): net = super(LinuxBridgePluginV2, self).update_network(context, id, network) self._process_l3_update(context, net, network['network']) self._extend_network_dict_provider(context, net) return net def delete_network(self, context, id): session = context.session with session.begin(subtransactions=True): binding = db.get_network_binding(session, id) self._process_l3_delete(context, id) super(LinuxBridgePluginV2, self).delete_network(context, id) if binding.vlan_id != constants.LOCAL_VLAN_ID: db.release_network(session, binding.physical_network, binding.vlan_id, self.network_vlan_ranges) # the network_binding record is deleted via cascade from # the network record, so explicit removal is not necessary self.notifier.network_delete(context, id) def get_network(self, context, id, fields=None): session = context.session with session.begin(subtransactions=True): net = super(LinuxBridgePluginV2, self).get_network(context, id, None) self._extend_network_dict_provider(context, net) return self._fields(net, fields) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): session = context.session with session.begin(subtransactions=True): nets = super(LinuxBridgePluginV2, self).get_networks(context, filters, None, sorts, limit, marker, page_reverse) for net in nets: self._extend_network_dict_provider(context, net) return [self._fields(net, fields) for net in nets] def create_port(self, context, port): session = context.session port_data = port['port'] with session.begin(subtransactions=True): self._ensure_default_security_group_on_port(context, port) sgids = self._get_security_groups_on_port(context, port) # Set port status as 'DOWN'. 
This will be updated by agent port['port']['status'] = q_const.PORT_STATUS_DOWN port = super(LinuxBridgePluginV2, self).create_port(context, port) self._process_portbindings_create_and_update(context, port_data, port) self._process_port_create_security_group( context, port, sgids) self.notify_security_groups_member_updated(context, port) return port def update_port(self, context, id, port): original_port = self.get_port(context, id) session = context.session need_port_update_notify = False with session.begin(subtransactions=True): updated_port = super(LinuxBridgePluginV2, self).update_port( context, id, port) self._process_portbindings_create_and_update(context, port['port'], updated_port) need_port_update_notify = self.update_security_group_on_port( context, id, port, original_port, updated_port) need_port_update_notify |= self.is_security_group_member_updated( context, original_port, updated_port) if original_port['admin_state_up'] != updated_port['admin_state_up']: need_port_update_notify = True if need_port_update_notify: self._notify_port_updated(context, updated_port) return updated_port def delete_port(self, context, id, l3_port_check=True): # if needed, check to see if this is a port owned by # and l3-router. If so, we should prevent deletion. if l3_port_check: self.prevent_l3_port_deletion(context, id) session = context.session with session.begin(subtransactions=True): self.disassociate_floatingips(context, id) port = self.get_port(context, id) self._delete_port_security_group_bindings(context, id) super(LinuxBridgePluginV2, self).delete_port(context, id) self.notify_security_groups_member_updated(context, port) def _notify_port_updated(self, context, port): binding = db.get_network_binding(context.session, port['network_id']) self.notifier.port_update(context, port, binding.physical_network, binding.vlan_id)
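# Hedged configuration sketch: the plugin above reads its VLAN settings from
# the cfg.CONF.VLANS group (see _parse_network_vlan_ranges() and
# tenant_network_type in __init__).  The physical network names and ranges
# below are illustrative only, not values the plugin requires:
#
#   [VLANS]
#   tenant_network_type = vlan
#   network_vlan_ranges = physnet1:1000:2999,physnet2
#
# "physnet1:1000:2999" makes VLANs 1000-2999 on physnet1 allocatable to tenant
# networks; "physnet2" with no range still allows provider networks on that
# physical network but reserves nothing for tenant allocation.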
from __future__ import unicode_literals from urllib import urlencode from cStringIO import StringIO from appointments.unicsv import UnicodeCSVReader from django.contrib.auth.models import Permission from django.core.urlresolvers import reverse from ..models import Appointment from .base import AppointmentDataTestCase __all__ = ['AppointmentListViewTestCase', 'AppointmentExportViewTestCase'] class AppointmentViewTestCase(AppointmentDataTestCase): url_name = None perm_names = [] url_args = [] url_kwargs = {} get_kwargs = {} def setUp(self): super(AppointmentViewTestCase, self).setUp() self.username = 'testuser' self.password = 'password' self.permissions = self.get_permissions() self.user = self.create_user(self.username, self.password, user_permissions=self.permissions) self.client.login(username=self.username, password=self.password) def get_permissions(self, perm_names=None): """Returns a list of Permission objects corresponding to perm_names.""" perm_names = perm_names if perm_names is not None else self.perm_names return [Permission.objects.filter(content_type__app_label=app_label, codename=codename)[0] for app_label, codename in perm_names] def _url(self, url_name=None, url_args=None, url_kwargs=None, get_kwargs=None): url_name = url_name or self.url_name url_args = self.url_args if url_args is None else url_args url_kwargs = self.url_kwargs if url_kwargs is None else url_kwargs get_kwargs = self.get_kwargs if get_kwargs is None else get_kwargs url = reverse(url_name, args=url_args, kwargs=url_kwargs) if get_kwargs: url = '{0}?{1}'.format(url, urlencode(get_kwargs)) return url def _get(self, url_name=None, url_args=None, url_kwargs=None, get_kwargs=None, url=None, *args, **kwargs): """Convenience wrapper for self.client.get. If url is not given, it is built using url_name, url_args, and url_kwargs. Get parameters may be added from get_kwargs. 
""" url = url or self._url(url_name, url_args, url_kwargs, get_kwargs) return self.client.get(url, *args, **kwargs) class AppointmentListViewTestCase(AppointmentViewTestCase): url_name = 'appointment_list' perm_names = [('appointments', 'view_appointment')] def _extract(self, response): """Extract the information we're interested in from the context.""" form = response.context['form'] queryset = response.context['table'].data.queryset return queryset, form def test_no_permission(self): """Permission is required to get the Appointment list page.""" self.user.user_permissions.all().delete() response = self._get() self.assertEquals(response.status_code, 302) # redirect to login def test_no_appointments(self): """Retrieve the Appointment list when there are no appointments.""" Appointment.objects.all().delete() response = self._get() self.assertEquals(response.status_code, 200) queryset, form = self._extract(response) self.assertEquals(queryset.count(), 0) def test_appointment(self): """Retrieve the Appointment list when there is one Appointment.""" report = self.create_appointment() response = self._get() self.assertEquals(response.status_code, 200) queryset, form = self._extract(response) self.assertEquals(queryset.count(), 1) self.assertEquals(queryset.get(), report) def test_pagination(self): """The reports list should show 10 items per page.""" for i in range(11): self.create_appointment() response = self._get(get_kwargs={'page': 2}) self.assertEquals(response.status_code, 200) queryset, form = self._extract(response) self.assertEquals(queryset.count(), 11) page = response.context['table'].page self.assertEquals(page.object_list.data.count(), 1) def test_filter_subscription_timeline(self): """Reports should be filtered by timeline.""" timeline = self.create_timeline() subscription = self.create_timeline_subscription(timeline=timeline) appt = self.create_appointment(subscription=subscription) self.create_appointment() response = self._get(get_kwargs={'subscription__timeline': timeline.id}) self.assertEquals(response.status_code, 200) queryset, form = self._extract(response) self.assertEquals(queryset.count(), 1) self.assertEquals(queryset.get(), appt) def test_filter_bad_subscription_timeline(self): """Form does no validation on timeline, but no results returned.""" timeline = self.create_timeline() subscription = self.create_timeline_subscription(timeline=timeline) self.create_appointment(subscription=subscription) response = self._get(get_kwargs={'subscription__timeline': 10}) self.assertEquals(response.status_code, 200) queryset, form = self._extract(response) self.assertEquals(queryset.count(), 0) self.assertTrue('subscription__timeline' in form.errors) def test_filter_status(self): """Reports should be filtered by status.""" params = {'status': Appointment.STATUS_MISSED} appt = self.create_appointment(**params) self.create_appointment() response = self._get(get_kwargs=params) self.assertEquals(response.status_code, 200) queryset, form = self._extract(response) self.assertEquals(queryset.count(), 1) self.assertEquals(queryset.get(), appt) def test_filter_bad_status(self): """Form has error & no results returned if invalid status is given.""" self.create_appointment() response = self._get(get_kwargs={'status': 7}) self.assertEquals(response.status_code, 200) queryset, form = self._extract(response) self.assertEquals(queryset.count(), 0) self.assertTrue('status' in form.errors) class AppointmentExportViewTestCase(AppointmentViewTestCase): url_name = 'csv_appointment_list' perm_names = 
[('appointments', 'view_appointment')] def _extract(self, response): reader = UnicodeCSVReader(StringIO(response.content)) return [line for line in reader] def _check_appointment(self, response, *appts): self.assertEquals(response.status_code, 200) csv = self._extract(response) self.assertEquals(len(csv), 1 + len(appts)) # include headers row num_columns = 8 headers, data = csv[0], csv[1:] self.assertEquals(len(headers), num_columns) for line in data: self.assertEquals(len(line), num_columns) def test_no_permissions(self): """Permission is required to export a Appointment list.""" self.user.user_permissions.all().delete() response = self._get() self.assertEquals(response.status_code, 302) # redirect to login def test_no_appointments(self): """Export reports list when there are no reports.""" Appointment.objects.all().delete() response = self._get() self._check_appointment(response) def test_appointment(self): """Export reports list when there is one Appointment.""" report = self.create_appointment() response = self._get() self._check_appointment(response, report) def test_filter_subscription_timeline(self): """Reports export should be filtered by timeline.""" timeline = self.create_timeline() subscription = self.create_timeline_subscription(timeline=timeline) appt = self.create_appointment(subscription=subscription) self.create_appointment() response = self._get(get_kwargs={'subscription__timeline': timeline.id}) self._check_appointment(response, appt) def test_filter_bad_subscription_timeline(self): """Invalid status causes redirect to regular list view.""" self.create_appointment() response = self._get(get_kwargs={'status': 'bad'}, follow=True) correct_url = reverse('appointment_list') + '?status=bad' self.assertRedirects(response, correct_url) queryset = response.context['table'].data.queryset form = response.context['form'] self.assertEquals(queryset.count(), 0) self.assertTrue('status' in form.errors) def test_filter_status(self): """Reports export should be filtered by status.""" params = {'status': Appointment.STATUS_MISSED} appt = self.create_appointment(**params) self.create_appointment() response = self._get(get_kwargs=params) self._check_appointment(response, appt) def test_filter_bad_status(self): """Invalid status causes redirect to regular list view.""" self.create_appointment() response = self._get(get_kwargs={'status': 'bad'}, follow=True) correct_url = reverse('appointment_list') + '?status=bad' self.assertRedirects(response, correct_url) queryset = response.context['table'].data.queryset form = response.context['form'] self.assertEquals(queryset.count(), 0) self.assertTrue('status' in form.errors)
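# Hedged sketch (illustrative only): the class attributes on
# AppointmentViewTestCase drive URL construction in _url()/_get(), so a test
# case for another view only needs to override them.  The 'appointment_detail'
# url name and pk kwarg below are assumptions, not part of the app:
#
# class AppointmentDetailViewTestCase(AppointmentViewTestCase):
#     url_name = 'appointment_detail'
#     perm_names = [('appointments', 'view_appointment')]
#     url_kwargs = {'pk': 1}
#
#     def test_no_permission(self):
#         self.user.user_permissions.all().delete()
#         response = self._get()
#         self.assertEquals(response.status_code, 302)  # redirect to login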
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy as np import sys import math import paddle.fluid as fluid from op_test import OpTest def generate_proposal_labels_in_python(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info, batch_size_per_im, fg_fraction, fg_thresh, bg_thresh_hi, bg_thresh_lo, bbox_reg_weights, class_nums): rois = [] labels_int32 = [] bbox_targets = [] bbox_inside_weights = [] bbox_outside_weights = [] lod = [] assert len(rpn_rois) == len( im_info), 'batch size of rpn_rois and ground_truth is not matched' for im_i in range(len(im_info)): frcn_blobs = _sample_rois( rpn_rois[im_i], gt_classes[im_i], is_crowd[im_i], gt_boxes[im_i], im_info[im_i], batch_size_per_im, fg_fraction, fg_thresh, bg_thresh_hi, bg_thresh_lo, bbox_reg_weights, class_nums) lod.append(frcn_blobs['rois'].shape[0]) rois.append(frcn_blobs['rois']) labels_int32.append(frcn_blobs['labels_int32']) bbox_targets.append(frcn_blobs['bbox_targets']) bbox_inside_weights.append(frcn_blobs['bbox_inside_weights']) bbox_outside_weights.append(frcn_blobs['bbox_outside_weights']) return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights, lod def _sample_rois(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info, batch_size_per_im, fg_fraction, fg_thresh, bg_thresh_hi, bg_thresh_lo, bbox_reg_weights, class_nums): rois_per_image = int(batch_size_per_im) fg_rois_per_im = int(np.round(fg_fraction * rois_per_image)) # Roidb im_scale = im_info[2] inv_im_scale = 1. 
/ im_scale rpn_rois = rpn_rois * inv_im_scale boxes = np.vstack([gt_boxes, rpn_rois]) gt_overlaps = np.zeros((boxes.shape[0], class_nums)) box_to_gt_ind_map = np.zeros((boxes.shape[0]), dtype=np.int32) if len(gt_boxes) > 0: proposal_to_gt_overlaps = _bbox_overlaps(boxes, gt_boxes) overlaps_argmax = proposal_to_gt_overlaps.argmax(axis=1) overlaps_max = proposal_to_gt_overlaps.max(axis=1) # Boxes which with non-zero overlap with gt boxes overlapped_boxes_ind = np.where(overlaps_max > 0)[0] overlapped_boxes_gt_classes = gt_classes[overlaps_argmax[ overlapped_boxes_ind]] gt_overlaps[overlapped_boxes_ind, overlapped_boxes_gt_classes] = overlaps_max[ overlapped_boxes_ind] box_to_gt_ind_map[overlapped_boxes_ind] = overlaps_argmax[ overlapped_boxes_ind] crowd_ind = np.where(is_crowd)[0] gt_overlaps[crowd_ind] = -1 max_overlaps = gt_overlaps.max(axis=1) max_classes = gt_overlaps.argmax(axis=1) # Foreground fg_inds = np.where(max_overlaps >= fg_thresh)[0] fg_rois_per_this_image = np.minimum(fg_rois_per_im, fg_inds.shape[0]) # Sample foreground if there are too many # if fg_inds.shape[0] > fg_rois_per_this_image: # fg_inds = np.random.choice( # fg_inds, size=fg_rois_per_this_image, replace=False) fg_inds = fg_inds[:fg_rois_per_this_image] # Background bg_inds = np.where((max_overlaps < bg_thresh_hi) & (max_overlaps >= bg_thresh_lo))[0] bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_inds.shape[0]) # Sample background if there are too many # if bg_inds.shape[0] > bg_rois_per_this_image: # bg_inds = np.random.choice( # bg_inds, size=bg_rois_per_this_image, replace=False) bg_inds = bg_inds[:bg_rois_per_this_image] keep_inds = np.append(fg_inds, bg_inds) sampled_labels = max_classes[keep_inds] sampled_labels[fg_rois_per_this_image:] = 0 sampled_boxes = boxes[keep_inds] sampled_gts = gt_boxes[box_to_gt_ind_map[keep_inds]] sampled_gts[fg_rois_per_this_image:, :] = gt_boxes[0] bbox_label_targets = _compute_targets(sampled_boxes, sampled_gts, sampled_labels, bbox_reg_weights) bbox_targets, bbox_inside_weights = _expand_bbox_targets(bbox_label_targets, class_nums) bbox_outside_weights = np.array( bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype) # Scale rois sampled_rois = sampled_boxes * im_scale # Faster RCNN blobs frcn_blobs = dict( rois=sampled_rois, labels_int32=sampled_labels, bbox_targets=bbox_targets, bbox_inside_weights=bbox_inside_weights, bbox_outside_weights=bbox_outside_weights) return frcn_blobs def _bbox_overlaps(roi_boxes, gt_boxes): w1 = np.maximum(roi_boxes[:, 2] - roi_boxes[:, 0] + 1, 0) h1 = np.maximum(roi_boxes[:, 3] - roi_boxes[:, 1] + 1, 0) w2 = np.maximum(gt_boxes[:, 2] - gt_boxes[:, 0] + 1, 0) h2 = np.maximum(gt_boxes[:, 3] - gt_boxes[:, 1] + 1, 0) area1 = w1 * h1 area2 = w2 * h2 overlaps = np.zeros((roi_boxes.shape[0], gt_boxes.shape[0])) for ind1 in range(roi_boxes.shape[0]): for ind2 in range(gt_boxes.shape[0]): inter_x1 = np.maximum(roi_boxes[ind1, 0], gt_boxes[ind2, 0]) inter_y1 = np.maximum(roi_boxes[ind1, 1], gt_boxes[ind2, 1]) inter_x2 = np.minimum(roi_boxes[ind1, 2], gt_boxes[ind2, 2]) inter_y2 = np.minimum(roi_boxes[ind1, 3], gt_boxes[ind2, 3]) inter_w = np.maximum(inter_x2 - inter_x1 + 1, 0) inter_h = np.maximum(inter_y2 - inter_y1 + 1, 0) inter_area = inter_w * inter_h iou = inter_area / (area1[ind1] + area2[ind2] - inter_area) overlaps[ind1, ind2] = iou return overlaps def _compute_targets(roi_boxes, gt_boxes, labels, bbox_reg_weights): assert roi_boxes.shape[0] == gt_boxes.shape[0] 
assert roi_boxes.shape[1] == 4 assert gt_boxes.shape[1] == 4 targets = np.zeros(roi_boxes.shape) bbox_reg_weights = np.asarray(bbox_reg_weights) targets = _box_to_delta( ex_boxes=roi_boxes, gt_boxes=gt_boxes, weights=bbox_reg_weights) return np.hstack([labels[:, np.newaxis], targets]).astype( np.float32, copy=False) def _box_to_delta(ex_boxes, gt_boxes, weights): ex_w = ex_boxes[:, 2] - ex_boxes[:, 0] + 1 ex_h = ex_boxes[:, 3] - ex_boxes[:, 1] + 1 ex_ctr_x = ex_boxes[:, 0] + 0.5 * ex_w ex_ctr_y = ex_boxes[:, 1] + 0.5 * ex_h gt_w = gt_boxes[:, 2] - gt_boxes[:, 0] + 1 gt_h = gt_boxes[:, 3] - gt_boxes[:, 1] + 1 gt_ctr_x = gt_boxes[:, 0] + 0.5 * gt_w gt_ctr_y = gt_boxes[:, 1] + 0.5 * gt_h dx = (gt_ctr_x - ex_ctr_x) / ex_w / weights[0] dy = (gt_ctr_y - ex_ctr_y) / ex_h / weights[1] dw = (np.log(gt_w / ex_w)) / weights[2] dh = (np.log(gt_h / ex_h)) / weights[3] targets = np.vstack([dx, dy, dw, dh]).transpose() return targets def _expand_bbox_targets(bbox_targets_input, class_nums): class_labels = bbox_targets_input[:, 0] fg_inds = np.where(class_labels > 0)[0] bbox_targets = np.zeros((class_labels.shape[0], 4 * class_nums)) bbox_inside_weights = np.zeros(bbox_targets.shape) for ind in fg_inds: class_label = int(class_labels[ind]) start_ind = class_label * 4 end_ind = class_label * 4 + 4 bbox_targets[ind, start_ind:end_ind] = bbox_targets_input[ind, 1:] bbox_inside_weights[ind, start_ind:end_ind] = (1.0, 1.0, 1.0, 1.0) return bbox_targets, bbox_inside_weights class TestGenerateProposalLabelsOp(OpTest): def set_data(self): self.init_test_params() self.init_test_input() self.init_test_output() self.inputs = { 'RpnRois': (self.rpn_rois[0], self.rpn_rois_lod), 'GtClasses': (self.gt_classes[0], self.gts_lod), 'IsCrowd': (self.is_crowd[0], self.gts_lod), 'GtBoxes': (self.gt_boxes[0], self.gts_lod), 'ImInfo': self.im_info } self.attrs = { 'batch_size_per_im': self.batch_size_per_im, 'fg_fraction': self.fg_fraction, 'fg_thresh': self.fg_thresh, 'bg_thresh_hi': self.bg_thresh_hi, 'bg_thresh_lo': self.bg_thresh_lo, 'bbox_reg_weights': self.bbox_reg_weights, 'class_nums': self.class_nums, 'use_random': False } self.outputs = { 'Rois': (self.rois, [self.lod]), 'LabelsInt32': (self.labels_int32, [self.lod]), 'BboxTargets': (self.bbox_targets, [self.lod]), 'BboxInsideWeights': (self.bbox_inside_weights, [self.lod]), 'BboxOutsideWeights': (self.bbox_outside_weights, [self.lod]), } def test_check_output(self): self.check_output() def setUp(self): self.op_type = 'generate_proposal_labels' self.set_data() def init_test_params(self): self.batch_size_per_im = 512 self.fg_fraction = 0.25 self.fg_thresh = 0.5 self.bg_thresh_hi = 0.5 self.bg_thresh_lo = 0.0 self.bbox_reg_weights = [0.1, 0.1, 0.2, 0.2] self.class_nums = 81 def init_test_input(self): np.random.seed(0) gt_nums = 6 # Keep same with batch_size_per_im for unittest proposal_nums = 2000 #self.batch_size_per_im - gt_nums images_shape = [[64, 64]] self.im_info = np.ones((len(images_shape), 3)).astype(np.float32) for i in range(len(images_shape)): self.im_info[i, 0] = images_shape[i][0] self.im_info[i, 1] = images_shape[i][1] self.im_info[i, 2] = 0.8 #scale self.rpn_rois, self.rpn_rois_lod = _generate_proposals(images_shape, proposal_nums) ground_truth, self.gts_lod = _generate_groundtruth( images_shape, self.class_nums, gt_nums) self.gt_classes = [gt['gt_classes'] for gt in ground_truth] self.gt_boxes = [gt['boxes'] for gt in ground_truth] self.is_crowd = [gt['is_crowd'] for gt in ground_truth] def init_test_output(self): self.rois, self.labels_int32, 
self.bbox_targets, \ self.bbox_inside_weights, self.bbox_outside_weights, \ self.lod = generate_proposal_labels_in_python( self.rpn_rois, self.gt_classes, self.is_crowd, self.gt_boxes, self.im_info, self.batch_size_per_im, self.fg_fraction, self.fg_thresh, self.bg_thresh_hi, self.bg_thresh_lo, self.bbox_reg_weights, self.class_nums ) self.rois = np.vstack(self.rois) self.labels_int32 = np.hstack(self.labels_int32) self.labels_int32 = self.labels_int32[:, np.newaxis] self.bbox_targets = np.vstack(self.bbox_targets) self.bbox_inside_weights = np.vstack(self.bbox_inside_weights) self.bbox_outside_weights = np.vstack(self.bbox_outside_weights) def _generate_proposals(images_shape, proposal_nums): rpn_rois = [] rpn_rois_lod = [] num_proposals = 0 for i, image_shape in enumerate(images_shape): proposals = _generate_boxes(image_shape, proposal_nums) rpn_rois.append(proposals) num_proposals = len(proposals) rpn_rois_lod.append(num_proposals) return rpn_rois, [rpn_rois_lod] def _generate_groundtruth(images_shape, class_nums, gt_nums): ground_truth = [] gts_lod = [] num_gts = 0 for i, image_shape in enumerate(images_shape): # Avoid background gt_classes = np.random.randint( low=1, high=class_nums, size=gt_nums).astype(np.int32) gt_boxes = _generate_boxes(image_shape, gt_nums) is_crowd = np.zeros((gt_nums), dtype=np.int32) is_crowd[0] = 1 ground_truth.append( dict( gt_classes=gt_classes, boxes=gt_boxes, is_crowd=is_crowd)) num_gts += len(gt_classes) gts_lod.append(num_gts) return ground_truth, [gts_lod] def _generate_boxes(image_size, box_nums): width = image_size[0] height = image_size[1] xywh = np.random.rand(box_nums, 4) xy1 = xywh[:, [0, 1]] * image_size wh = xywh[:, [2, 3]] * (image_size - xy1) xy2 = xy1 + wh boxes = np.hstack([xy1, xy2]) boxes[:, [0, 2]] = np.minimum(width - 1., np.maximum(0., boxes[:, [0, 2]])) boxes[:, [1, 3]] = np.minimum(height - 1., np.maximum(0., boxes[:, [1, 3]])) return boxes.astype(np.float32) if __name__ == '__main__': unittest.main()
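# Hedged worked example of the helpers above (illustrative numbers only):
#
#   roi = np.array([[0., 0., 9., 9.]])      # 10x10 box (the +1 makes widths inclusive)
#   gt  = np.array([[5., 5., 14., 14.]])    # 10x10 box shifted by (5, 5)
#
#   _bbox_overlaps(roi, gt)
#   # intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175, IoU ~= 0.143
#
#   _box_to_delta(roi, gt, weights=[1., 1., 1., 1.])
#   # centers: ex (5.0, 5.0) vs gt (10.0, 10.0) -> dx = dy = 0.5
#   # widths/heights are equal                  -> dw = dh = log(10 / 10) = 0.0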
import inspect import re from abc import abstractmethod import six from selenium.common.exceptions import InvalidSelectorException, NoSuchElementException from selenium.webdriver.common.by import By WAIT_STALE_ELEMENT_MAX_TRY = 5 WAIT_ELEMENT_TIMEOUT = 0 WAIT_ELEMENT_POLL_FREQUENCY = 0.5 def get_members_safety(cls): # inspect.getmembers calls __get__ method of the field, if exists, that may cause unexpected actions # the solution below does't have this problem return reduce(lambda a, b: dict(a, **vars(b)), reversed(inspect.getmro(cls)), {}).items() class PageElementsContainer(object): """ Classes inheriting PageElementsContainer can use BasePageElement(s) as class attributes with implicit element initialization and searching. If a class inherits PageElementsContainer and doesn't inherit PageElement, than it should have attribute 'driver' with web driver instance. """ def __new__(cls, *args, **kwargs): for k, v in get_members_safety(cls): if isinstance(v, (BasePageElement,)) and v._name is None: v._name = k # noinspection PyArgumentList return super(PageElementsContainer, cls).__new__(cls, *args, **kwargs) def all_elements(self): """returns all public BasePageElements grouped by this element and it parent(s) :rtype: list[(str, BasePageElement)] """ return [(k, getattr(self, k)) for k, v in get_members_safety(self.__class__) if not k.startswith("_") and isinstance(v, (BasePageElement,))] class BasePageElement(object): """ Base class to describe page object element. """ def __init__(self, selector, name=None, timeout=None, cached=True): self.__cached__ = cached self._locator = build_locator(selector) self._name = name self._owner = None self._parent = None """ :type: FindOverride """ self.__timeout = timeout self._w3c = False def _fill_owner(self, owner): # _parent and _id field are native for Selenium WebDriver WebElement # and should be used in this way for clear and through work with PageElement as WebElement if hasattr(owner, "parent"): self._parent = owner.parent self._owner = owner else: if not hasattr(owner, "driver"): raise TypeError("class {0} doesn't have 'driver' attribute.\n" "Class uses page element(s) should inherit " "PageElement or has 'driver' attribute.".format(type(owner).__name__)) self._parent = owner.driver self._owner = owner.driver self._w3c = getattr(self._parent, "w3c", False) # noinspection PyUnusedLocal def __get__(self, owner, cls): self._fill_owner(owner) return self @abstractmethod def reload(self): pass @property def wait_timeout(self): return WAIT_ELEMENT_TIMEOUT if self.__timeout is None else self.__timeout @wait_timeout.setter def wait_timeout(self, value): self.__timeout = value if isinstance(value, (float, int)) and value >= 0 else None @property def name(self): return self._name or "({}:{})".format(*self._locator) def define_selector(by, value, el_class): """ :param by: :param value: :param el_class: :rtype: tuple[type, str|tuple[str, str]] :return: """ el = el_class selector = by if isinstance(value, six.string_types): selector = (by, value) elif value is not None: el = value if el is None: el = elements.PageElement return el, selector class FindOverride(object): def child_element(self, by=By.ID, value=None, el_class=None): """ Doesn't rise NoSuchElementException in case if there are no element with the selector. In this case ``exists()`` and ``is_displayed()`` methods of the element will return *False*. Attempt to call any other method supposed to interact with browser will raise NoSuchElementException. 
usages with ``'one string'`` selector: - find_element(by: str) -> PageElement - find_element(by: str, value: T <= PageElement) -> T usages with ``'webdriver'`` By selector - find_element(by: str, value: str) -> PageElement - find_element(by: str, value: str, el_class: T <= PageElement) -> T :type by: str :param by: :type value: str | T <= PageElement :param value: :type el_class: T <= PageElement :param el_class: :rtype: T <= PageElement :return: """ el, selector = define_selector(by, value, el_class) return self._init_element(el(selector)) def child_elements(self, by=By.ID, value=None, el_class=None): """ alias for ``find_elements`` :param by: :param value: :param el_class: :return: """ el, selector = define_selector(by, value, el_class) return self._init_element(elements.PageElementsList(selector, el)) def find_element(self, by=By.ID, value=None, el_class=None): """ usages with ``'one string'`` selector: - find_element(by: str) -> PageElement - find_element(by: str, value: T <= PageElement) -> T usages with ``'webdriver'`` By selector - find_element(by: str, value: str) -> PageElement - find_element(by: str, value: str, el_class: T <= PageElement) -> T :type by: str :param by: :type value: str | T <= PageElement :param value: :type el_class: T <= PageElement :param el_class: :rtype: T <= PageElement :return: """ el = self.child_element(by, value, el_class) el.reload() return el def find_elements(self, by=By.ID, value=None, el_class=None): """ usages with ``'one string'`` selector: - find_elements(by: str) -> PageElementsList[ListElement] - find_elements(by: str, value: T <= ListElement) -> PageElementsList[T] usages with ``'webdriver'`` By selector - find_elements(by: str, value: str) -> PageElementsList[ListElement] - find_elements(by: str, value: str, el_class: T <= ListElement) -> PageElementsList[T] :type by: str :param by: :type value: str | T <= ListElement :param value: :type el_class: T <= ListElement :param el_class: :rtype: PageElementsList[T | ListElement] :return: """ els = self.child_elements(by, value, el_class) els.reload() return els def _init_element(self, element): # noinspection PyProtectedMember element._fill_owner(self) return element import elements def _stats_with(prefix): return lambda s: s.startswith(prefix) selectors = [ (re.compile(r"^\w+$").match, By.TAG_NAME, None), (re.compile(r"^\.-?[_a-zA-Z]+[_a-zA-Z0-9-]*$").match, By.CLASS_NAME, 1), (re.compile(r"^#[A-Za-z]+[:._a-zA-Z0-9-]*$").match, By.ID, 1), (re.compile(r"^@[A-Za-z]+[:._a-zA-Z0-9-]*$").match, By.NAME, 1), (re.compile(r"^(\./|//).+").match, By.XPATH, None), (_stats_with("$x:"), By.XPATH, 3), (_stats_with("$link_text:"), By.LINK_TEXT, 11), (_stats_with("$partial_link_text:"), By.PARTIAL_LINK_TEXT, 19), (re.compile(r"^(\*|\.|#|[\w-]|\[|:).*").match, By.CSS_SELECTOR, None) ] def build_locator(selector): """ - ID = "#valid_id" - CLASS_NAME = ".valid_class_name" - TAG_NAME = "valid_tag_name" - XPATH = start with "./" or "//" or "$x:" - LINK_TEXT = start with "$link_text:" - PARTIAL_LINK_TEXT = start with "$partial_link_text:" - NAME = "@valid_name_attribute_value" CSS_SELECTOR = all other that starts with *|.|#|[\w-]|\[|: :type selector: str|tuple :param selector: :rtype: tuple[selenium.webdriver.common.by.By, str] :return: """ if type(selector) is tuple: return selector if not isinstance(selector, six.string_types): raise InvalidSelectorException("Invalid locator values passed in") s = selector.strip() for test, by, index in selectors: if test(s): return by, s[index:] raise 
InvalidSelectorException("Invalid locator values passed in: {}".format(selector)) def find(owner, locator): try: return super(FindOverride, owner).find_element(*locator) except NoSuchElementException: return False
######################################################################################## # Davi Frossard, 2016 # # VGG16 implementation in TensorFlow # # Details: # # http://www.cs.toronto.edu/~frossard/post/vgg16/ # # # # Model from https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md # # Weights from Caffe converted using https://github.com/ethereon/caffe-tensorflow # ######################################################################################## import tensorflow as tf import numpy as np from scipy.misc import imread, imresize from imagenet_classes import class_names class vgg16: def __init__(self, imgs, weights=None, sess=None): self.imgs = imgs tf.summary.image("imgs", self.imgs) self.convlayers() self.fc_layers() self.probs = tf.nn.softmax(self.fc3l) if weights is not None and sess is not None: self.load_weights(weights, sess) self.my_summaries = tf.summary.merge_all() self.my_writer = tf.summary.FileWriter('tb_files', sess.graph) def convlayers(self): self.parameters = [] # zero-mean input with tf.name_scope('preprocess') as scope: mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean') images = self.imgs-mean # conv1_1 with tf.name_scope('conv1_1') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv1_1 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # conv1_2 with tf.name_scope('conv1_2') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.conv1_1, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv1_2 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # pool1 self.pool1 = tf.nn.max_pool(self.conv1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1') # conv2_1 with tf.name_scope('conv2_1') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.pool1, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv2_1 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # conv2_2 with tf.name_scope('conv2_2') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.conv2_1, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv2_2 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # pool2 self.pool2 = tf.nn.max_pool(self.conv2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2') # conv3_1 with tf.name_scope('conv3_1') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.pool2, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases') out = 
tf.nn.bias_add(conv, biases) self.conv3_1 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # conv3_2 with tf.name_scope('conv3_2') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.conv3_1, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv3_2 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # conv3_3 with tf.name_scope('conv3_3') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.conv3_2, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv3_3 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # pool3 self.pool3 = tf.nn.max_pool(self.conv3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3') # conv4_1 with tf.name_scope('conv4_1') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.pool3, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv4_1 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # conv4_2 with tf.name_scope('conv4_2') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.conv4_1, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv4_2 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # conv4_3 with tf.name_scope('conv4_3') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.conv4_2, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv4_3 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # pool4 self.pool4 = tf.nn.max_pool(self.conv4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4') # conv5_1 with tf.name_scope('conv5_1') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.pool4, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv5_1 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # conv5_2 with tf.name_scope('conv5_2') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.conv5_1, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv5_2 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # conv5_3 with tf.name_scope('conv5_3') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 
512, 512], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(self.conv5_2, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases') out = tf.nn.bias_add(conv, biases) self.conv5_3 = tf.nn.relu(out, name=scope) self.parameters += [kernel, biases] # pool5 self.pool5 = tf.nn.max_pool(self.conv5_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4') def fc_layers(self): # fc1 with tf.name_scope('fc1') as scope: shape = int(np.prod(self.pool5.get_shape()[1:])) fc1w = tf.Variable(tf.truncated_normal([shape, 4096], dtype=tf.float32, stddev=1e-1), name='weights') fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32), trainable=True, name='biases') pool5_flat = tf.reshape(self.pool5, [-1, shape]) fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b) self.fc1 = tf.nn.relu(fc1l) self.parameters += [fc1w, fc1b] # fc2 with tf.name_scope('fc2') as scope: fc2w = tf.Variable(tf.truncated_normal([4096, 4096], dtype=tf.float32, stddev=1e-1), name='weights') fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32), trainable=True, name='biases') fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b) self.fc2 = tf.nn.relu(fc2l) self.parameters += [fc2w, fc2b] # fc3 with tf.name_scope('fc3') as scope: fc3w = tf.Variable(tf.truncated_normal([4096, 1000], dtype=tf.float32, stddev=1e-1), name='weights') fc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32), trainable=True, name='biases') self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b) self.parameters += [fc3w, fc3b] def load_weights(self, weight_file, sess): weights = np.load(weight_file) keys = sorted(weights.keys()) for i, k in enumerate(keys): print(i, k, np.shape(weights[k])) sess.run(self.parameters[i].assign(weights[k]))
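# Minimal usage sketch (illustrative only): assumes TensorFlow 1.x graph mode,
# a converted weight file named 'vgg16_weights.npz' and a sample image
# 'test.png' -- both file names are placeholders and do not ship with this file.
if __name__ == '__main__':
    sess = tf.Session()
    imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    vgg = vgg16(imgs, 'vgg16_weights.npz', sess)

    # load and resize a test image to the 224x224 input the network expects
    img1 = imresize(imread('test.png', mode='RGB'), (224, 224))

    # forward pass; print the five most probable ImageNet classes
    prob = sess.run(vgg.probs, feed_dict={vgg.imgs: [img1]})[0]
    for p in np.argsort(prob)[::-1][:5]:
        print(class_names[p], prob[p])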
import logging from typing import Dict, List, Optional from great_expectations.core.usage_statistics.anonymizers.anonymizer import Anonymizer from great_expectations.core.usage_statistics.anonymizers.batch_request_anonymizer import ( BatchRequestAnonymizer, ) from great_expectations.core.usage_statistics.anonymizers.expectation_suite_anonymizer import ( ExpectationSuiteAnonymizer, ) from great_expectations.core.usage_statistics.util import ( aggregate_all_core_expectation_types, ) from great_expectations.rule_based_profiler.config.base import RuleBasedProfilerConfig from great_expectations.rule_based_profiler.domain_builder.categorical_column_domain_builder import ( CategoricalColumnDomainBuilder, ) from great_expectations.rule_based_profiler.domain_builder.column_domain_builder import ( ColumnDomainBuilder, ) from great_expectations.rule_based_profiler.domain_builder.domain_builder import ( DomainBuilder, ) from great_expectations.rule_based_profiler.domain_builder.map_metric_column_domain_builder import ( MapMetricColumnDomainBuilder, ) from great_expectations.rule_based_profiler.domain_builder.simple_column_suffix_domain_builder import ( SimpleColumnSuffixDomainBuilder, ) from great_expectations.rule_based_profiler.domain_builder.simple_semantic_type_domain_builder import ( SimpleSemanticTypeColumnDomainBuilder, ) from great_expectations.rule_based_profiler.domain_builder.table_domain_builder import ( TableDomainBuilder, ) from great_expectations.rule_based_profiler.expectation_configuration_builder.default_expectation_configuration_builder import ( DefaultExpectationConfigurationBuilder, ) from great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import ( ExpectationConfigurationBuilder, ) from great_expectations.rule_based_profiler.parameter_builder.mean_unexpected_map_metric_multi_batch_parameter_builder import ( MeanUnexpectedMapMetricMultiBatchParameterBuilder, ) from great_expectations.rule_based_profiler.parameter_builder.metric_multi_batch_parameter_builder import ( MetricMultiBatchParameterBuilder, ) from great_expectations.rule_based_profiler.parameter_builder.numeric_metric_range_multi_batch_parameter_builder import ( NumericMetricRangeMultiBatchParameterBuilder, ) from great_expectations.rule_based_profiler.parameter_builder.parameter_builder import ( ParameterBuilder, ) from great_expectations.rule_based_profiler.parameter_builder.regex_pattern_string_parameter_builder import ( RegexPatternStringParameterBuilder, ) from great_expectations.rule_based_profiler.parameter_builder.simple_date_format_string_parameter_builder import ( SimpleDateFormatStringParameterBuilder, ) from great_expectations.rule_based_profiler.parameter_builder.value_set_multi_batch_parameter_builder import ( ValueSetMultiBatchParameterBuilder, ) from great_expectations.util import deep_filter_properties_iterable logger = logging.getLogger(__name__) class ProfilerRunAnonymizer(Anonymizer): def __init__(self, salt: Optional[str] = None) -> None: super().__init__(salt=salt) # ordered bottom up in terms of inheritance order self._ge_domain_builders = [ MapMetricColumnDomainBuilder, CategoricalColumnDomainBuilder, SimpleColumnSuffixDomainBuilder, SimpleSemanticTypeColumnDomainBuilder, ColumnDomainBuilder, TableDomainBuilder, DomainBuilder, ] self._ge_parameter_builders = [ MeanUnexpectedMapMetricMultiBatchParameterBuilder, ValueSetMultiBatchParameterBuilder, NumericMetricRangeMultiBatchParameterBuilder, MetricMultiBatchParameterBuilder, 
RegexPatternStringParameterBuilder, SimpleDateFormatStringParameterBuilder, ParameterBuilder, ] self._ge_expectation_configuration_builders = [ DefaultExpectationConfigurationBuilder, ExpectationConfigurationBuilder, ] self._ge_expectation_types = aggregate_all_core_expectation_types() self._salt = salt self._batch_request_anonymizer = BatchRequestAnonymizer(self._salt) self._expectation_suite_anonymizer = ExpectationSuiteAnonymizer(self._salt) def anonymize_profiler_run(self, profiler_config: RuleBasedProfilerConfig) -> dict: """ Traverse the entire RuleBasedProfiler configuration structure (as per its formal, validated Marshmallow schema) and anonymize every field that can be customized by a user (public fields are recorded as their original names). """ name: str = profiler_config.name anonymized_name: Optional[str] = self.anonymize(name) config_version: float = profiler_config.config_version rules: Dict[str, dict] = profiler_config.rules anonymized_rules: List[dict] = self._anonymize_rules(rules=rules) rule_count: int = len(rules) variables: dict = profiler_config.variables or {} variable_count: int = len(variables) anonymized_profiler_run_properties_dict: dict = { "anonymized_name": anonymized_name, "config_version": config_version, "anonymized_rules": anonymized_rules, "rule_count": rule_count, "variable_count": variable_count, } deep_filter_properties_iterable( properties=anonymized_profiler_run_properties_dict, clean_falsy=True, inplace=True, ) return anonymized_profiler_run_properties_dict def _anonymize_rules(self, rules: Dict[str, dict]) -> List[dict]: anonymized_rules: List[dict] = [] for name, rule in rules.items(): anonymized_rule: dict = self._anonymize_rule(name, rule) anonymized_rules.append(anonymized_rule) logger.debug("Anonymized rule %s", name) return anonymized_rules def _anonymize_rule(self, name: str, rule: dict) -> dict: anonymized_rule: dict = {} anonymized_rule["anonymized_name"] = self.anonymize(name) domain_builder: Optional[dict] = rule.get("domain_builder") if domain_builder is not None: anonymized_rule[ "anonymized_domain_builder" ] = self._anonymize_domain_builder(domain_builder) parameter_builders: List[dict] = rule.get("parameter_builders", []) anonymized_rule[ "anonymized_parameter_builders" ] = self._anonymize_parameter_builders(parameter_builders) expectation_configuration_builders: List[dict] = rule.get( "expectation_configuration_builders", [] ) anonymized_rule[ "anonymized_expectation_configuration_builders" ] = self._anonymize_expectation_configuration_builders( expectation_configuration_builders ) return anonymized_rule def _anonymize_domain_builder(self, domain_builder: dict) -> dict: anonymized_domain_builder: dict = self.anonymize_object_info( object_config=domain_builder, anonymized_info_dict={}, ge_classes=self._ge_domain_builders, runtime_environment={ "module_name": "great_expectations.rule_based_profiler.domain_builder" }, ) batch_request: Optional[dict] = domain_builder.get("batch_request") if batch_request: anonymized_batch_request: Optional[ dict ] = self._batch_request_anonymizer.anonymize_batch_request(**batch_request) anonymized_domain_builder[ "anonymized_batch_request" ] = anonymized_batch_request logger.debug("Anonymized batch request in DomainBuilder") return anonymized_domain_builder def _anonymize_parameter_builders( self, parameter_builders: List[dict] ) -> List[dict]: anonymized_parameter_builders: List[dict] = [] for parameter_builder in parameter_builders: anonymized_parameter_builder: dict = 
self._anonymize_parameter_builder( parameter_builder ) anonymized_parameter_builders.append(anonymized_parameter_builder) return anonymized_parameter_builders def _anonymize_parameter_builder(self, parameter_builder: dict) -> dict: anonymized_parameter_builder: dict = self.anonymize_object_info( object_config=parameter_builder, anonymized_info_dict={}, ge_classes=self._ge_parameter_builders, runtime_environment={ "module_name": "great_expectations.rule_based_profiler.parameter_builder" }, ) anonymized_parameter_builder["anonymized_name"] = self.anonymize( parameter_builder.get("name") ) batch_request: Optional[dict] = parameter_builder.get("batch_request") if batch_request: anonymized_batch_request: Optional[ dict ] = self._batch_request_anonymizer.anonymize_batch_request(**batch_request) anonymized_parameter_builder[ "anonymized_batch_request" ] = anonymized_batch_request logger.debug("Anonymized batch request in ParameterBuilder") return anonymized_parameter_builder def _anonymize_expectation_configuration_builders( self, expectation_configuration_builders: List[dict] ) -> List[dict]: anonymized_expectation_configuration_builders: List[dict] = [] for expectation_configuration_builder in expectation_configuration_builders: anonymized_expectation_configuration_builder: dict = ( self._anonymize_expectation_configuration_builder( expectation_configuration_builder ) ) anonymized_expectation_configuration_builders.append( anonymized_expectation_configuration_builder ) return anonymized_expectation_configuration_builders def _anonymize_expectation_configuration_builder( self, expectation_configuration_builder: dict ) -> dict: anonymized_expectation_configuration_builder: dict = self.anonymize_object_info( object_config=expectation_configuration_builder, anonymized_info_dict={}, ge_classes=self._ge_expectation_configuration_builders, runtime_environment={ "module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder" }, ) expectation_type: Optional[str] = expectation_configuration_builder.get( "expectation_type" ) self._expectation_suite_anonymizer.anonymize_expectation( expectation_type, anonymized_expectation_configuration_builder ) condition: Optional[str] = expectation_configuration_builder.get("condition") if condition: anonymized_expectation_configuration_builder[ "anonymized_condition" ] = self.anonymize(condition) logger.debug("Anonymized condition in ExpectationConfigurationBuilder") return anonymized_expectation_configuration_builder
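# Hedged usage sketch (illustrative only): assumes the installed
# great_expectations version lets RuleBasedProfilerConfig be constructed with
# the keyword arguments shown, and that each rule dict follows the layout
# _anonymize_rule() reads ("domain_builder", "parameter_builders",
# "expectation_configuration_builders"). All names below are hypothetical.
def _demo_profiler_run_anonymizer() -> dict:
    profiler_config = RuleBasedProfilerConfig(
        name="my_profiler",
        config_version=1.0,
        rules={
            "my_rule": {
                "domain_builder": {"class_name": "TableDomainBuilder"},
                "parameter_builders": [
                    {
                        "class_name": "MetricMultiBatchParameterBuilder",
                        "name": "my_parameter",
                    }
                ],
                "expectation_configuration_builders": [
                    {
                        "class_name": "DefaultExpectationConfigurationBuilder",
                        "expectation_type": "expect_column_values_to_not_be_null",
                    }
                ],
            }
        },
    )
    anonymizer = ProfilerRunAnonymizer()
    return anonymizer.anonymize_profiler_run(profiler_config=profiler_config)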
import argparse import logging import os import re import subprocess import sys from collections import OrderedDict try: from ..manifest import manifest from ..manifest.utils import git as get_git_cmd except ValueError: # if we're not within the tools package, the above is an import from above # the top-level which raises ValueError, so reimport it with an absolute # reference # # note we need both because depending on caller we may/may not have the # paths set up correctly to handle both and MYPY has no knowledge of our # sys.path magic from manifest import manifest # type: ignore from manifest.utils import git as get_git_cmd # type: ignore MYPY = False if MYPY: # MYPY is set to True when run under Mypy. from typing import Any from typing import Dict from typing import Iterable from typing import List from typing import Optional from typing import Pattern from typing import Sequence from typing import Set from typing import Text from typing import Tuple DEFAULT_IGNORE_RULERS = ("resources/testharness*", "resources/testdriver*") here = os.path.dirname(__file__) wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir)) logger = logging.getLogger() def display_branch_point(): # type: () -> None print(branch_point()) def branch_point(): # type: () -> Optional[Text] git = get_git_cmd(wpt_root) if git is None: raise Exception("git not found") if (os.environ.get("GITHUB_PULL_REQUEST", "false") == "false" and os.environ.get("GITHUB_BRANCH") == "master"): # For builds on the master branch just return the HEAD commit return git("rev-parse", "HEAD") elif os.environ.get("GITHUB_PULL_REQUEST", "false") != "false": # This is a PR, so the base branch is in GITHUB_BRANCH base_branch = os.environ.get("GITHUB_BRANCH") assert base_branch, "GITHUB_BRANCH environment variable is defined" branch_point = git("merge-base", "HEAD", base_branch) # type: Optional[Text] else: # Otherwise we aren't on a PR, so we try to find commits that are only in the # current branch c.f. 
# http://stackoverflow.com/questions/13460152/find-first-ancestor-commit-in-another-branch # parse HEAD into an object ref head = git("rev-parse", "HEAD") # get everything in refs/heads and refs/remotes that doesn't include HEAD not_heads = [item for item in git("rev-parse", "--not", "--branches", "--remotes").split("\n") if item and item != "^%s" % head] # get all commits on HEAD but not reachable from anything in not_heads cmd = ["git", "rev-list", "--topo-order", "--parents", "--stdin", "HEAD"] proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=wpt_root) commits_bytes, _ = proc.communicate(b"\n".join(item.encode("ascii") for item in not_heads)) if proc.returncode != 0: raise subprocess.CalledProcessError(proc.returncode, cmd, commits_bytes) commit_parents = OrderedDict() # type: Dict[Text, List[Text]] commits = commits_bytes.decode("ascii") if commits: for line in commits.split("\n"): line_commits = line.split(" ") commit_parents[line_commits[0]] = line_commits[1:] branch_point = None # if there are any commits, take the first parent that is not in commits for commit, parents in commit_parents.items(): for parent in parents: if parent not in commit_parents: branch_point = parent break if branch_point: break # if we had any commits, we should now have a branch point assert branch_point or not commit_parents # The above heuristic will fail in the following cases: # # - The current branch has fallen behind the remote version # - Changes on the current branch were rebased and therefore do not exist on any # other branch. This will result in the selection of a commit that is earlier # in the history than desired (as determined by calculating the later of the # branch point and the merge base) # # In either case, fall back to using the merge base as the branch point. merge_base = git("merge-base", "HEAD", "origin/master") if (branch_point is None or (branch_point != merge_base and not git("log", "--oneline", "%s..%s" % (merge_base, branch_point)).strip())): logger.debug("Using merge-base as the branch point") branch_point = merge_base else: logger.debug("Using first commit on another branch as the branch point") logger.debug("Branch point from master: %s" % branch_point) if branch_point: branch_point = branch_point.strip() return branch_point def compile_ignore_rule(rule): # type: (Text) -> Pattern[Text] rule = rule.replace(os.path.sep, u"/") parts = rule.split(u"/") re_parts = [] for part in parts: if part.endswith(u"**"): re_parts.append(re.escape(part[:-2]) + u".*") elif part.endswith(u"*"): re_parts.append(re.escape(part[:-1]) + u"[^/]*") else: re_parts.append(re.escape(part)) return re.compile(u"^%s$" % u"/".join(re_parts)) def repo_files_changed(revish, include_uncommitted=False, include_new=False): # type: (Text, bool, bool) -> Set[Text] git = get_git_cmd(wpt_root) if git is None: raise Exception("git not found") if "..." in revish: raise Exception(f"... not supported when finding files changed (revish: {revish!r}") if ".." in revish: # ".." 
isn't treated as a range for git-diff; what we want is # everything reachable from B but not A, and git diff A...B # gives us that (via the merge-base) revish = revish.replace("..", "...") files_list = git("diff", "--no-renames", "--name-only", "-z", revish).split(u"\0") assert not files_list[-1], f"final item should be empty, got: {files_list[-1]!r}" files = set(files_list[:-1]) if include_uncommitted: entries = git("status", "-z").split("\0") assert not entries[-1] entries = entries[:-1] for item in entries: status, path = item.split(" ", 1) if status == "??" and not include_new: continue else: if not os.path.isdir(path): files.add(path) else: for dirpath, dirnames, filenames in os.walk(path): for filename in filenames: files.add(os.path.join(dirpath, filename)) return files def exclude_ignored(files, ignore_rules): # type: (Iterable[Text], Optional[Sequence[Text]]) -> Tuple[List[Text], List[Text]] if ignore_rules is None: ignore_rules = DEFAULT_IGNORE_RULERS compiled_ignore_rules = [compile_ignore_rule(item) for item in set(ignore_rules)] changed = [] ignored = [] for item in sorted(files): fullpath = os.path.join(wpt_root, item) rule_path = item.replace(os.path.sep, "/") for rule in compiled_ignore_rules: if rule.match(rule_path): ignored.append(fullpath) break else: changed.append(fullpath) return changed, ignored def files_changed(revish, # type: Text ignore_rules=None, # type: Optional[Sequence[Text]] include_uncommitted=False, # type: bool include_new=False # type: bool ): # type: (...) -> Tuple[List[Text], List[Text]] """Find files changed in certain revisions. The function passes `revish` directly to `git diff`, so `revish` can have a variety of forms; see `git diff --help` for details. Files in the diff that are matched by `ignore_rules` are excluded. """ files = repo_files_changed(revish, include_uncommitted=include_uncommitted, include_new=include_new) if not files: return [], [] return exclude_ignored(files, ignore_rules) def _in_repo_root(full_path): # type: (Text) -> bool rel_path = os.path.relpath(full_path, wpt_root) path_components = rel_path.split(os.sep) return len(path_components) < 2 def load_manifest(manifest_path=None, manifest_update=True): # type: (Optional[Text], bool) -> manifest.Manifest if manifest_path is None: manifest_path = os.path.join(wpt_root, u"MANIFEST.json") return manifest.load_and_update(wpt_root, manifest_path, "/", update=manifest_update) def affected_testfiles(files_changed, # type: Iterable[Text] skip_dirs=None, # type: Optional[Set[Text]] manifest_path=None, # type: Optional[Text] manifest_update=True # type: bool ): # type: (...) -> Tuple[Set[Text], Set[Text]] """Determine and return list of test files that reference changed files.""" if skip_dirs is None: skip_dirs = {u"conformance-checkers", u"docs", u"tools"} affected_testfiles = set() # Exclude files that are in the repo root, because # they are not part of any test. 
files_changed = [f for f in files_changed if not _in_repo_root(f)] nontests_changed = set(files_changed) wpt_manifest = load_manifest(manifest_path, manifest_update) test_types = ["crashtest", "print-reftest", "reftest", "testharness", "wdspec"] support_files = {os.path.join(wpt_root, path) for _, path, _ in wpt_manifest.itertypes("support")} wdspec_test_files = {os.path.join(wpt_root, path) for _, path, _ in wpt_manifest.itertypes("wdspec")} test_files = {os.path.join(wpt_root, path) for _, path, _ in wpt_manifest.itertypes(*test_types)} interface_dir = os.path.join(wpt_root, 'interfaces') interfaces_files = {os.path.join(wpt_root, 'interfaces', filename) for filename in os.listdir(interface_dir)} interfaces_changed = interfaces_files.intersection(nontests_changed) nontests_changed = nontests_changed.intersection(support_files) tests_changed = {item for item in files_changed if item in test_files} nontest_changed_paths = set() rewrites = {"/resources/webidl2/lib/webidl2.js": "/resources/WebIDLParser.js"} # type: Dict[Text, Text] for full_path in nontests_changed: rel_path = os.path.relpath(full_path, wpt_root) path_components = rel_path.split(os.sep) top_level_subdir = path_components[0] if top_level_subdir in skip_dirs: continue repo_path = "/" + os.path.relpath(full_path, wpt_root).replace(os.path.sep, "/") if repo_path in rewrites: repo_path = rewrites[repo_path] full_path = os.path.join(wpt_root, repo_path[1:].replace("/", os.path.sep)) nontest_changed_paths.add((full_path, repo_path)) interfaces_changed_names = [os.path.splitext(os.path.basename(interface))[0] for interface in interfaces_changed] def affected_by_wdspec(test): # type: (Text) -> bool affected = False if test in wdspec_test_files: for support_full_path, _ in nontest_changed_paths: # parent of support file or of "support" directory parent = os.path.dirname(support_full_path) if os.path.basename(parent) == "support": parent = os.path.dirname(parent) relpath = os.path.relpath(test, parent) if not relpath.startswith(os.pardir): # testfile is in subtree of support file affected = True break return affected def affected_by_interfaces(file_contents): # type: (Text) -> bool if len(interfaces_changed_names) > 0: if 'idlharness.js' in file_contents: for interface in interfaces_changed_names: regex = '[\'"]' + interface + '(\\.idl)?[\'"]' if re.search(regex, file_contents): return True return False for root, dirs, fnames in os.walk(wpt_root): # Walk top_level_subdir looking for test files containing either the # relative filepath or absolute filepath to the changed files. if root == wpt_root: for dir_name in skip_dirs: dirs.remove(dir_name) for fname in fnames: test_full_path = os.path.join(root, fname) # Skip any file that's not a test file. 
if test_full_path not in test_files: continue if affected_by_wdspec(test_full_path): affected_testfiles.add(test_full_path) continue with open(test_full_path, "rb") as fh: raw_file_contents = fh.read() # type: bytes if raw_file_contents.startswith(b"\xfe\xff"): file_contents = raw_file_contents.decode("utf-16be", "replace") # type: Text elif raw_file_contents.startswith(b"\xff\xfe"): file_contents = raw_file_contents.decode("utf-16le", "replace") else: file_contents = raw_file_contents.decode("utf8", "replace") for full_path, repo_path in nontest_changed_paths: rel_path = os.path.relpath(full_path, root).replace(os.path.sep, "/") if rel_path in file_contents or repo_path in file_contents or affected_by_interfaces(file_contents): affected_testfiles.add(test_full_path) continue return tests_changed, affected_testfiles def get_parser(): # type: () -> argparse.ArgumentParser parser = argparse.ArgumentParser() parser.add_argument("revish", default=None, help="Commits to consider. Defaults to the " "commits on the current branch", nargs="?") parser.add_argument("--ignore-rule", action="append", help="Override the rules for paths to exclude from lists of changes. " "Rules are paths relative to the test root, with * before a separator " "or the end matching anything other than a path separator and ** in that " "position matching anything. This flag can be used multiple times for " "multiple rules. Specifying this flag overrides the default: " + ", ".join(DEFAULT_IGNORE_RULERS)) parser.add_argument("--modified", action="store_true", help="Include files under version control that have been " "modified or staged") parser.add_argument("--new", action="store_true", help="Include files in the worktree that are not in version control") parser.add_argument("--show-type", action="store_true", help="Print the test type along with each affected test") parser.add_argument("--null", action="store_true", help="Separate items with a null byte") return parser def get_parser_affected(): # type: () -> argparse.ArgumentParser parser = get_parser() parser.add_argument("--metadata", dest="metadata_root", action="store", default=wpt_root, help="Directory that will contain MANIFEST.json") return parser def get_revish(**kwargs): # type: (**Any) -> Text revish = kwargs.get("revish") if revish is None: revish = u"%s..HEAD" % branch_point() return revish.strip() def run_changed_files(**kwargs): # type: (**Any) -> None revish = get_revish(**kwargs) changed, _ = files_changed(revish, kwargs["ignore_rule"], include_uncommitted=kwargs["modified"], include_new=kwargs["new"]) separator = u"\0" if kwargs["null"] else u"\n" for item in sorted(changed): line = os.path.relpath(item, wpt_root) + separator sys.stdout.write(line) def run_tests_affected(**kwargs): # type: (**Any) -> None revish = get_revish(**kwargs) changed, _ = files_changed(revish, kwargs["ignore_rule"], include_uncommitted=kwargs["modified"], include_new=kwargs["new"]) manifest_path = os.path.join(kwargs["metadata_root"], "MANIFEST.json") tests_changed, dependents = affected_testfiles( changed, {"conformance-checkers", "docs", "tools"}, manifest_path=manifest_path ) message = "{path}" if kwargs["show_type"]: wpt_manifest = load_manifest(manifest_path) message = "{path}\t{item_type}" message += "\0" if kwargs["null"] else "\n" for item in sorted(tests_changed | dependents): results = { "path": os.path.relpath(item, wpt_root) } if kwargs["show_type"]: item_types = {i.item_type for i in wpt_manifest.iterpath(results["path"])} if len(item_types) != 1: item_types = {" 
".join(item_types)} results["item_type"] = item_types.pop() sys.stdout.write(message.format(**results))
# coding: utf-8 from __future__ import unicode_literals import json import os import re import subprocess import tempfile from .common import InfoExtractor from ..compat import ( compat_urlparse, compat_kwargs, ) from ..utils import ( check_executable, determine_ext, encodeArgument, ExtractorError, get_element_by_id, get_exe_version, is_outdated_version, std_headers, ) def cookie_to_dict(cookie): cookie_dict = { 'name': cookie.name, 'value': cookie.value, } if cookie.port_specified: cookie_dict['port'] = cookie.port if cookie.domain_specified: cookie_dict['domain'] = cookie.domain if cookie.path_specified: cookie_dict['path'] = cookie.path if cookie.expires is not None: cookie_dict['expires'] = cookie.expires if cookie.secure is not None: cookie_dict['secure'] = cookie.secure if cookie.discard is not None: cookie_dict['discard'] = cookie.discard try: if (cookie.has_nonstandard_attr('httpOnly') or cookie.has_nonstandard_attr('httponly') or cookie.has_nonstandard_attr('HttpOnly')): cookie_dict['httponly'] = True except TypeError: pass return cookie_dict def cookie_jar_to_list(cookie_jar): return [cookie_to_dict(cookie) for cookie in cookie_jar] class PhantomJSwrapper(object): """PhantomJS wrapper class This class is experimental. """ _TEMPLATE = r''' phantom.onError = function(msg, trace) {{ var msgStack = ['PHANTOM ERROR: ' + msg]; if(trace && trace.length) {{ msgStack.push('TRACE:'); trace.forEach(function(t) {{ msgStack.push(' -> ' + (t.file || t.sourceURL) + ': ' + t.line + (t.function ? ' (in function ' + t.function +')' : '')); }}); }} console.error(msgStack.join('\n')); phantom.exit(1); }}; var page = require('webpage').create(); var fs = require('fs'); var read = {{ mode: 'r', charset: 'utf-8' }}; var write = {{ mode: 'w', charset: 'utf-8' }}; JSON.parse(fs.read("{cookies}", read)).forEach(function(x) {{ phantom.addCookie(x); }}); page.settings.resourceTimeout = {timeout}; page.settings.userAgent = "{ua}"; page.onLoadStarted = function() {{ page.evaluate(function() {{ delete window._phantom; delete window.callPhantom; }}); }}; var saveAndExit = function() {{ fs.write("{html}", page.content, write); fs.write("{cookies}", JSON.stringify(phantom.cookies), write); phantom.exit(); }}; page.onLoadFinished = function(status) {{ if(page.url === "") {{ page.setContent(fs.read("{html}", read), "{url}"); }} else {{ {jscode} }} }}; page.open(""); ''' _TMP_FILE_NAMES = ['script', 'html', 'cookies'] @staticmethod def _version(): return get_exe_version('phantomjs', version_re=r'([0-9.]+)') def __init__(self, extractor, required_version=None, timeout=10000): self._TMP_FILES = {} self.exe = check_executable('phantomjs', ['-v']) if not self.exe: raise ExtractorError('PhantomJS executable not found in PATH, ' 'download it from http://phantomjs.org', expected=True) self.extractor = extractor if required_version: version = self._version() if is_outdated_version(version, required_version): self.extractor._downloader.report_warning( 'Your copy of PhantomJS is outdated, update it to version ' '%s or newer if you encounter any errors.' 
% required_version) self.options = { 'timeout': timeout, } for name in self._TMP_FILE_NAMES: tmp = tempfile.NamedTemporaryFile(delete=False) tmp.close() self._TMP_FILES[name] = tmp def __del__(self): for name in self._TMP_FILE_NAMES: try: os.remove(self._TMP_FILES[name].name) except (IOError, OSError, KeyError): pass def _save_cookies(self, url): cookies = cookie_jar_to_list(self.extractor._downloader.cookiejar) for cookie in cookies: if 'path' not in cookie: cookie['path'] = '/' if 'domain' not in cookie: cookie['domain'] = compat_urlparse.urlparse(url).netloc with open(self._TMP_FILES['cookies'].name, 'wb') as f: f.write(json.dumps(cookies).encode('utf-8')) def _load_cookies(self): with open(self._TMP_FILES['cookies'].name, 'rb') as f: cookies = json.loads(f.read().decode('utf-8')) for cookie in cookies: if cookie['httponly'] is True: cookie['rest'] = {'httpOnly': None} if 'expiry' in cookie: cookie['expire_time'] = cookie['expiry'] self.extractor._set_cookie(**compat_kwargs(cookie)) def get(self, url, html=None, video_id=None, note=None, note2='Executing JS on webpage', headers={}, jscode='saveAndExit();'): """ Downloads webpage (if needed) and executes JS Params: url: website url html: optional, html code of website video_id: video id note: optional, displayed when downloading webpage note2: optional, displayed when executing JS headers: custom http headers jscode: code to be executed when page is loaded Returns tuple with: * downloaded website (after JS execution) * anything you print with `console.log` (but not inside `page.execute`!) In most cases you don't need to add any `jscode`. It is executed in `page.onLoadFinished`. `saveAndExit();` is mandatory, use it instead of `phantom.exit()` It is possible to wait for some element on the webpage, for example: var check = function() { var elementFound = page.evaluate(function() { return document.querySelector('#b.done') !== null; }); if(elementFound) saveAndExit(); else window.setTimeout(check, 500); } page.evaluate(function(){ document.querySelector('#a').click(); }); check(); """ if 'saveAndExit();' not in jscode: raise ExtractorError('`saveAndExit();` not found in `jscode`') if not html: html = self.extractor._download_webpage(url, video_id, note=note, headers=headers) with open(self._TMP_FILES['html'].name, 'wb') as f: f.write(html.encode('utf-8')) self._save_cookies(url) replaces = self.options replaces['url'] = url user_agent = headers.get('User-Agent') or std_headers['User-Agent'] replaces['ua'] = user_agent.replace('"', '\\"') replaces['jscode'] = jscode for x in self._TMP_FILE_NAMES: replaces[x] = self._TMP_FILES[x].name.replace('\\', '\\\\').replace('"', '\\"') with open(self._TMP_FILES['script'].name, 'wb') as f: f.write(self._TEMPLATE.format(**replaces).encode('utf-8')) if video_id is None: self.extractor.to_screen('%s' % (note2,)) else: self.extractor.to_screen('%s: %s' % (video_id, note2)) p = subprocess.Popen([ self.exe, '--ssl-protocol=any', self._TMP_FILES['script'].name ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if p.returncode != 0: raise ExtractorError( 'Executing JS failed\n:' + encodeArgument(err)) with open(self._TMP_FILES['html'].name, 'rb') as f: html = f.read().decode('utf-8') self._load_cookies() return (html, encodeArgument(out)) class OpenloadIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:openload\.(?:co|io|link)|oload\.(?:tv|stream|site|xyz|win|download))/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)' _TESTS = [{ 'url': 'https://openload.co/f/kUEfGclsU9o', 'md5': 
'bf1c059b004ebc7a256f89408e65c36e', 'info_dict': { 'id': 'kUEfGclsU9o', 'ext': 'mp4', 'title': 'skyrim_no-audio_1080.mp4', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'https://openload.co/embed/rjC09fkPLYs', 'info_dict': { 'id': 'rjC09fkPLYs', 'ext': 'mp4', 'title': 'movie.mp4', 'thumbnail': r're:^https?://.*\.jpg$', 'subtitles': { 'en': [{ 'ext': 'vtt', }], }, }, 'params': { 'skip_download': True, # test subtitles only }, }, { 'url': 'https://openload.co/embed/kUEfGclsU9o/skyrim_no-audio_1080.mp4', 'only_matching': True, }, { 'url': 'https://openload.io/f/ZAn6oz-VZGE/', 'only_matching': True, }, { 'url': 'https://openload.co/f/_-ztPaZtMhM/', 'only_matching': True, }, { # unavailable via https://openload.co/f/Sxz5sADo82g/, different layout # for title and ext 'url': 'https://openload.co/embed/Sxz5sADo82g/', 'only_matching': True, }, { # unavailable via https://openload.co/embed/e-Ixz9ZR5L0/ but available # via https://openload.co/f/e-Ixz9ZR5L0/ 'url': 'https://openload.co/f/e-Ixz9ZR5L0/', 'only_matching': True, }, { 'url': 'https://oload.tv/embed/KnG-kKZdcfY/', 'only_matching': True, }, { 'url': 'http://www.openload.link/f/KnG-kKZdcfY', 'only_matching': True, }, { 'url': 'https://oload.stream/f/KnG-kKZdcfY', 'only_matching': True, }, { 'url': 'https://oload.xyz/f/WwRBpzW8Wtk', 'only_matching': True, }, { 'url': 'https://oload.win/f/kUEfGclsU9o', 'only_matching': True, }, { 'url': 'https://oload.download/f/kUEfGclsU9o', 'only_matching': True, }, { # Its title has not got its extension but url has it 'url': 'https://oload.download/f/N4Otkw39VCw/Tomb.Raider.2018.HDRip.XviD.AC3-EVO.avi.mp4', 'only_matching': True, }] _USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36' @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe[^>]+src=["\']((?:https?://)?(?:openload\.(?:co|io)|oload\.tv)/embed/[a-zA-Z0-9-_]+)', webpage) def _real_extract(self, url): video_id = self._match_id(url) url_pattern = 'https://openload.co/%%s/%s/' % video_id headers = { 'User-Agent': self._USER_AGENT, } for path in ('embed', 'f'): page_url = url_pattern % path last = path == 'f' webpage = self._download_webpage( page_url, video_id, 'Downloading %s webpage' % path, headers=headers, fatal=last) if not webpage: continue if 'File not found' in webpage or 'deleted by the owner' in webpage: if not last: continue raise ExtractorError('File not found', expected=True, video_id=video_id) break phantom = PhantomJSwrapper(self, required_version='2.0') webpage, _ = phantom.get(page_url, html=webpage, video_id=video_id, headers=headers) decoded_id = (get_element_by_id('streamurl', webpage) or get_element_by_id('streamuri', webpage) or get_element_by_id('streamurj', webpage) or self._search_regex( (r'>\s*([\w-]+~\d{10,}~\d+\.\d+\.0\.0~[\w-]+)\s*<', r'>\s*([\w~-]+~\d+\.\d+\.\d+\.\d+~[\w~-]+)', r'>\s*([\w-]+~\d{10,}~(?:[a-f\d]+:){2}:~[\w-]+)\s*<', r'>\s*([\w~-]+~[a-f0-9:]+~[\w~-]+)\s*<', r'>\s*([\w~-]+~[a-f0-9:]+~[\w~-]+)'), webpage, 'stream URL')) video_url = 'https://openload.co/stream/%s?mime=true' % decoded_id title = self._og_search_title(webpage, default=None) or self._search_regex( r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage, 'title', default=None) or self._html_search_meta( 'description', webpage, 'title', fatal=True) entries = self._parse_html5_media_entries(page_url, webpage, video_id) entry = entries[0] if entries else {} subtitles = entry.get('subtitles') info_dict = { 'id': video_id, 'title': title, 
'thumbnail': entry.get('thumbnail') or self._og_search_thumbnail(webpage, default=None), 'url': video_url, 'ext': determine_ext(title, None) or determine_ext(url, 'mp4'), 'subtitles': subtitles, 'http_headers': headers, } return info_dict
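# Hedged usage sketch (illustrative only, not registered in extractors.py):
# shows the "wait for an element" jscode pattern described in
# PhantomJSwrapper.get(). The URL pattern, element id and regex are placeholders.
class PhantomJSExampleIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[\w-]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        phantom = PhantomJSwrapper(self, required_version='2.0')
        # poll until client-side JS has produced the element we need,
        # then hand the rendered DOM back to Python via saveAndExit()
        jscode = '''
            var check = function() {
                var done = page.evaluate(function() {
                    return document.querySelector('#stream-url') !== null;
                });
                if (done) saveAndExit(); else window.setTimeout(check, 500);
            };
            check();
        '''
        webpage, _ = phantom.get(url, video_id=video_id, jscode=jscode)
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'url': self._html_search_regex(
                r'id=(["\'])stream-url\1[^>]*>(?P<url>[^<]+)',
                webpage, 'stream URL', group='url'),
        }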
# Copyright (c) 2014 Scality # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for the Scality REST Block storage system This driver provisions Linux SRB volumes leveraging RESTful storage platforms (e.g. Scality CDMI). """ import contextlib import functools import re import sys import time from oslo_concurrency import lockutils from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units import six from six.moves import range from cinder.brick.local_dev import lvm from cinder import exception from cinder.i18n import _, _LI, _LE, _LW from cinder.image import image_utils from cinder import utils from cinder.volume import driver from cinder.volume import utils as volutils LOG = logging.getLogger(__name__) srb_opts = [ cfg.StrOpt('srb_base_urls', default=None, help='Comma-separated list of REST servers IP to connect to. ' '(eg http://IP1/,http://IP2:81/path'), ] CONF = cfg.CONF CONF.register_opts(srb_opts) ACCEPTED_REST_SERVER = re.compile(r'^http://' '(\d{1,3}\.){3}\d{1,3}' '(:\d+)?/[a-zA-Z0-9\-_\/]*$') class retry(object): SLEEP_NONE = 'none' SLEEP_DOUBLE = 'double' SLEEP_INCREMENT = 'increment' def __init__(self, exceptions, count, sleep_mechanism=SLEEP_INCREMENT, sleep_factor=1): if sleep_mechanism not in [self.SLEEP_NONE, self.SLEEP_DOUBLE, self.SLEEP_INCREMENT]: raise ValueError('Invalid value for `sleep_mechanism` argument') self._exceptions = exceptions self._count = count self._sleep_mechanism = sleep_mechanism self._sleep_factor = sleep_factor def __call__(self, fun): func_name = fun.func_name @functools.wraps(fun) def wrapped(*args, **kwargs): sleep_time = self._sleep_factor exc_info = None for attempt in range(self._count): if attempt != 0: LOG.warning(_LW('Retrying failed call to %(func)s, ' 'attempt %(attempt)i.'), {'func': func_name, 'attempt': attempt}) try: return fun(*args, **kwargs) except self._exceptions: exc_info = sys.exc_info() if attempt != self._count - 1: if self._sleep_mechanism == self.SLEEP_NONE: continue elif self._sleep_mechanism == self.SLEEP_INCREMENT: time.sleep(sleep_time) sleep_time += self._sleep_factor elif self._sleep_mechanism == self.SLEEP_DOUBLE: time.sleep(sleep_time) sleep_time *= 2 else: raise ValueError('Unknown sleep mechanism: %r' % self._sleep_mechanism) six.reraise(exc_info[0], exc_info[1], exc_info[2]) return wrapped class LVM(lvm.LVM): def activate_vg(self): """Activate the Volume Group associated with this instantiation. :raises: putils.ProcessExecutionError """ cmd = ['vgchange', '-ay', self.vg_name] try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error activating Volume Group')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise def deactivate_vg(self): """Deactivate the Volume Group associated with this instantiation. 
This forces LVM to release any reference to the device. :raises: putils.ProcessExecutionError """ cmd = ['vgchange', '-an', self.vg_name] try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error deactivating Volume Group')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise def destroy_vg(self): """Destroy the Volume Group associated with this instantiation. :raises: putils.ProcessExecutionError """ cmd = ['vgremove', '-f', self.vg_name] try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error destroying Volume Group')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise def pv_resize(self, pv_name, new_size_str): """Extend the size of an existing PV (for virtual PVs). :raises: putils.ProcessExecutionError """ try: self._execute('pvresize', '--setphysicalvolumesize', new_size_str, pv_name, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error resizing Physical Volume')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise def extend_thin_pool(self): """Extend the size of the thin provisioning pool. This method extends the size of a thin provisioning pool to 95% of the size of the VG, if the VG is configured as thin and owns a thin provisioning pool. :raises: putils.ProcessExecutionError """ if self.vg_thin_pool is None: return new_size_str = self._calculate_thin_pool_size() try: self._execute('lvextend', '-L', new_size_str, "%s/%s-pool" % (self.vg_name, self.vg_name), root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error extending thin provisioning pool')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise @contextlib.contextmanager def patched(obj, attr, fun): """Context manager to locally patch a method. Within the managed context, the `attr` method of `obj` will be replaced by a method which calls `fun` passing in the original `attr` attribute of `obj` as well as any positional and keyword arguments. At the end of the context, the original method is restored. """ orig = getattr(obj, attr) def patch(*args, **kwargs): return fun(orig, *args, **kwargs) setattr(obj, attr, patch) try: yield finally: setattr(obj, attr, orig) @contextlib.contextmanager def handle_process_execution_error(message, info_message, reraise=True): """Consistently handle `putils.ProcessExecutionError` exceptions This context-manager will catch any `putils.ProcessExecutionError` exceptions raised in the managed block, and generate logging output accordingly. The value of the `message` argument will be logged at `logging.ERROR` level, and the `info_message` argument at `logging.INFO` level. Finally the command string, exit code, standard output and error output of the process will be logged at `logging.DEBUG` level. The `reraise` argument specifies what should happen when a `putils.ProcessExecutionError` is caught. If it's equal to `True`, the exception will be re-raised. If it's some other non-`False` object, this object will be raised instead (so you most likely want it to be some `Exception`). 
Any `False` value will result in the exception to be swallowed. """ try: yield except putils.ProcessExecutionError as exc: LOG.error(message) LOG.info(info_message) LOG.debug('Command : %s', exc.cmd) LOG.debug('Exit Code : %r', exc.exit_code) LOG.debug('StdOut : %s', exc.stdout) LOG.debug('StdErr : %s', exc.stderr) if reraise is True: raise elif reraise: raise reraise # pylint: disable=E0702 @contextlib.contextmanager def temp_snapshot(driver, volume, src_vref): snapshot = {'volume_name': src_vref['name'], 'volume_id': src_vref['id'], 'volume_size': src_vref['size'], 'name': 'snapshot-clone-%s' % volume['id'], 'id': 'tmp-snap-%s' % volume['id'], 'size': src_vref['size']} driver.create_snapshot(snapshot) try: yield snapshot finally: driver.delete_snapshot(snapshot) @contextlib.contextmanager def temp_raw_device(driver, volume): driver._attach_file(volume) try: yield finally: driver._detach_file(volume) @contextlib.contextmanager def temp_lvm_device(driver, volume): with temp_raw_device(driver, volume): vg = driver._get_lvm_vg(volume) vg.activate_vg() yield vg class SRBDriver(driver.VolumeDriver): """Scality SRB volume driver This driver manages volumes provisioned by the Scality REST Block driver Linux kernel module, backed by RESTful storage providers (e.g. Scality CDMI). """ VERSION = '1.1.0' # Over-allocation ratio (multiplied with requested size) for thin # provisioning OVER_ALLOC_RATIO = 2 SNAPSHOT_PREFIX = 'snapshot' def __init__(self, *args, **kwargs): super(SRBDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(srb_opts) self.urls_setup = False self.backend_name = None self.base_urls = None self.root_helper = utils.get_root_helper() self._attached_devices = {} def _setup_urls(self): if not self.base_urls: message = _("No url configured") raise exception.VolumeBackendAPIException(data=message) with handle_process_execution_error( message=_LE('Cound not setup urls on the Block Driver.'), info_message=_LI('Error creating Volume'), reraise=False): cmd = self.base_urls path = '/sys/class/srb/add_urls' putils.execute('tee', path, process_input=cmd, root_helper=self.root_helper, run_as_root=True) self.urls_setup = True def do_setup(self, context): """Any initialization the volume driver does while starting.""" self.backend_name = self.configuration.safe_get('volume_backend_name') base_urls = self.configuration.safe_get('srb_base_urls') sane_urls = [] if base_urls: for url in base_urls.split(','): stripped_url = url.strip() if ACCEPTED_REST_SERVER.match(stripped_url): sane_urls.append(stripped_url) else: LOG.warning(_LW("%s is not an accepted REST server " "IP address"), stripped_url) self.base_urls = ','.join(sane_urls) self._setup_urls() def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" if not self.base_urls: LOG.warning(_LW("Configuration variable srb_base_urls" " not set or empty.")) if self.urls_setup is False: message = _("Could not setup urls properly") raise exception.VolumeBackendAPIException(data=message) @classmethod def _is_snapshot(cls, volume): return volume['name'].startswith(cls.SNAPSHOT_PREFIX) @classmethod def _get_volname(cls, volume): """Returns the name of the actual volume If the volume is a snapshot, it returns the name of the parent volume. otherwise, returns the volume's name. 
""" name = volume['name'] if cls._is_snapshot(volume): name = "volume-%s" % (volume['volume_id']) return name @classmethod def _get_volid(cls, volume): """Returns the ID of the actual volume If the volume is a snapshot, it returns the ID of the parent volume. otherwise, returns the volume's id. """ volid = volume['id'] if cls._is_snapshot(volume): volid = volume['volume_id'] return volid @classmethod def _device_name(cls, volume): volume_id = cls._get_volid(volume) name = 'cinder-%s' % volume_id # Device names can't be longer than 32 bytes (incl. \0) return name[:31] @classmethod def _device_path(cls, volume): return "/dev/" + cls._device_name(volume) @classmethod def _escape_snapshot(cls, snapshot_name): # Linux LVM reserves name that starts with snapshot, so that # such volume name can't be created. Mangle it. if not snapshot_name.startswith(cls.SNAPSHOT_PREFIX): return snapshot_name return '_' + snapshot_name @classmethod def _mapper_path(cls, volume): groupname = cls._get_volname(volume) name = volume['name'] if cls._is_snapshot(volume): name = cls._escape_snapshot(name) # NOTE(vish): stops deprecation warning groupname = groupname.replace('-', '--') name = name.replace('-', '--') return "/dev/mapper/%s-%s" % (groupname, name) @staticmethod def _size_int(size_in_g): try: return max(int(size_in_g), 1) except ValueError: message = (_("Invalid size parameter '%s': Cannot be interpreted" " as an integer value.") % size_in_g) LOG.error(message) raise exception.VolumeBackendAPIException(data=message) @classmethod def _set_device_path(cls, volume): volume['provider_location'] = cls._get_volname(volume) return { 'provider_location': volume['provider_location'], } @staticmethod def _activate_lv(orig, *args, **kwargs): """Activate lv. Use with `patched` to patch `lvm.LVM.activate_lv` to ignore `EEXIST` """ try: orig(*args, **kwargs) except putils.ProcessExecutionError as exc: if exc.exit_code != 5: raise else: LOG.debug('`activate_lv` returned 5, ignored') def _get_lvm_vg(self, volume, create_vg=False): # NOTE(joachim): One-device volume group to manage thin snapshots # Get origin volume name even for snapshots volume_name = self._get_volname(volume) physical_volumes = [self._device_path(volume)] with patched(lvm.LVM, 'activate_lv', self._activate_lv): return LVM(volume_name, utils.get_root_helper(), create_vg=create_vg, physical_volumes=physical_volumes, lvm_type='thin', executor=self._execute) @staticmethod def _volume_not_present(vg, volume_name): # Used to avoid failing to delete a volume for which # the create operation partly failed return vg.get_volume(volume_name) is None def _create_file(self, volume): message = _('Could not create volume on any configured REST server.') with handle_process_execution_error( message=message, info_message=_LI('Error creating Volume %s.') % volume['name'], reraise=exception.VolumeBackendAPIException(data=message)): size = self._size_int(volume['size']) * self.OVER_ALLOC_RATIO cmd = volume['name'] cmd += ' %dG' % size path = '/sys/class/srb/create' putils.execute('tee', path, process_input=cmd, root_helper=self.root_helper, run_as_root=True) return self._set_device_path(volume) def _extend_file(self, volume, new_size): message = _('Could not extend volume on any configured REST server.') with handle_process_execution_error( message=message, info_message=(_LI('Error extending Volume %s.') % volume['name']), reraise=exception.VolumeBackendAPIException(data=message)): size = self._size_int(new_size) * self.OVER_ALLOC_RATIO cmd = volume['name'] cmd += ' %dG' % 
size path = '/sys/class/srb/extend' putils.execute('tee', path, process_input=cmd, root_helper=self.root_helper, run_as_root=True) @staticmethod def _destroy_file(volume): message = _('Could not destroy volume on any configured REST server.') volname = volume['name'] with handle_process_execution_error( message=message, info_message=_LI('Error destroying Volume %s.') % volname, reraise=exception.VolumeBackendAPIException(data=message)): cmd = volume['name'] path = '/sys/class/srb/destroy' putils.execute('tee', path, process_input=cmd, root_helper=utils.get_root_helper(), run_as_root=True) # NOTE(joachim): Must only be called within a function decorated by: # @lockutils.synchronized('devices', 'cinder-srb-') def _increment_attached_count(self, volume): """Increments the attach count of the device""" volid = self._get_volid(volume) if volid not in self._attached_devices: self._attached_devices[volid] = 1 else: self._attached_devices[volid] += 1 # NOTE(joachim): Must only be called within a function decorated by: # @lockutils.synchronized('devices', 'cinder-srb-') def _decrement_attached_count(self, volume): """Decrements the attach count of the device""" volid = self._get_volid(volume) if volid not in self._attached_devices: raise exception.VolumeBackendAPIException( (_("Internal error in srb driver: " "Trying to detach detached volume %s.")) % (self._get_volname(volume)) ) self._attached_devices[volid] -= 1 if self._attached_devices[volid] == 0: del self._attached_devices[volid] # NOTE(joachim): Must only be called within a function decorated by: # @lockutils.synchronized('devices', 'cinder-srb-') def _get_attached_count(self, volume): volid = self._get_volid(volume) return self._attached_devices.get(volid, 0) @lockutils.synchronized('devices', 'cinder-srb-') def _is_attached(self, volume): return self._get_attached_count(volume) > 0 @lockutils.synchronized('devices', 'cinder-srb-') def _attach_file(self, volume): name = self._get_volname(volume) devname = self._device_name(volume) LOG.debug('Attaching volume %(name)s as %(devname)s', {'name': name, 'devname': devname}) count = self._get_attached_count(volume) if count == 0: message = (_('Could not attach volume %(vol)s as %(dev)s ' 'on system.') % {'vol': name, 'dev': devname}) with handle_process_execution_error( message=message, info_message=_LI('Error attaching Volume'), reraise=exception.VolumeBackendAPIException(data=message)): cmd = name + ' ' + devname path = '/sys/class/srb/attach' putils.execute('tee', path, process_input=cmd, root_helper=self.root_helper, run_as_root=True) else: LOG.debug('Volume %s already attached', name) self._increment_attached_count(volume) @retry(exceptions=(putils.ProcessExecutionError, ), count=3, sleep_mechanism=retry.SLEEP_INCREMENT, sleep_factor=5) def _do_deactivate(self, volume, vg): vg.deactivate_vg() @retry(exceptions=(putils.ProcessExecutionError, ), count=5, sleep_mechanism=retry.SLEEP_DOUBLE, sleep_factor=1) def _do_detach(self, volume, vg): devname = self._device_name(volume) volname = self._get_volname(volume) cmd = devname path = '/sys/class/srb/detach' try: putils.execute('tee', path, process_input=cmd, root_helper=self.root_helper, run_as_root=True) except putils.ProcessExecutionError: with excutils.save_and_reraise_exception(reraise=True): try: with patched(lvm.LVM, 'activate_lv', self._activate_lv): vg.activate_lv(volname) self._do_deactivate(volume, vg) except putils.ProcessExecutionError: LOG.warning(_LW('All attempts to recover failed detach ' 'of %(volume)s failed.'), {'volume': 
volname}) @lockutils.synchronized('devices', 'cinder-srb-') def _detach_file(self, volume): name = self._get_volname(volume) devname = self._device_name(volume) vg = self._get_lvm_vg(volume) LOG.debug('Detaching device %s', devname) count = self._get_attached_count(volume) if count > 1: LOG.info(_LI('Reference count of %(volume)s is %(count)d, ' 'not detaching.'), {'volume': volume['name'], 'count': count}) return message = (_('Could not detach volume %(vol)s from device %(dev)s.') % {'vol': name, 'dev': devname}) with handle_process_execution_error( message=message, info_message=_LI('Error detaching Volume'), reraise=exception.VolumeBackendAPIException(data=message)): try: if vg is not None: self._do_deactivate(volume, vg) except putils.ProcessExecutionError: LOG.error(_LE('Could not deactivate volume group %s'), self._get_volname(volume)) raise try: self._do_detach(volume, vg=vg) except putils.ProcessExecutionError: LOG.error(_LE('Could not detach volume %(vol)s from device ' '%(dev)s.'), {'vol': name, 'dev': devname}) raise self._decrement_attached_count(volume) def _setup_lvm(self, volume): # NOTE(joachim): One-device volume group to manage thin snapshots size = self._size_int(volume['size']) * self.OVER_ALLOC_RATIO size_str = '%dg' % size vg = self._get_lvm_vg(volume, create_vg=True) vg.create_volume(volume['name'], size_str, lv_type='thin') def _destroy_lvm(self, volume): vg = self._get_lvm_vg(volume) if vg.lv_has_snapshot(volume['name']): LOG.error(_LE('Unable to delete due to existing snapshot ' 'for volume: %s.'), volume['name']) raise exception.VolumeIsBusy(volume_name=volume['name']) vg.destroy_vg() # NOTE(joachim) Force lvm vg flush through a vgs command vgs = vg.get_all_volume_groups(root_helper=self.root_helper, vg_name=vg.vg_name) if len(vgs) != 0: LOG.warning(_LW('Removed volume group %s still appears in vgs.'), vg.vg_name) def _create_and_copy_volume(self, dstvol, srcvol): """Creates a volume from a volume or a snapshot.""" updates = self._create_file(dstvol) # We need devices attached for IO operations. with temp_lvm_device(self, srcvol) as vg, \ temp_raw_device(self, dstvol): self._setup_lvm(dstvol) # Some configurations of LVM do not automatically activate # ThinLVM snapshot LVs. with patched(lvm.LVM, 'activate_lv', self._activate_lv): vg.activate_lv(srcvol['name'], True) # copy_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in volutils.copy_volume(self._mapper_path(srcvol), self._mapper_path(dstvol), srcvol['volume_size'] * units.Ki, self.configuration.volume_dd_blocksize, execute=self._execute) return updates def create_volume(self, volume): """Creates a volume. Can optionally return a Dictionary of changes to the volume object to be persisted. """ updates = self._create_file(volume) # We need devices attached for LVM operations. 
with temp_raw_device(self, volume): self._setup_lvm(volume) return updates def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" return self._create_and_copy_volume(volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" LOG.info(_LI('Creating clone of volume: %s'), src_vref['id']) updates = None with temp_lvm_device(self, src_vref): with temp_snapshot(self, volume, src_vref) as snapshot: updates = self._create_and_copy_volume(volume, snapshot) return updates def delete_volume(self, volume): """Deletes a volume.""" attached = False if self._is_attached(volume): attached = True with temp_lvm_device(self, volume): self._destroy_lvm(volume) self._detach_file(volume) LOG.debug('Deleting volume %(volume_name)s, attached=%(attached)s', {'volume_name': volume['name'], 'attached': attached}) self._destroy_file(volume) def create_snapshot(self, snapshot): """Creates a snapshot.""" with temp_lvm_device(self, snapshot) as vg: # NOTE(joachim) we only want to support thin lvm_types vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']), snapshot['volume_name'], lv_type='thin') def delete_snapshot(self, snapshot): """Deletes a snapshot.""" with temp_lvm_device(self, snapshot) as vg: if self._volume_not_present( vg, self._escape_snapshot(snapshot['name'])): # If the snapshot isn't present, then don't attempt to delete LOG.warning(_LW("snapshot: %s not found, " "skipping delete operations"), snapshot['name']) return vg.delete(self._escape_snapshot(snapshot['name'])) def get_volume_stats(self, refresh=False): """Return the current state of the volume service.""" stats = { 'vendor_name': 'Scality', 'driver_version': self.VERSION, 'storage_protocol': 'Scality Rest Block Device', 'total_capacity_gb': 'infinite', 'free_capacity_gb': 'infinite', 'reserved_percentage': 0, 'volume_backend_name': self.backend_name, } return stats def copy_image_to_volume(self, context, volume, image_service, image_id): """Fetch the image from image_service and write it to the volume.""" with temp_lvm_device(self, volume): image_utils.fetch_to_volume_format(context, image_service, image_id, self._mapper_path(volume), 'qcow2', self.configuration. volume_dd_blocksize, size=volume['size']) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" with temp_lvm_device(self, volume): image_utils.upload_volume(context, image_service, image_meta, self._mapper_path(volume)) def extend_volume(self, volume, new_size): new_alloc_size = self._size_int(new_size) * self.OVER_ALLOC_RATIO new_size_str = '%dg' % new_alloc_size self._extend_file(volume, new_size) with temp_lvm_device(self, volume) as vg: vg.pv_resize(self._device_path(volume), new_size_str) vg.extend_thin_pool() vg.extend_volume(volume['name'], new_size_str) class SRBISCSIDriver(SRBDriver, driver.ISCSIDriver): """Scality SRB volume driver with ISCSI support This driver manages volumes provisioned by the Scality REST Block driver Linux kernel module, backed by RESTful storage providers (e.g. Scality CDMI), and exports them through ISCSI to Nova. 
""" VERSION = '1.0.0' def __init__(self, *args, **kwargs): self.db = kwargs.get('db') self.target_driver = \ self.target_mapping[self.configuration.safe_get('iscsi_helper')] super(SRBISCSIDriver, self).__init__(*args, **kwargs) self.backend_name =\ self.configuration.safe_get('volume_backend_name') or 'SRB_iSCSI' self.protocol = 'iSCSI' def set_execute(self, execute): super(SRBISCSIDriver, self).set_execute(execute) if self.target_driver is not None: self.target_driver.set_execute(execute) def ensure_export(self, context, volume): device_path = self._mapper_path(volume) model_update = self.target_driver.ensure_export(context, volume, device_path) if model_update: self.db.volume_update(context, volume['id'], model_update) def create_export(self, context, volume): """Creates an export for a logical volume.""" self._attach_file(volume) vg = self._get_lvm_vg(volume) vg.activate_vg() # SRB uses the same name as the volume for the VG volume_path = self._mapper_path(volume) data = self.target_driver.create_export(context, volume, volume_path) return { 'provider_location': data['location'], 'provider_auth': data['auth'], } def remove_export(self, context, volume): # NOTE(joachim) Taken from iscsi._ExportMixin.remove_export # This allows us to avoid "detaching" a device not attached by # an export, and avoid screwing up the device attach refcount. try: # Raises exception.NotFound if export not provisioned iscsi_target = self.target_driver._get_iscsi_target(context, volume['id']) # Raises an Exception if currently not exported location = volume['provider_location'].split(' ') iqn = location[1] self.target_driver.show_target(iscsi_target, iqn=iqn) self.target_driver.remove_export(context, volume) self._detach_file(volume) except exception.NotFound: LOG.warning(_LW('Volume %r not found while trying to remove.'), volume['id']) except Exception as exc: LOG.warning(_LW('Error while removing export: %r'), exc)
import logging import sys import param import numpy as np from cartopy import crs as ccrs from cartopy.img_transform import warp_array, _determine_bounds from holoviews.core.data import MultiInterface from holoviews.core.util import cartesian_product, get_param_values, pd from holoviews.operation import Operation from shapely.geometry import Polygon, MultiPolygon from shapely.geometry.collection import GeometryCollection from ..data import GeoPandasInterface from ..element import (Image, Shape, Polygons, Path, Points, Contours, RGB, Graph, Nodes, EdgePaths, QuadMesh, VectorField, HexTiles, Labels, Rectangles, Segments) from ..util import ( project_extents, path_to_geom_dicts, polygons_to_geom_dicts, geom_dict_to_array_dict ) class _project_operation(Operation): """ Baseclass for projection operations, projecting elements from their source coordinate reference system to the supplied projection. """ projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR, class_=ccrs.Projection, instantiate=False, doc=""" Projection the shape type is projected to.""") # Defines the types of elements supported by the operation supported_types = [] def _process(self, element, key=None): return element.map(self._process_element, self.supported_types) class project_path(_project_operation): """ Projects Polygons and Path Elements from their source coordinate reference system to the supplied projection. """ supported_types = [Polygons, Path, Contours, EdgePaths] def _process_element(self, element): if not bool(element): return element.clone(crs=self.p.projection) crs = element.crs proj = self.p.projection if (isinstance(crs, ccrs.PlateCarree) and not isinstance(proj, ccrs.PlateCarree) and crs.proj4_params['lon_0'] != 0): element = self.instance(projection=ccrs.PlateCarree())(element) if isinstance(proj, ccrs.CRS) and not isinstance(proj, ccrs.Projection): raise ValueError('invalid transform:' ' Spherical contouring is not supported - ' ' consider using PlateCarree/RotatedPole.') if isinstance(element, Polygons): geoms = polygons_to_geom_dicts(element, skip_invalid=False) else: geoms = path_to_geom_dicts(element, skip_invalid=False) projected = [] for path in geoms: geom = path['geometry'] # Ensure minimum area for polygons (precision issues cause errors) if isinstance(geom, Polygon) and geom.area < 1e-15: continue elif isinstance(geom, MultiPolygon): polys = [g for g in geom if g.area > 1e-15] if not polys: continue geom = MultiPolygon(polys) elif (not geom or isinstance(geom, GeometryCollection)): continue proj_geom = proj.project_geometry(geom, element.crs) # Attempt to fix geometry without being noisy about it logger = logging.getLogger() try: prev = logger.level logger.setLevel(logging.ERROR) if not proj_geom.is_valid: proj_geom = proj.project_geometry(geom.buffer(0), element.crs) except: continue finally: logger.setLevel(prev) if proj_geom.geom_type == 'GeometryCollection' and len(proj_geom) == 0: continue data = dict(path, geometry=proj_geom) if 'holes' in data: data.pop('holes') projected.append(data) if len(geoms) and len(projected) == 0: self.warning('While projecting a %s element from a %s coordinate ' 'reference system (crs) to a %s projection none of ' 'the projected paths were contained within the bounds ' 'specified by the projection. Ensure you have specified ' 'the correct coordinate system for your data.' 
% (type(element).__name__, type(element.crs).__name__, type(self.p.projection).__name__)) # Try casting back to original types if element.interface is GeoPandasInterface: import geopandas as gpd projected = gpd.GeoDataFrame(projected, columns=element.data.columns) elif element.interface is MultiInterface: x, y = element.kdims item = element.data[0] if element.data else None if item is None or (isinstance(item, dict) and 'geometry' in item): return element.clone(projected, crs=self.p.projection) projected = [geom_dict_to_array_dict(p, [x.name, y.name]) for p in projected] if any('holes' in p for p in projected): pass elif pd and isinstance(item, pd.DataFrame): projected = [pd.DataFrame(p, columns=item.columns) for p in projected] elif isinstance(item, np.ndarray): projected = [np.column_stack([p[d.name] for d in element.dimensions()]) for p in projected] return element.clone(projected, crs=self.p.projection) class project_shape(_project_operation): """ Projects Shape Element from the source coordinate reference system to the supplied projection. """ supported_types = [Shape] def _process_element(self, element): if not len(element): return element.clone(crs=self.p.projection) geom = element.geom() if isinstance(geom, (MultiPolygon, Polygon)): obj = Polygons([geom]) else: obj = Path([geom]) geom = project_path(obj, projection=self.p.projection).geom() return element.clone(geom, crs=self.p.projection) class project_points(_project_operation): supported_types = [Points, Nodes, VectorField, HexTiles, Labels] def _process_element(self, element): if not len(element): return element.clone(crs=self.p.projection) xdim, ydim = element.dimensions()[:2] xs, ys = (element.dimension_values(i) for i in range(2)) coordinates = self.p.projection.transform_points(element.crs, xs, ys) mask = np.isfinite(coordinates[:, 0]) dims = [d for d in element.dimensions() if d not in (xdim, ydim)] new_data = {k: v[mask] for k, v in element.columns(dims).items()} new_data[xdim.name] = coordinates[mask, 0] new_data[ydim.name] = coordinates[mask, 1] if len(new_data[xdim.name]) == 0: self.warning('While projecting a %s element from a %s coordinate ' 'reference system (crs) to a %s projection none of ' 'the projected paths were contained within the bounds ' 'specified by the projection. Ensure you have specified ' 'the correct coordinate system for your data.' % (type(element).__name__, type(element.crs).__name__, type(self.p.projection).__name__)) return element.clone(tuple(new_data[d.name] for d in element.dimensions()), crs=self.p.projection) class project_geom(_project_operation): supported_types = [Rectangles, Segments] def _process_element(self, element): if not len(element): return element.clone(crs=self.p.projection) x0d, y0d, x1d, y1d = element.kdims x0, y0, x1, y1 = (element.dimension_values(i) for i in range(4)) p1 = self.p.projection.transform_points(element.crs, x0, y0) p2 = self.p.projection.transform_points(element.crs, x1, y1) mask = np.isfinite(p1[:, 0]) & np.isfinite(p2[:, 0]) new_data = {k: v[mask] for k, v in element.columns(element.vdims).items()} new_data[x0d.name] = p1[mask, 0] new_data[y0d.name] = p1[mask, 1] new_data[x1d.name] = p2[mask, 0] new_data[y1d.name] = p2[mask, 1] if len(new_data[x0d.name]) == 0: self.warning('While projecting a %s element from a %s coordinate ' 'reference system (crs) to a %s projection none of ' 'the projected paths were contained within the bounds ' 'specified by the projection. Ensure you have specified ' 'the correct coordinate system for your data.' 
% (type(element).__name__, type(element.crs).__name__, type(self.p.projection).__name__)) return element.clone(tuple(new_data[d.name] for d in element.dimensions()), crs=self.p.projection) class project_graph(_project_operation): supported_types = [Graph] def _process_element(self, element): proj = self.p.projection nodes = project_points(element.nodes, projection=proj) data = (element.data, nodes) if element._edgepaths: data = data + (project_path(element.edgepaths, projection=proj),) return element.clone(data, crs=proj) class project_quadmesh(_project_operation): supported_types = [QuadMesh] def _process_element(self, element): proj = self.p.projection irregular = any(element.interface.irregular(element, kd) for kd in element.kdims) zs = element.dimension_values(2, flat=False) if irregular: X, Y = [np.asarray(element.interface.coords( element, kd, expanded=True, edges=False)) for kd in element.kdims] else: X = element.interface.coords(element, 0, True, True, False) if np.all(X[0, 1:] < X[0, :-1]): X = X[:, ::-1] Y = element.interface.coords(element, 1, True, True, False) if np.all(Y[1:, 0] < Y[:-1, 0]): Y = Y[::-1, :] if X.shape != zs.shape: X = X[:-1] + np.diff(X, axis=0)/2. X = X[:, :-1] + (np.diff(X, axis=1)/2.) if Y.shape != zs.shape: Y = Y[:-1] + np.diff(Y, axis=0)/2. Y = Y[:, :-1] + (np.diff(Y, axis=1)/2.) coords = proj.transform_points(element.crs, X, Y) PX, PY = coords[..., 0], coords[..., 1] # Mask quads which are wrapping around the x-axis wrap_proj_types = (ccrs._RectangularProjection, ccrs._WarpedRectangularProjection, ccrs.InterruptedGoodeHomolosine, ccrs.Mercator) if isinstance(proj, wrap_proj_types): with np.errstate(invalid='ignore'): edge_lengths = np.hypot( np.diff(PX, axis=1), np.diff(PY, axis=1) ) to_mask = ( (edge_lengths >= abs(proj.x_limits[1] - proj.x_limits[0]) / 2) | np.isnan(edge_lengths) ) if np.any(to_mask): mask = np.zeros(zs.shape, dtype=np.bool) mask[:, 1:][to_mask] = True mask[:, 2:][to_mask[:, :-1]] = True mask[:, :-1][to_mask] = True mask[:, :-2][to_mask[:, 1:]] = True mask[1:, 1:][to_mask[:-1]] = True mask[1:, :-1][to_mask[:-1]] = True mask[:-1, 1:][to_mask[1:]] = True mask[:-1, :-1][to_mask[1:]] = True zs[mask] = np.NaN params = get_param_values(element) return element.clone((PX, PY, zs), crs=self.p.projection, **params) class project_image(_project_operation): """ Projects an geoviews Image to the specified projection, returning a regular HoloViews Image type. Works by regridding the data along projected bounds. Only supports rectangular projections. 
""" fast = param.Boolean(default=False, doc=""" Whether to enable fast reprojection with (much) better performance but poorer handling in polar regions.""") width = param.Integer(default=None, doc=""" Width of the reprojectd Image""") height = param.Integer(default=None, doc=""" Height of the reprojected Image""") link_inputs = param.Boolean(default=True, doc=""" By default, the link_inputs parameter is set to True so that when applying project_image, backends that support linked streams update RangeXY streams on the inputs of the operation.""") supported_types = [Image, RGB] def _process(self, img, key=None): if self.p.fast: return self._fast_process(img, key) proj = self.p.projection x0, x1 = img.range(0, dimension_range=False) y0, y1 = img.range(1, dimension_range=False) yn, xn = img.interface.shape(img, gridded=True)[:2] (px0, py0, px1, py1) = project_extents((x0, y0, x1, y1), img.crs, proj) # Some bug in cartopy is causing zero values eps = sys.float_info.epsilon src_extent = tuple(e+v if e == 0 else e for e, v in zip((x0, x1, y0, y1), (eps, -eps, eps, -eps))) tgt_extent = (px0, px1, py0, py1) if img.crs == proj and np.isclose(src_extent, tgt_extent).all(): return img arrays = [] for vd in img.vdims: arr = img.dimension_values(vd, flat=False) if arr.size: projected, _ = warp_array(arr, proj, img.crs, (xn, yn), src_extent, tgt_extent) else: projected = arr arrays.append(projected) if xn == 0 or yn == 0: return img.clone([], bounds=tgt_extent, crs=proj) xunit = ((tgt_extent[1]-tgt_extent[0])/float(xn))/2. yunit = ((tgt_extent[3]-tgt_extent[2])/float(yn))/2. xs = np.linspace(tgt_extent[0]+xunit, tgt_extent[1]-xunit, xn) ys = np.linspace(tgt_extent[2]+yunit, tgt_extent[3]-yunit, yn) return img.clone((xs, ys)+tuple(arrays), bounds=None, kdims=img.kdims, vdims=img.vdims, crs=proj, xdensity=None, ydensity=None) def _fast_process(self, element, key=None): # Project coordinates proj = self.p.projection if proj == element.crs: return element h, w = element.interface.shape(element, gridded=True)[:2] xs = element.dimension_values(0) ys = element.dimension_values(1) if isinstance(element, RGB): rgb = element.rgb array = np.dstack([np.flipud(rgb.dimension_values(d, flat=False)) for d in rgb.vdims]) else: array = element.dimension_values(2, flat=False) (x0, y0, x1, y1) = element.bounds.lbrt() width = int(w) if self.p.width is None else self.p.width height = int(h) if self.p.height is None else self.p.height bounds = _determine_bounds(xs, ys, element.crs) yb = bounds['y'] resampled = [] xvalues = [] for xb in bounds['x']: px0, py0, px1, py1 = project_extents((xb[0], yb[0], xb[1], yb[1]), element.crs, proj) if len(bounds['x']) > 1: xfraction = (xb[1]-xb[0])/(x1-x0) fraction_width = int(width*xfraction) else: fraction_width = width xs = np.linspace(px0, px1, fraction_width) ys = np.linspace(py0, py1, height) cxs, cys = cartesian_product([xs, ys]) pxs, pys, _ = element.crs.transform_points(proj, np.asarray(cxs), np.asarray(cys)).T icxs = (((pxs-x0) / (x1-x0)) * w).astype(int) icys = (((pys-y0) / (y1-y0)) * h).astype(int) xvalues.append(xs) icxs[icxs<0] = 0 icys[icys<0] = 0 icxs[icxs>=w] = w-1 icys[icys>=h] = h-1 resampled_arr = array[icys, icxs] if isinstance(element, RGB): nvdims = len(element.vdims) resampled_arr = resampled_arr.reshape((fraction_width, height, nvdims)).transpose([1, 0, 2]) else: resampled_arr = resampled_arr.reshape((fraction_width, height)).T resampled.append(resampled_arr) xs = np.concatenate(xvalues[::-1]) resampled = np.hstack(resampled[::-1]) datatypes = 
[element.interface.datatype, 'xarray', 'grid'] data = (xs, ys) for i in range(len(element.vdims)): if resampled.ndim > 2: data = data + (resampled[::-1, :, i],) else: data = data + (resampled,) return element.clone(data, crs=proj, bounds=None, datatype=datatypes) class project(Operation): """ Projects GeoViews Element types to the specified projection. """ projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR, class_=ccrs.Projection, instantiate=False, doc=""" Projection the image type is projected to.""") _operations = [project_path, project_image, project_shape, project_graph, project_quadmesh, project_points, project_geom] def _process(self, element, key=None): for op in self._operations: element = element.map(op.instance(projection=self.p.projection), op.supported_types) return element
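# ---------------------------------------------------------------------------
# Minimal usage sketch for the generic ``project`` operation defined above.
# The function name and the lon/lat sample coordinates are made up for
# illustration; everything else relies only on names already imported in this
# module (Points, ccrs, project).


def _example_project_points():
    """Project lon/lat Points onto the default Web Mercator projection."""
    points = Points([(36.8, -1.3), (101.7, 3.1)], crs=ccrs.PlateCarree())
    # Operations are ParameterizedFunctions, so the class can be called directly
    return project(points, projection=ccrs.GOOGLE_MERCATOR)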
# ActivitySim # See full license in LICENSE.txt. from builtins import range import warnings import logging from collections import OrderedDict import numpy as np import pandas as pd from . import logit from . import tracing from . import pipeline from . import config from . import util from . import assign from . import chunk from . import pathbuilder logger = logging.getLogger(__name__) SPEC_DESCRIPTION_NAME = 'Description' SPEC_EXPRESSION_NAME = 'Expression' SPEC_LABEL_NAME = 'Label' ALT_LOSER_UTIL = -900 def random_rows(df, n): # only sample if df has more than n rows if len(df.index) > n: prng = pipeline.get_rn_generator().get_global_rng() return df.take(prng.choice(len(df), size=n, replace=False)) else: return df def uniquify_spec_index(spec): # uniquify spec index inplace # ensure uniqueness of spec index by appending comment with dupe count # this allows us to use pandas dot to compute_utilities dict = OrderedDict() for expr in spec.index: dict[assign.uniquify_key(dict, expr, template="{} # ({})")] = expr prev_index_name = spec.index.name spec.index = list(dict.keys()) spec.index.name = prev_index_name assert spec.index.is_unique def read_model_alts(file_name, set_index=None): file_path = config.config_file_path(file_name) df = pd.read_csv(file_path, comment='#') if set_index: df.set_index(set_index, inplace=True) return df def read_model_spec(file_name): """ Read a CSV model specification into a Pandas DataFrame or Series. file_path : str absolute or relative path to file The CSV is expected to have columns for component descriptions and expressions, plus one or more alternatives. The CSV is required to have a header with column names. For example: Description,Expression,alt0,alt1,alt2 Parameters ---------- model_settings : dict name of spec_file is in model_settings['SPEC'] and file is relative to configs file_name : str file_name id spec file in configs folder description_name : str, optional Name of the column in `fname` that contains the component description. expression_name : str, optional Name of the column in `fname` that contains the component expression. Returns ------- spec : pandas.DataFrame The description column is dropped from the returned data and the expression values are set as the table index. 
""" assert isinstance(file_name, str) if not file_name.lower().endswith('.csv'): file_name = '%s.csv' % (file_name,) file_path = config.config_file_path(file_name) try: spec = pd.read_csv(file_path, comment='#') except Exception as err: logger.error(f"read_model_spec error reading {file_path}") logger.error(f"read_model_spec error {type(err).__name__}: {str(err)}") raise(err) spec = spec.dropna(subset=[SPEC_EXPRESSION_NAME]) # don't need description and set the expression to the index if SPEC_DESCRIPTION_NAME in spec.columns: spec = spec.drop(SPEC_DESCRIPTION_NAME, axis=1) spec = spec.set_index(SPEC_EXPRESSION_NAME).fillna(0) # ensure uniqueness of spec index by appending comment with dupe count # this allows us to use pandas dot to compute_utilities uniquify_spec_index(spec) if SPEC_LABEL_NAME in spec: spec = spec.set_index(SPEC_LABEL_NAME, append=True) assert isinstance(spec.index, pd.MultiIndex) return spec def read_model_coefficients(model_settings=None, file_name=None): """ Read the coefficient file specified by COEFFICIENTS model setting """ if model_settings is None: assert file_name is not None else: assert file_name is None assert 'COEFFICIENTS' in model_settings, \ "'COEFFICIENTS' tag not in model_settings in %s" % model_settings.get('source_file_paths') file_name = model_settings['COEFFICIENTS'] logger.debug(f"read_model_coefficients file_name {file_name}") file_path = config.config_file_path(file_name) try: coefficients = pd.read_csv(file_path, comment='#', index_col='coefficient_name') except ValueError: logger.exception("Coefficient File Invalid: %s" % str(file_path)) raise if coefficients.index.duplicated().any(): logger.warning(f"duplicate coefficients in {file_path}\n" f"{coefficients[coefficients.index.duplicated(keep=False)]}") raise RuntimeError(f"duplicate coefficients in {file_path}") if coefficients.value.isnull().any(): logger.warning( f"null coefficients in {file_path}\n{coefficients[coefficients.value.isnull()]}") raise RuntimeError(f"null coefficients in {file_path}") return coefficients def spec_for_segment(model_settings, spec_id, segment_name, estimator): """ Select spec for specified segment from omnibus spec containing columns for each segment Parameters ---------- model_spec : pandas.DataFrame omnibus spec file with expressions in index and one column per segment segment_name : str segment_name that is also column name in model_spec Returns ------- pandas.dataframe canonical spec file with expressions in index and single column with utility coefficients """ spec_file_name = model_settings[spec_id] spec = read_model_spec(file_name=spec_file_name) if len(spec.columns) > 1: # if spec is segmented spec = spec[[segment_name]] else: # otherwise we expect a single coefficient column # doesn't really matter what it is called, but this may catch errors assert spec.columns[0] in ['coefficient', segment_name] if 'COEFFICIENTS' not in model_settings: logger.warning(f"no coefficient file specified in model_settings for {spec_file_name}") try: assert (spec.astype(float) == spec).all(axis=None) except (ValueError, AssertionError): raise RuntimeError(f"No coefficient file specified for {spec_file_name} " f"but not all spec column values are numeric") return spec coefficients = read_model_coefficients(model_settings) spec = eval_coefficients(spec, coefficients, estimator) return spec def read_model_coefficient_template(model_settings): """ Read the coefficient template specified by COEFFICIENT_TEMPLATE model setting """ assert 'COEFFICIENT_TEMPLATE' in model_settings, \ 
"'COEFFICIENT_TEMPLATE' not in model_settings in %s" % model_settings.get('source_file_paths') coefficients_file_name = model_settings['COEFFICIENT_TEMPLATE'] file_path = config.config_file_path(coefficients_file_name) try: template = pd.read_csv(file_path, comment='#', index_col='coefficient_name') except ValueError: logger.exception("Coefficient Template File Invalid: %s" % str(file_path)) raise # by convention, an empty cell in the template indicates that # the coefficient name should be propogated to across all segments # this makes for a more legible template than repeating the identical coefficient name in each column # replace missing cell values with coefficient_name from index template = template.where( ~template.isnull(), np.broadcast_to(template.index.values[:, None], template.shape), ) if template.index.duplicated().any(): dupes = template[template.index.duplicated(keep=False)].sort_index() logger.warning(f"duplicate coefficient names in {coefficients_file_name}:\n{dupes}") assert not template.index.duplicated().any() return template def dump_mapped_coefficients(model_settings): """ dump template_df with coefficient values """ coefficients_df = read_model_coefficients(model_settings) template_df = read_model_coefficient_template(model_settings) for c in template_df.columns: template_df[c] = template_df[c].map(coefficients_df.value) coefficients_template_file_name = model_settings['COEFFICIENT_TEMPLATE'] file_path = config.output_file_path(coefficients_template_file_name) template_df.to_csv(file_path, index=True) logger.info(f"wrote mapped coefficient template to {file_path}") coefficients_file_name = model_settings['COEFFICIENTS'] file_path = config.output_file_path(coefficients_file_name) coefficients_df.to_csv(file_path, index=True) logger.info(f"wrote raw coefficients to {file_path}") def get_segment_coefficients(model_settings, segment_name): """ Return a dict mapping generic coefficient names to segment-specific coefficient values some specs mode_choice logsums have the same espression values with different coefficients for various segments (e.g. eatout, .. ,atwork) and a template file that maps a flat list of coefficients into segment columns. This allows us to provide a coefficient fiel with just the coefficients for a specific segment, that works with generic coefficient names in the spec. For instance coef_ivt can take on the values of segment-specific coefficients coef_ivt_school_univ, coef_ivt_work, coef_ivt_atwork,... :: coefficients_df value constrain coefficient_name coef_ivt_eatout_escort_... -0.0175 F coef_ivt_school_univ -0.0224 F coef_ivt_work -0.0134 F coef_ivt_atwork -0.0188 F template_df coefficient_name eatout school school work coef_ivt coef_ivt_eatout_escort_... coef_ivt_school_univ coef_ivt_school_univ coef_ivt_work For school segment this will return the generic coefficient name withe h segment-specific coefficient value e.g. {'coef_ivt': -0.0224, ...} ... """ if 'COEFFICIENTS' in model_settings and 'COEFFICIENT_TEMPLATE' in model_settings: legacy = False elif 'COEFFICIENTS' in model_settings: legacy = 'COEFFICIENTS' warnings.warn("Support for COEFFICIENTS without COEFFICIENT_TEMPLATE in model settings file will be removed." "Use COEFFICIENT and COEFFICIENT_TEMPLATE to support estimation.", FutureWarning) elif 'LEGACY_COEFFICIENTS' in model_settings: legacy = 'LEGACY_COEFFICIENTS' warnings.warn("Support for 'LEGACY_COEFFICIENTS' setting in model settings file will be removed." 
"Use COEFFICIENT and COEFFICIENT_TEMPLATE to support estimation.", FutureWarning) else: raise RuntimeError(f"No COEFFICIENTS setting in model_settings") if legacy: constants = config.get_model_constants(model_settings) legacy_coeffs_file_path = config.config_file_path(model_settings['LEGACY_COEFFICIENTS']) omnibus_coefficients = pd.read_csv(legacy_coeffs_file_path, comment='#', index_col='coefficient_name') coefficients_dict = assign.evaluate_constants(omnibus_coefficients[segment_name], constants=constants) else: coefficients_df = read_model_coefficients(model_settings) template_df = read_model_coefficient_template(model_settings) coefficients_col = template_df[segment_name].map(coefficients_df.value).astype(float) if coefficients_col.isnull().any(): # show them the offending lines from interaction_coefficients_file logger.warning(f"bad coefficients in COEFFICIENTS {model_settings['COEFFICIENTS']}\n" f"{coefficients_col[coefficients_col.isnull()]}") assert not coefficients_col.isnull().any() coefficients_dict = coefficients_col.to_dict() return coefficients_dict def eval_nest_coefficients(nest_spec, coefficients, trace_label): def replace_coefficients(nest): if isinstance(nest, dict): assert 'coefficient' in nest coefficient_name = nest['coefficient'] if isinstance(coefficient_name, str): assert coefficient_name in coefficients, "%s not in nest coefficients" % (coefficient_name, ) nest['coefficient'] = coefficients[coefficient_name] assert 'alternatives' in nest for alternative in nest['alternatives']: if isinstance(alternative, dict): replace_coefficients(alternative) if isinstance(coefficients, pd.DataFrame): assert ('value' in coefficients.columns) coefficients = coefficients['value'].to_dict() replace_coefficients(nest_spec) logit.validate_nest_spec(nest_spec, trace_label) return nest_spec def eval_coefficients(spec, coefficients, estimator): spec = spec.copy() # don't clobber input spec if isinstance(coefficients, pd.DataFrame): assert ('value' in coefficients.columns) coefficients = coefficients['value'].to_dict() assert isinstance(coefficients, dict), \ "eval_coefficients doesn't grok type of coefficients: %s" % (type(coefficients)) for c in spec.columns: if c == SPEC_LABEL_NAME: continue spec[c] = spec[c].apply(lambda x: eval(str(x), {}, coefficients)).astype(np.float32) # drop any rows with all zeros since they won't have any effect (0 marginal utility) # (do not drop rows in estimation mode as it may confuse the estimation package (e.g. larch) zero_rows = (spec == 0).all(axis=1) if zero_rows.any(): if estimator: logger.debug("keeping %s all-zero rows in SPEC" % (zero_rows.sum(),)) else: logger.debug("dropping %s all-zero rows from SPEC" % (zero_rows.sum(), )) spec = spec.loc[~zero_rows] return spec def eval_utilities(spec, choosers, locals_d=None, trace_label=None, have_trace_targets=False, trace_all_rows=False, estimator=None, trace_column_names=None, log_alt_losers=False): """ Parameters ---------- spec : pandas.DataFrame A table of variable specifications and coefficient values. Variable expressions should be in the table index and the table should have a column for each alternative. 
choosers : pandas.DataFrame locals_d : Dict or None This is a dictionary of local variables that will be the environment for an evaluation of an expression that begins with @ trace_label: str have_trace_targets: boolean - choosers has targets to trace trace_all_rows: boolean - trace all chooser rows, bypassing tracing.trace_targets estimator : called to report intermediate table results (used for estimation) trace_column_names: str or list of str chooser columns to include when tracing expression_values Returns ------- """ # fixme - restore tracing and _check_for_variability trace_label = tracing.extend_trace_label(trace_label, 'eval_utils') # avoid altering caller's passed-in locals_d parameter (they may be looping) locals_dict = assign.local_utilities() if locals_d is not None: locals_dict.update(locals_d) globals_dict = {} locals_dict['df'] = choosers # - eval spec expressions if isinstance(spec.index, pd.MultiIndex): # spec MultiIndex with expression and label exprs = spec.index.get_level_values(SPEC_EXPRESSION_NAME) else: exprs = spec.index expression_values = np.empty((spec.shape[0], choosers.shape[0])) chunk.log_df(trace_label, "expression_values", expression_values) i = 0 for expr, coefficients in zip(exprs, spec.values): try: with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") if expr.startswith('@'): expression_value = eval(expr[1:], globals_dict, locals_dict) else: expression_value = choosers.eval(expr) if len(w) > 0: for wrn in w: logger.warning(f"{trace_label} - {type(wrn).__name__} ({wrn.message}) evaluating: {str(expr)}") except Exception as err: logger.exception(f"{trace_label} - {type(err).__name__} ({str(err)}) evaluating: {str(expr)}") raise err if log_alt_losers: # utils for each alt for this expression # FIXME if we always did tis, we cold uem these and skip np.dot below utils = np.outer(expression_value, coefficients) losers = np.amax(utils, axis=1) < ALT_LOSER_UTIL if losers.any(): logger.warning(f"{trace_label} - {sum(losers)} choosers of {len(losers)} " f"with prohibitive utilities for all alternatives for expression: {expr}") expression_values[i] = expression_value i += 1 chunk.log_df(trace_label, "expression_values", expression_values) if estimator: df = pd.DataFrame( data=expression_values.transpose(), index=choosers.index, columns=spec.index.get_level_values(SPEC_LABEL_NAME)) df.index.name = choosers.index.name estimator.write_expression_values(df) # - compute_utilities utilities = np.dot(expression_values.transpose(), spec.astype(np.float64).values) utilities = pd.DataFrame(data=utilities, index=choosers.index, columns=spec.columns) chunk.log_df(trace_label, "utilities", utilities) if trace_all_rows or have_trace_targets: if trace_all_rows: trace_targets = pd.Series(True, index=choosers.index) else: trace_targets = tracing.trace_targets(choosers) assert trace_targets.any() # since they claimed to have targets... 
# get int offsets of the trace_targets (offsets of bool=True values) offsets = np.nonzero(list(trace_targets))[0] # get array of expression_values # expression_values.shape = (len(spec), len(choosers)) # data.shape = (len(spec), len(offsets)) data = expression_values[:, offsets] # index is utility expressions (and optional label if MultiIndex) expression_values_df = pd.DataFrame(data=data, index=spec.index) if trace_column_names is not None: if isinstance(trace_column_names, str): trace_column_names = [trace_column_names] expression_values_df.columns = pd.MultiIndex.from_frame(choosers.loc[trace_targets, trace_column_names]) tracing.trace_df(expression_values_df, tracing.extend_trace_label(trace_label, 'expression_values'), slicer=None, transpose=False) if len(spec.columns) > 1: for c in spec.columns: name = f'expression_value_{c}' tracing.trace_df(expression_values_df.multiply(spec[c].values, axis=0), tracing.extend_trace_label(trace_label, name), slicer=None, transpose=False) del expression_values chunk.log_df(trace_label, "expression_values", None) # no longer our problem - but our caller should re-log this... chunk.log_df(trace_label, "utilities", None) return utilities def eval_variables(exprs, df, locals_d=None): """ Evaluate a set of variable expressions from a spec in the context of a given data table. There are two kinds of supported expressions: "simple" expressions are evaluated in the context of the DataFrame using DataFrame.eval. This is the default type of expression. Python expressions are evaluated in the context of this function using Python's eval function. Because we use Python's eval this type of expression supports more complex operations than a simple expression. Python expressions are denoted by beginning with the @ character. Users should take care that these expressions must result in a Pandas Series. # FIXME - for performance, it is essential that spec and expression_values # FIXME - not contain booleans when dotted with spec values # FIXME - or the arrays will be converted to dtype=object within dot() Parameters ---------- exprs : sequence of str df : pandas.DataFrame locals_d : Dict This is a dictionary of local variables that will be the environment for an evaluation of an expression that begins with @ Returns ------- variables : pandas.DataFrame Will have the index of `df` and columns of eval results of `exprs`. 
""" # avoid altering caller's passed-in locals_d parameter (they may be looping) locals_dict = assign.local_utilities() if locals_d is not None: locals_dict.update(locals_d) globals_dict = {} locals_dict['df'] = df def to_array(x): if x is None or np.isscalar(x): a = np.asanyarray([x] * len(df.index)) elif isinstance(x, pd.Series): # fixme # assert x.index.equals(df.index) # save a little RAM a = x.values else: a = x # FIXME - for performance, it is essential that spec and expression_values # FIXME - not contain booleans when dotted with spec values # FIXME - or the arrays will be converted to dtype=object within dot() if not np.issubdtype(a.dtype, np.number): a = a.astype(np.int8) return a values = OrderedDict() for expr in exprs: try: if expr.startswith('@'): expr_values = to_array(eval(expr[1:], globals_dict, locals_dict)) else: expr_values = to_array(df.eval(expr)) # read model spec should ensure uniqueness, otherwise we should uniquify assert expr not in values values[expr] = expr_values except Exception as err: logger.exception(f"Variable evaluation failed {type(err).__name__} ({str(err)}) evaluating: {str(expr)}") raise err values = util.df_from_dict(values, index=df.index) return values # no longer used because eval_utilities aggregates expression_values as they are computed to save space # def compute_utilities(expression_values, spec): # # # matrix product of spec expression_values with utility coefficients of alternatives # # sums the partial utilities (represented by each spec row) of the alternatives # # resulting in a dataframe with one row per chooser and one column per alternative # # pandas.dot depends on column names of expression_values matching spec index values # # # FIXME - for performance, it is essential that spec and expression_values # # FIXME - not contain booleans when dotted with spec values # # FIXME - or the arrays will be converted to dtype=object within dot() # # spec = spec.astype(np.float64) # # # pandas.dot depends on column names of expression_values matching spec index values # # expressions should have been uniquified when spec was read # # we could do it here if need be, and then set spec.index and expression_values.columns equal # assert spec.index.is_unique # assert (spec.index.values == expression_values.columns.values).all() # # utilities = expression_values.dot(spec) # # return utilities def set_skim_wrapper_targets(df, skims): """ Add the dataframe to the SkimWrapper object so that it can be dereferenced using the parameters of the skims object. Parameters ---------- df : pandas.DataFrame Table to which to add skim data as new columns. `df` is modified in-place. skims : SkimWrapper or Skim3dWrapper object, or a list or dict of skims The skims object is used to contain multiple matrices of origin-destination impedances. Make sure to also add it to the locals_d below in order to access it in expressions. The *only* job of this method in regards to skims is to call set_df with the dataframe that comes back from interacting choosers with alternatives. See the skims module for more documentation on how the skims object is intended to be used. 
""" skims = skims if isinstance(skims, list) \ else skims.values() if isinstance(skims, dict) \ else [skims] # assume any object in skims can be treated as a skim for skim in skims: try: skim.set_df(df) except AttributeError: pass def _check_for_variability(expression_values, trace_label): """ This is an internal method which checks for variability in each expression - under the assumption that you probably wouldn't be using a variable (in live simulations) if it had no variability. This is a warning to the user that they might have constructed the variable incorrectly. It samples 1000 rows in order to not hurt performance - it's likely that if 1000 rows have no variability, the whole dataframe will have no variability. """ if trace_label is None: trace_label = '_check_for_variability' sample = random_rows(expression_values, min(1000, len(expression_values))) no_variability = has_missing_vals = 0 for i in range(len(sample.columns)): v = sample.iloc[:, i] if v.min() == v.max(): col_name = sample.columns[i] logger.info("%s: no variability (%s) in: %s" % (trace_label, v.iloc[0], col_name)) no_variability += 1 # FIXME - how could this happen? Not sure it is really a problem? if np.count_nonzero(v.isnull().values) > 0: col_name = sample.columns[i] logger.info("%s: missing values in: %s" % (trace_label, col_name)) has_missing_vals += 1 if no_variability > 0: logger.warning("%s: %s columns have no variability" % (trace_label, no_variability)) if has_missing_vals > 0: logger.warning("%s: %s columns have missing values" % (trace_label, has_missing_vals)) def compute_nested_exp_utilities(raw_utilities, nest_spec): """ compute exponentiated nest utilities based on nesting coefficients For nest nodes this is the exponentiated logsum of alternatives adjusted by nesting coefficient leaf <- exp( raw_utility ) nest <- exp( ln(sum of exponentiated raw_utility of leaves) * nest_coefficient) Parameters ---------- raw_utilities : pandas.DataFrame dataframe with the raw alternative utilities of all leaves (what in non-nested logit would be the utilities of all the alternatives) nest_spec : dict Nest tree dict from the model spec yaml file Returns ------- nested_utilities : pandas.DataFrame Will have the index of `raw_utilities` and columns for exponentiated leaf and node utilities """ nested_utilities = pd.DataFrame(index=raw_utilities.index) for nest in logit.each_nest(nest_spec, post_order=True): name = nest.name if nest.is_leaf: # leaf_utility = raw_utility / nest.product_of_coefficients nested_utilities[name] = \ raw_utilities[name].astype(float) / nest.product_of_coefficients else: # nest node # the alternative nested_utilities will already have been computed due to post_order # this will RuntimeWarning: divide by zero encountered in log # if all nest alternative utilities are zero # but the resulting inf will become 0 when exp is applied below with np.errstate(divide='ignore'): nested_utilities[name] = \ nest.coefficient * np.log(nested_utilities[nest.alternatives].sum(axis=1)) # exponentiate the utility nested_utilities[name] = np.exp(nested_utilities[name]) return nested_utilities def compute_nested_probabilities(nested_exp_utilities, nest_spec, trace_label): """ compute nested probabilities for nest leafs and nodes probability for nest alternatives is simply the alternatives's local (to nest) probability computed in the same way as the probability of non-nested alternatives in multinomial logit i.e. 
the fractional share of the sum of the exponentiated utility of itself and its siblings except in nested logit, its sib group is restricted to the nest Parameters ---------- nested_exp_utilities : pandas.DataFrame dataframe with the exponentiated nested utilities of all leaves and nodes nest_spec : dict Nest tree dict from the model spec yaml file Returns ------- nested_probabilities : pandas.DataFrame Will have the index of `nested_exp_utilities` and columns for leaf and node probabilities """ nested_probabilities = pd.DataFrame(index=nested_exp_utilities.index) for nest in logit.each_nest(nest_spec, type='node', post_order=False): probs = logit.utils_to_probs(nested_exp_utilities[nest.alternatives], trace_label=trace_label, exponentiated=True, allow_zero_probs=True) nested_probabilities = pd.concat([nested_probabilities, probs], axis=1) return nested_probabilities def compute_base_probabilities(nested_probabilities, nests, spec): """ compute base probabilities for nest leaves Base probabilities will be the nest-adjusted probabilities of all leaves This flattens or normalizes all the nested probabilities so that they have the proper global relative values (the leaf probabilities sum to 1 for each row.) Parameters ---------- nested_probabilities : pandas.DataFrame dataframe with the nested probabilities for nest leafs and nodes nests : dict Nest tree dict from the model spec yaml file spec : pandas.Dataframe simple simulate spec so we can return columns in appropriate order Returns ------- base_probabilities : pandas.DataFrame Will have the index of `nested_probabilities` and columns for leaf base probabilities """ base_probabilities = pd.DataFrame(index=nested_probabilities.index) for nest in logit.each_nest(nests, type='leaf', post_order=False): # skip root: it has a prob of 1 but we didn't compute a nested probability column for it ancestors = nest.ancestors[1:] base_probabilities[nest.name] = nested_probabilities[ancestors].prod(axis=1) # reorder alternative columns to match spec # since these are alternatives chosen by column index, order of columns matters assert(set(base_probabilities.columns) == set(spec.columns)) base_probabilities = base_probabilities[spec.columns] return base_probabilities def eval_mnl(choosers, spec, locals_d, custom_chooser, estimator, log_alt_losers=False, want_logsums=False, trace_label=None, trace_choice_name=None, trace_column_names=None): """ Run a simulation for when the model spec does not involve alternative specific data, e.g. there are no interactions with alternative properties and no need to sample from alternatives. Each row in spec computes a partial utility for each alternative, by providing a spec expression (often a boolean 0-1 trigger) and a column of utility coefficients for each alternative. We compute the utility of each alternative by matrix-multiplication of eval results with the utility coefficients in the spec alternative columns yielding one row per chooser and one column per alternative Parameters ---------- choosers : pandas.DataFrame spec : pandas.DataFrame A table of variable specifications and coefficient values. Variable expressions should be in the table index and the table should have a column for each alternative. 
locals_d : Dict or None This is a dictionary of local variables that will be the environment for an evaluation of an expression that begins with @ custom_chooser : function(probs, choosers, spec, trace_label) returns choices, rands custom alternative to logit.make_choices estimator : Estimator object called to report intermediate table results (used for estimation) trace_label: str This is the label to be used for trace log file entries and dump file names when household tracing enabled. No tracing occurs if label is empty or None. trace_choice_name: str This is the column label to be used in trace file csv dump of choices trace_column_names: str or list of str chooser columns to include when tracing expression_values Returns ------- choices : pandas.Series Index will be that of `choosers`, values will match the columns of `spec`. """ # FIXME - not implemented because not currently needed assert not want_logsums trace_label = tracing.extend_trace_label(trace_label, 'eval_mnl') have_trace_targets = tracing.has_trace_targets(choosers) if have_trace_targets: tracing.trace_df(choosers, '%s.choosers' % trace_label) utilities = eval_utilities(spec, choosers, locals_d, log_alt_losers=log_alt_losers, trace_label=trace_label, have_trace_targets=have_trace_targets, estimator=estimator, trace_column_names=trace_column_names) chunk.log_df(trace_label, "utilities", utilities) if have_trace_targets: tracing.trace_df(utilities, '%s.utilities' % trace_label, column_labels=['alternative', 'utility']) probs = logit.utils_to_probs(utilities, trace_label=trace_label, trace_choosers=choosers) chunk.log_df(trace_label, "probs", probs) del utilities chunk.log_df(trace_label, 'utilities', None) if have_trace_targets: # report these now in case make_choices throws error on bad_choices tracing.trace_df(probs, '%s.probs' % trace_label, column_labels=['alternative', 'probability']) if custom_chooser: choices, rands = custom_chooser(probs=probs, choosers=choosers, spec=spec, trace_label=trace_label) else: choices, rands = logit.make_choices(probs, trace_label=trace_label) del probs chunk.log_df(trace_label, 'probs', None) if have_trace_targets: tracing.trace_df(choices, '%s.choices' % trace_label, columns=[None, trace_choice_name]) tracing.trace_df(rands, '%s.rands' % trace_label, columns=[None, 'rand']) return choices def eval_nl(choosers, spec, nest_spec, locals_d, custom_chooser, estimator, log_alt_losers=False, want_logsums=False, trace_label=None, trace_choice_name=None, trace_column_names=None): """ Run a nested-logit simulation for when the model spec does not involve alternative specific data, e.g. there are no interactions with alternative properties and no need to sample from alternatives. Parameters ---------- choosers : pandas.DataFrame spec : pandas.DataFrame A table of variable specifications and coefficient values. Variable expressions should be in the table index and the table should have a column for each alternative. 
nest_spec: dictionary specifying nesting structure and nesting coefficients (from the model spec yaml file) locals_d : Dict or None This is a dictionary of local variables that will be the environment for an evaluation of an expression that begins with @ custom_chooser : function(probs, choosers, spec, trace_label) returns choices, rands custom alternative to logit.make_choices estimator : Estimator object called to report intermediate table results (used for estimation) trace_label: str This is the label to be used for trace log file entries and dump file names when household tracing enabled. No tracing occurs if label is empty or None. trace_choice_name: str This is the column label to be used in trace file csv dump of choices trace_column_names: str or list of str chooser columns to include when tracing expression_values Returns ------- choices : pandas.Series Index will be that of `choosers`, values will match the columns of `spec`. """ trace_label = tracing.extend_trace_label(trace_label, 'eval_nl') assert trace_label have_trace_targets = tracing.has_trace_targets(choosers) logit.validate_nest_spec(nest_spec, trace_label) if have_trace_targets: tracing.trace_df(choosers, '%s.choosers' % trace_label) raw_utilities = eval_utilities(spec, choosers, locals_d, log_alt_losers=log_alt_losers, trace_label=trace_label, have_trace_targets=have_trace_targets, estimator=estimator, trace_column_names=trace_column_names) chunk.log_df(trace_label, "raw_utilities", raw_utilities) if have_trace_targets: tracing.trace_df(raw_utilities, '%s.raw_utilities' % trace_label, column_labels=['alternative', 'utility']) # exponentiated utilities of leaves and nests nested_exp_utilities = compute_nested_exp_utilities(raw_utilities, nest_spec) chunk.log_df(trace_label, "nested_exp_utilities", nested_exp_utilities) del raw_utilities chunk.log_df(trace_label, 'raw_utilities', None) if have_trace_targets: tracing.trace_df(nested_exp_utilities, '%s.nested_exp_utilities' % trace_label, column_labels=['alternative', 'utility']) # probabilities of alternatives relative to siblings sharing the same nest nested_probabilities = \ compute_nested_probabilities(nested_exp_utilities, nest_spec, trace_label=trace_label) chunk.log_df(trace_label, "nested_probabilities", nested_probabilities) if want_logsums: # logsum of nest root logsums = pd.Series(np.log(nested_exp_utilities.root), index=choosers.index) chunk.log_df(trace_label, "logsums", logsums) del nested_exp_utilities chunk.log_df(trace_label, 'nested_exp_utilities', None) if have_trace_targets: tracing.trace_df(nested_probabilities, '%s.nested_probabilities' % trace_label, column_labels=['alternative', 'probability']) # global (flattened) leaf probabilities based on relative nest coefficients (in spec order) base_probabilities = compute_base_probabilities(nested_probabilities, nest_spec, spec) chunk.log_df(trace_label, "base_probabilities", base_probabilities) del nested_probabilities chunk.log_df(trace_label, 'nested_probabilities', None) if have_trace_targets: tracing.trace_df(base_probabilities, '%s.base_probabilities' % trace_label, column_labels=['alternative', 'probability']) # note base_probabilities could all be zero since we allowed all probs for nests to be zero # check here to print a clear message but make_choices will raise error if probs don't sum to 1 BAD_PROB_THRESHOLD = 0.001 no_choices = (base_probabilities.sum(axis=1) - 1).abs() > BAD_PROB_THRESHOLD if no_choices.any(): logit.report_bad_choices( no_choices, base_probabilities, 
trace_label=tracing.extend_trace_label(trace_label, 'bad_probs'), trace_choosers=choosers, msg="base_probabilities do not sum to one") if custom_chooser: choices, rands = custom_chooser(probs=base_probabilities, choosers=choosers, spec=spec, trace_label=trace_label) else: choices, rands = logit.make_choices(base_probabilities, trace_label=trace_label) del base_probabilities chunk.log_df(trace_label, 'base_probabilities', None) if have_trace_targets: tracing.trace_df(choices, '%s.choices' % trace_label, columns=[None, trace_choice_name]) tracing.trace_df(rands, '%s.rands' % trace_label, columns=[None, 'rand']) if want_logsums: tracing.trace_df(logsums, '%s.logsums' % trace_label, columns=[None, 'logsum']) if want_logsums: choices = choices.to_frame('choice') choices['logsum'] = logsums return choices def _simple_simulate(choosers, spec, nest_spec, skims=None, locals_d=None, custom_chooser=None, log_alt_losers=False, want_logsums=False, estimator=None, trace_label=None, trace_choice_name=None, trace_column_names=None, ): """ Run an MNL or NL simulation for when the model spec does not involve alternative specific data, e.g. there are no interactions with alternative properties and no need to sample from alternatives. Parameters ---------- choosers : pandas.DataFrame spec : pandas.DataFrame A table of variable specifications and coefficient values. Variable expressions should be in the table index and the table should have a column for each alternative. nest_spec: for nested logit (nl): dictionary specifying nesting structure and nesting coefficients for multinomial logit (mnl): None skims : Skims object The skims object is used to contain multiple matrices of origin-destination impedances. Make sure to also add it to the locals_d below in order to access it in expressions. The *only* job of this method in regards to skims is to call set_df with the dataframe that comes back from interacting choosers with alternatives. See the skims module for more documentation on how the skims object is intended to be used. locals_d : Dict This is a dictionary of local variables that will be the environment for an evaluation of an expression that begins with @ custom_chooser : Estimator object estimator : function(df, label, table_name) called to report intermediate table results (used for estimation) trace_label: str This is the label to be used for trace log file entries and dump file names when household tracing enabled. No tracing occurs if label is empty or None. trace_choice_name: str This is the column label to be used in trace file csv dump of choices trace_column_names: str or list of str chooser columns to include when tracing expression_values Returns ------- choices : pandas.Series Index will be that of `choosers`, values will match the columns of `spec`. 
""" if skims is not None: set_skim_wrapper_targets(choosers, skims) if nest_spec is None: choices = eval_mnl(choosers, spec, locals_d, custom_chooser, log_alt_losers=log_alt_losers, want_logsums=want_logsums, estimator=estimator, trace_label=trace_label, trace_choice_name=trace_choice_name, trace_column_names=trace_column_names) else: choices = eval_nl(choosers, spec, nest_spec, locals_d, custom_chooser, log_alt_losers=log_alt_losers, want_logsums=want_logsums, estimator=estimator, trace_label=trace_label, trace_choice_name=trace_choice_name, trace_column_names=trace_column_names) return choices def tvpb_skims(skims): def list_of_skims(skims): return \ skims if isinstance(skims, list) \ else skims.values() if isinstance(skims, dict) \ else [skims] if skims is not None \ else [] return [skim for skim in list_of_skims(skims) if isinstance(skim, pathbuilder.TransitVirtualPathLogsumWrapper)] def simple_simulate(choosers, spec, nest_spec, skims=None, locals_d=None, chunk_size=0, custom_chooser=None, log_alt_losers=False, want_logsums=False, estimator=None, trace_label=None, trace_choice_name=None, trace_column_names=None): """ Run an MNL or NL simulation for when the model spec does not involve alternative specific data, e.g. there are no interactions with alternative properties and no need to sample from alternatives. """ trace_label = tracing.extend_trace_label(trace_label, 'simple_simulate') assert len(choosers) > 0 result_list = [] # segment by person type and pick the right spec for each person type for i, chooser_chunk, chunk_trace_label \ in chunk.adaptive_chunked_choosers(choosers, chunk_size, trace_label): choices = _simple_simulate( chooser_chunk, spec, nest_spec, skims=skims, locals_d=locals_d, custom_chooser=custom_chooser, log_alt_losers=log_alt_losers, want_logsums=want_logsums, estimator=estimator, trace_label=chunk_trace_label, trace_choice_name=trace_choice_name, trace_column_names=trace_column_names) result_list.append(choices) chunk.log_df(trace_label, f'result_list', result_list) if len(result_list) > 1: choices = pd.concat(result_list) assert len(choices.index == len(choosers.index)) return choices def simple_simulate_by_chunk_id(choosers, spec, nest_spec, skims=None, locals_d=None, chunk_size=0, custom_chooser=None, log_alt_losers=False, want_logsums=False, estimator=None, trace_label=None, trace_choice_name=None): """ chunk_by_chunk_id wrapper for simple_simulate """ result_list = [] for i, chooser_chunk, chunk_trace_label \ in chunk.adaptive_chunked_choosers_by_chunk_id(choosers, chunk_size, trace_label): choices = _simple_simulate( chooser_chunk, spec, nest_spec, skims=skims, locals_d=locals_d, custom_chooser=custom_chooser, log_alt_losers=log_alt_losers, want_logsums=want_logsums, estimator=estimator, trace_label=chunk_trace_label, trace_choice_name=trace_choice_name) result_list.append(choices) chunk.log_df(trace_label, f'result_list', result_list) if len(result_list) > 1: choices = pd.concat(result_list) return choices def eval_mnl_logsums(choosers, spec, locals_d, trace_label=None): """ like eval_nl except return logsums instead of making choices Returns ------- logsums : pandas.Series Index will be that of `choosers`, values will be logsum across spec column values """ # FIXME - untested and not currently used by any models... 
trace_label = tracing.extend_trace_label(trace_label, 'eval_mnl_logsums') have_trace_targets = tracing.has_trace_targets(choosers) logger.debug("running eval_mnl_logsums") # trace choosers if have_trace_targets: tracing.trace_df(choosers, '%s.choosers' % trace_label) utilities = eval_utilities(spec, choosers, locals_d, trace_label, have_trace_targets) chunk.log_df(trace_label, "utilities", utilities) if have_trace_targets: tracing.trace_df(utilities, '%s.raw_utilities' % trace_label, column_labels=['alternative', 'utility']) # - logsums # logsum is log of exponentiated utilities summed across columns of each chooser row logsums = np.log(np.exp(utilities.values).sum(axis=1)) logsums = pd.Series(logsums, index=choosers.index) chunk.log_df(trace_label, "logsums", logsums) # trace utilities if have_trace_targets: tracing.trace_df(logsums, '%s.logsums' % trace_label, column_labels=['alternative', 'logsum']) return logsums def eval_nl_logsums(choosers, spec, nest_spec, locals_d, trace_label=None): """ like eval_nl except return logsums instead of making choices Returns ------- logsums : pandas.Series Index will be that of `choosers`, values will be nest logsum based on spec column values """ trace_label = tracing.extend_trace_label(trace_label, 'eval_nl_logsums') have_trace_targets = tracing.has_trace_targets(choosers) logit.validate_nest_spec(nest_spec, trace_label) # trace choosers if have_trace_targets: tracing.trace_df(choosers, '%s.choosers' % trace_label) raw_utilities = eval_utilities(spec, choosers, locals_d, trace_label=trace_label, have_trace_targets=have_trace_targets) chunk.log_df(trace_label, "raw_utilities", raw_utilities) if have_trace_targets: tracing.trace_df(raw_utilities, '%s.raw_utilities' % trace_label, column_labels=['alternative', 'utility']) # - exponentiated utilities of leaves and nests nested_exp_utilities = compute_nested_exp_utilities(raw_utilities, nest_spec) chunk.log_df(trace_label, "nested_exp_utilities", nested_exp_utilities) del raw_utilities # done with raw_utilities chunk.log_df(trace_label, 'raw_utilities', None) # - logsums logsums = np.log(nested_exp_utilities.root) logsums = pd.Series(logsums, index=choosers.index) chunk.log_df(trace_label, "logsums", logsums) if have_trace_targets: # add logsum to nested_exp_utilities for tracing nested_exp_utilities['logsum'] = logsums tracing.trace_df(nested_exp_utilities, '%s.nested_exp_utilities' % trace_label, column_labels=['alternative', 'utility']) tracing.trace_df(logsums, '%s.logsums' % trace_label, column_labels=['alternative', 'logsum']) del nested_exp_utilities # done with nested_exp_utilities chunk.log_df(trace_label, 'nested_exp_utilities', None) return logsums def _simple_simulate_logsums(choosers, spec, nest_spec, skims=None, locals_d=None, trace_label=None): """ like simple_simulate except return logsums instead of making choices Returns ------- logsums : pandas.Series Index will be that of `choosers`, values will be nest logsum based on spec column values """ if skims is not None: set_skim_wrapper_targets(choosers, skims) if nest_spec is None: logsums = eval_mnl_logsums(choosers, spec, locals_d, trace_label=trace_label) else: logsums = eval_nl_logsums(choosers, spec, nest_spec, locals_d, trace_label=trace_label) return logsums def simple_simulate_logsums(choosers, spec, nest_spec, skims=None, locals_d=None, chunk_size=0, trace_label=None, chunk_tag=None): """ like simple_simulate except return logsums instead of making choices Returns ------- logsums : pandas.Series Index will be that of `choosers`, 
values will be nest logsum based on spec column values """ assert len(choosers) > 0 chunk_tag = chunk_tag or trace_label result_list = [] # segment by person type and pick the right spec for each person type for i, chooser_chunk, chunk_trace_label \ in chunk.adaptive_chunked_choosers(choosers, chunk_size, trace_label, chunk_tag): logsums = _simple_simulate_logsums( chooser_chunk, spec, nest_spec, skims, locals_d, chunk_trace_label) result_list.append(logsums) chunk.log_df(trace_label, f'result_list', result_list) if len(result_list) > 1: logsums = pd.concat(result_list) assert len(logsums.index == len(choosers.index)) return logsums
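
# --- Illustrative sketch (assumption: not part of this module) --------------
# A minimal, self-contained example of the multinomial-logit arithmetic that
# eval_utilities / logit.utils_to_probs / logit.make_choices perform above:
# utilities are linear-in-parameters, probabilities are a row-wise softmax,
# the logsum is log(sum(exp(utilities))), and choices are drawn by comparing a
# uniform random number against the cumulative probabilities.  The chooser
# data and coefficient values below are made up purely for illustration.

def _mnl_sketch():
    import numpy as np
    import pandas as pd

    # two chooser attributes, three alternatives (one spec column each)
    choosers = pd.DataFrame({'income': [1.0, 2.0], 'distance': [3.0, 0.5]})
    spec = pd.DataFrame(
        {'alt_a': [0.1, -0.2], 'alt_b': [0.0, 0.0], 'alt_c': [-0.3, 0.4]},
        index=['income', 'distance'])

    utilities = choosers.values @ spec.values              # (n_choosers, n_alts)
    exp_utilities = np.exp(utilities)
    probs = exp_utilities / exp_utilities.sum(axis=1, keepdims=True)
    logsums = np.log(exp_utilities.sum(axis=1))            # one logsum per chooser

    # inverse-CDF draw: count how many cumulative probs fall below the random
    rands = np.random.default_rng(0).uniform(size=len(choosers))
    choice_idx = (probs.cumsum(axis=1) < rands[:, None]).sum(axis=1)
    choices = pd.Series(spec.columns[choice_idx], index=choosers.index)
    return choices, logsums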
import unittest import pyrtl from pyrtl.rtllib import testingutils as utils from pyrtl.rtllib import muxes gen_in = utils.an_input_and_vals class TestPrioritizedMuxTrivial(unittest.TestCase): @classmethod def setUpClass(cls): pyrtl.reset_working_block() def test_empty(self): with self.assertRaises(pyrtl.PyrtlError): x = muxes.prioritized_mux([], []) def test_different_sel_and_val_lengths(self): a = pyrtl.WireVector(1) with self.assertRaises(pyrtl.PyrtlError): x = muxes.prioritized_mux([a], [a, a]) def test_invalid_select_width(self): a = pyrtl.WireVector(2) b = pyrtl.WireVector(2) c = pyrtl.WireVector(10) with self.assertRaises(pyrtl.PyrtlError): x = muxes.prioritized_mux([a, b], [c, c]) def test_one_wire(self): a = pyrtl.WireVector(1) b = pyrtl.WireVector(10) x = muxes.prioritized_mux([a], [b]) self.assertIs(b, x) def pri_mux_actual(sels , vals): # python version of the pri mux hardware assert(len(sels) == len(vals)) for index, s in enumerate(sels): if s: return vals[index] return vals[-1] class TestPrioritizedMuxSim(unittest.TestCase): def setUp(self): pyrtl.reset_working_block() def test_select_with_2_wires(self): val_width = 5 sels, sel_vals = utils.make_inputs_and_values(2, exact_bitwidth=1) mux_ins, vals = utils.make_inputs_and_values(2, exact_bitwidth=val_width) out = pyrtl.Output(val_width, "out") out <<= muxes.prioritized_mux(sels, mux_ins) actual = utils.sim_and_ret_out(out, sels + mux_ins, sel_vals + vals) expected = [pri_mux_actual(sel, val) for sel, val in zip(zip(*sel_vals), zip(*vals))] self.assertEqual(actual, expected) def test_select_with_5_wires(self): val_width = 5 sels, sel_vals = utils.make_inputs_and_values(5, exact_bitwidth=1, test_vals=50) mux_ins, vals = utils.make_inputs_and_values(5, exact_bitwidth=val_width, test_vals=50) out = pyrtl.Output(val_width, "out") out <<= muxes.prioritized_mux(sels, mux_ins) actual = utils.sim_and_ret_out(out, sels + mux_ins, sel_vals + vals) expected = [pri_mux_actual(sel, val) for sel, val in zip(zip(*sel_vals), zip(*vals))] self.assertEqual(actual, expected) class TestIsEquivelent(unittest.TestCase): def test_equivalent_const(self): a = pyrtl.Const(1) b = pyrtl.Const(1) c = pyrtl.Const(1, 2) d = pyrtl.Const(3) self.assertTrue(muxes._is_equivelent(a, a)) self.assertTrue(muxes._is_equivelent(a, b)) self.assertFalse(muxes._is_equivelent(a, c)) self.assertFalse(muxes._is_equivelent(a, d)) def test_equivalent(self): a = pyrtl.WireVector(2) b = pyrtl.Const(2, 2) c = pyrtl.Output() self.assertTrue(muxes._is_equivelent(a, a)) self.assertTrue(muxes._is_equivelent(c, c)) self.assertFalse(muxes._is_equivelent(a, b)) self.assertFalse(muxes._is_equivelent(a, c)) class TestSmartMuxTrivial(unittest.TestCase): def setUp(self): pyrtl.reset_working_block() def test_one_value(self): sel = pyrtl.WireVector(3) a = pyrtl.WireVector(1) self.assertIs(muxes.sparse_mux(sel, {6: a}), a) def test_dup_value(self): sel = pyrtl.WireVector(3) a = pyrtl.WireVector(1) self.assertIs(muxes.sparse_mux(sel, {6: a, 2: a}), a) def test_dup_consts1(self): sel = pyrtl.WireVector(1) c1 = pyrtl.Const(4) c2 = pyrtl.Const(4) res = muxes.sparse_mux(sel, {0: c1, 1: c2}) self.assertIsInstance(res, pyrtl.Const) self.assertEqual(res.val, 4) def test_dup_consts2(self): sel = pyrtl.WireVector(3) c1 = pyrtl.Const(4) c2 = pyrtl.Const(4) res = muxes.sparse_mux(sel, {6: c1, 2: c2}) self.assertIsInstance(res, pyrtl.Const) self.assertEqual(res.val, 4) def test_no_dup_2(self): sel = pyrtl.WireVector(3) c1 = pyrtl.Const(4) c2 = pyrtl.Const(6) res = muxes.sparse_mux(sel, {6: c1, 2: c2}) 
self.assertNotIsInstance(res, pyrtl.Const) def test_no_dup(self): sel = pyrtl.WireVector(3) a = pyrtl.WireVector(3) b = pyrtl.WireVector(3) res = muxes.sparse_mux(sel, {6: a, 2: b}) self.assertIsNot(res, a) self.assertIsNot(res, b) class TestSmartMux(unittest.TestCase): def setUp(self): pyrtl.reset_working_block() def test_two_vals(self): sel, sel_vals = gen_in(1) a1, a1_vals = gen_in(3) a2, a2_vals = gen_in(3) res = pyrtl.Output(name="output") res <<= muxes.sparse_mux(sel, {0: a1, 1: a2}) in_vals = [sel_vals, a1_vals, a2_vals] out_res = utils.sim_and_ret_out(res, [sel, a1, a2], in_vals) expected_out = [e2 if sel else e1 for sel, e1, e2 in zip(*in_vals)] self.assertEqual(out_res, expected_out) def test_two_vals_big(self): sel = pyrtl.Input(3) a1, a1_vals = gen_in(3) a2, a2_vals = gen_in(3) res = pyrtl.Output(name="output") sel_vals = [utils.uniform_dist(1) for i in range(20)] real_sel = [6 if s else 2 for s in sel_vals] res <<= muxes.sparse_mux(sel, {2: a1, 6: a2}) out_res = utils.sim_and_ret_out(res, [sel, a1, a2], [real_sel, a1_vals, a2_vals]) expected_out = [e2 if sel else e1 for sel, e1, e2 in zip(sel_vals, a1_vals, a2_vals)] self.assertEqual(out_res, expected_out) def test_two_big_close(self): sel = pyrtl.Input(3) a1, a1_vals = gen_in(3) a2, a2_vals = gen_in(3) res = pyrtl.Output(name="output") sel_vals = [utils.uniform_dist(1) for i in range(20)] real_sel = [6 if s else 5 for s in sel_vals] res <<= muxes.sparse_mux(sel, {5: a1, 6: a2}) out_res = utils.sim_and_ret_out(res, [sel, a1, a2], [real_sel, a1_vals, a2_vals]) expected_out = [e2 if sel else e1 for sel, e1, e2 in zip(sel_vals, a1_vals, a2_vals)] self.assertEqual(out_res, expected_out) class TestSmartMuxDefault(unittest.TestCase): def setUp(self): pyrtl.reset_working_block() def test_default(self): sel, sel_vals = gen_in(3) a1, a1_vals = gen_in(3) a2, a2_vals = gen_in(3) default, default_vals = gen_in(3) res = pyrtl.Output(name="output") res <<= muxes.sparse_mux(sel, {5: a1, 6: a2, muxes.SparseDefault: default}) out_res = utils.sim_and_ret_out(res, [sel, a1, a2, default], [sel_vals, a1_vals, a2_vals, default_vals]) expected_out = [e2 if sel == 6 else e1 if sel == 5 else d for sel, e1, e2, d in zip(sel_vals, a1_vals, a2_vals, default_vals)] self.assertEqual(out_res, expected_out) class TestMultiSelector(unittest.TestCase): def setUp(self): pyrtl.reset_working_block() def test_value_already_set(self): sel = pyrtl.Input(1) wire = pyrtl.WireVector(8) i1_out = pyrtl.Output(name="i1_out") i2_out = pyrtl.Output(name="i2_out") with muxes.MultiSelector(sel, i1_out, i2_out) as mul_sel: mul_sel.option(0, wire, wire) with self.assertRaises(pyrtl.PyrtlError): mul_sel.option(0, wire, wire) def test_incorrect_number_of_wires(self): sel = pyrtl.Input(1) wire = pyrtl.WireVector(8) i1_out = pyrtl.Output(name="i1_out") i2_out = pyrtl.Output(name="i2_out") with muxes.MultiSelector(sel, i1_out, i2_out) as mul_sel: mul_sel.option(0, wire, wire) with self.assertRaises(pyrtl.PyrtlError): mul_sel.option(1, wire, wire, wire) def test_incorrect_number_of_wires_2(self): sel = pyrtl.Input(1) wire = pyrtl.WireVector(8) i1_out = pyrtl.Output(name="i1_out") i2_out = pyrtl.Output(name="i2_out") i3_out = pyrtl.Output(name="i3_out") mul_sel = muxes.MultiSelector(sel, i1_out, i2_out, i3_out) with self.assertRaises(pyrtl.PyrtlError): mul_sel.option(0, wire, wire) class TestMultiSelectorSim(unittest.TestCase): def setUp(self): pyrtl.reset_working_block() def test_really_simple(self): sel, sel_vals = gen_in(1) i1_0, i1_0_vals = gen_in(8) i2_0, i2_0_vals = gen_in(8) 
i1_1, i1_1_vals = gen_in(8) i2_1, i2_1_vals = gen_in(8) i1_out = pyrtl.Output(name="i1_out") i2_out = pyrtl.Output(name="i2_out") with muxes.MultiSelector(sel, i1_out, i2_out) as mul_sel: mul_sel.option(0, i1_0, i2_0) mul_sel.option(1, i1_1, i2_1) actual_outputs = utils.sim_and_ret_outws([sel, i1_0, i1_1, i2_0, i2_1], [sel_vals, i1_0_vals, i1_1_vals, i2_0_vals, i2_1_vals]) expected_i1_out = [v1 if s else v0 for s, v0, v1 in zip(sel_vals, i1_0_vals, i1_1_vals )] expected_i2_out = [v1 if s else v0 for s, v0, v1 in zip(sel_vals, i2_0_vals, i2_1_vals )] self.assertEqual(actual_outputs[i1_out], expected_i1_out) self.assertEqual(actual_outputs[i2_out], expected_i2_out) def test_simple(self): sel, sel_vals = gen_in(2) x1s, x1_vals = (list(x) for x in zip(*(gen_in(8) for i in range(4)))) x2s, x2_vals = (list(x) for x in zip(*(gen_in(8) for i in range(4)))) x3s, x3_vals = (list(x) for x in zip(*(gen_in(8) for i in range(4)))) i1_out = pyrtl.Output(name="i1_out") i2_out = pyrtl.Output(name="i2_out") i3_out = pyrtl.Output(name="i3_out") with muxes.MultiSelector(sel, i1_out, i2_out, i3_out) as mu: for i in range(4): mu.option(i, x1s[i], x2s[i], x3s[i]) wires = [sel] + x1s + x2s + x3s vals = [sel_vals] + x1_vals + x2_vals + x3_vals actual_outputs = utils.sim_and_ret_outws(wires, vals) expected_i1_out = [v[s] for s, v in zip(sel_vals, zip(*x1_vals))] expected_i2_out = [v[s] for s, v in zip(sel_vals, zip(*x2_vals))] expected_i3_out = [v[s] for s, v in zip(sel_vals, zip(*x3_vals))] self.assertEqual(actual_outputs[i1_out], expected_i1_out) self.assertEqual(actual_outputs[i2_out], expected_i2_out) self.assertEqual(actual_outputs[i3_out], expected_i3_out) class TestDemux(unittest.TestCase): def setUp(self): pyrtl.reset_working_block() def test_simple_demux(self): in_w, in_vals = utils.an_input_and_vals(2) outs = (pyrtl.Output(name="output_" + str(i)) for i in range(4)) demux_outs = pyrtl.rtllib.muxes.demux(in_w) for out_w, demux_out in zip(outs, demux_outs): out_w <<= demux_out traces = utils.sim_and_ret_outws((in_w,), (in_vals,)) for cycle in range(20): for i, out_wire in enumerate(outs): self.assertEqual(in_vals[i] == i, traces[out_wire][cycle]) def test_demux_2(self): in_w, in_vals = utils.an_input_and_vals(1) outs = (pyrtl.Output(name="output_" + str(i)) for i in range(2)) demux_outs = pyrtl.rtllib.muxes._demux_2(in_w) for out_w, demux_out in zip(outs, demux_outs): out_w <<= demux_out traces = utils.sim_and_ret_outws((in_w,), (in_vals,)) for cycle in range(20): for i, out_wire in enumerate(outs): self.assertEqual(in_vals[i] == i, traces[out_wire][cycle]) def test_large_demux(self): in_w, in_vals = utils.an_input_and_vals(5) outs = (pyrtl.Output(name="output_" + str(i)) for i in range(32)) demux_outs = pyrtl.rtllib.muxes.demux(in_w) for out_w, demux_out in zip(outs, demux_outs): self.assertEqual(len(demux_out), 1) out_w <<= demux_out traces = utils.sim_and_ret_outws((in_w,), (in_vals,)) for cycle in range(20): for i, out_wire in enumerate(outs): self.assertEqual(in_vals[i] == i, traces[out_wire][cycle])
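
# --- Illustrative sketch (assumption: not part of the test suite) -----------
# A standalone use of muxes.sparse_mux outside the unittest harness, following
# the same build-then-simulate pattern the tests above rely on.  The input
# names and values are arbitrary; muxes.SparseDefault is the same sentinel the
# TestSmartMuxDefault case exercises.

def _sparse_mux_demo():
    import pyrtl
    from pyrtl.rtllib import muxes

    pyrtl.reset_working_block()
    sel = pyrtl.Input(3, 'sel')
    a = pyrtl.Input(8, 'a')
    b = pyrtl.Input(8, 'b')
    default = pyrtl.Input(8, 'default')
    out = pyrtl.Output(8, 'out')

    # select a when sel == 2, b when sel == 6, otherwise the default wire
    out <<= muxes.sparse_mux(sel, {2: a, 6: b, muxes.SparseDefault: default})

    sim = pyrtl.Simulation()
    results = []
    for sel_val in (2, 6, 0):
        sim.step({'sel': sel_val, 'a': 10, 'b': 20, 'default': 30})
        results.append(sim.inspect('out'))
    return results  # expected [10, 20, 30]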
# -*- coding: utf-8 -*- from django.http import HttpRequest from django.test import TestCase from ipware import get_client_ip class IPv4TestCase(TestCase): """IP address Test""" def test_meta_none(self): request = HttpRequest() request.META = {} ip, routable = get_client_ip(request) self.assertIsNone(ip) self.assertFalse(routable) def test_meta_single(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', } result = get_client_ip(request) self.assertEqual(result, ("177.139.233.139", True)) def test_meta_multi(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', 'REMOTE_ADDR': '177.139.233.133', } result = get_client_ip(request) self.assertEqual(result, ("177.139.233.139", True)) def test_meta_multi_precedence_order(self): request = HttpRequest() request.META = { 'X_FORWARDED_FOR': '177.139.233.138, 198.84.193.157, 198.84.193.158', 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', 'REMOTE_ADDR': '177.139.233.133', } result = get_client_ip(request) self.assertEqual(result, ("177.139.233.139", True)) def test_meta_proxy_order_left_most(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', } result = get_client_ip(request, proxy_order='left-most') self.assertEqual(result, ("177.139.233.139", True)) def test_meta_proxy_order_right_most(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', } result = get_client_ip(request, proxy_order='right-most') self.assertEqual(result, ("198.84.193.158", True)) def test_meta_multi_precedence_private_first(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '10.0.0.0, 10.0.0.1, 10.0.0.2', 'X_FORWARDED_FOR': '177.139.233.138, 198.84.193.157, 198.84.193.158', 'REMOTE_ADDR': '177.139.233.133', } result = get_client_ip(request) self.assertEqual(result, ("177.139.233.138", True)) def test_meta_multi_precedence_invalid_first(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': 'unknown, 10.0.0.1, 10.0.0.2', 'X_FORWARDED_FOR': '177.139.233.138, 198.84.193.157, 198.84.193.158', 'REMOTE_ADDR': '177.139.233.133', } result = get_client_ip(request) self.assertEqual(result, ("177.139.233.138", True)) def test_meta_error_only(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': 'unknown, 177.139.233.139, 198.84.193.157, 198.84.193.158', } result = get_client_ip(request) self.assertEqual(result, (None, False)) def test_meta_error_first(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': 'unknown, 177.139.233.139, 198.84.193.157, 198.84.193.158', 'X_FORWARDED_FOR': '177.139.233.138, 198.84.193.157, 198.84.193.158', } result = get_client_ip(request) self.assertEqual(result, ("177.139.233.138", True)) def test_meta_singleton(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139', } result = get_client_ip(request) self.assertEqual(result, ("177.139.233.139", True)) def test_meta_singleton_proxy_count(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139', } result = get_client_ip(request, proxy_count=1) self.assertEqual(result, (None, False)) def test_meta_singleton_proxy_count_private(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '10.0.0.0', 'HTTP_X_REAL_IP': '177.139.233.139', } result = get_client_ip(request, 
proxy_count=1) self.assertEqual(result, (None, False)) def test_meta_singleton_private_fallback(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '10.0.0.0', 'HTTP_X_REAL_IP': '177.139.233.139', } result = get_client_ip(request) self.assertEqual(result, ("177.139.233.139", True)) def test_meta_proxy_trusted_ips_exact_ip_check(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', } result = get_client_ip(request, proxy_trusted_ips=['198.84.193.158']) self.assertEqual(result, ("177.139.233.139", True)) def test_meta_proxy_trusted_ips_exact_ips_check(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', } result = get_client_ip(request, proxy_trusted_ips=['198.84.193.157', '198.84.193.158']) self.assertEqual(result, ("177.139.233.139", True)) def test_meta_proxy_trusted_ips_subnet_start_with_check(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', } result = get_client_ip(request, proxy_trusted_ips=['198.84.193']) self.assertEqual(result, ("177.139.233.139", True)) def test_meta_proxy_trusted_ips_does_not_start_with_check(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', } result = get_client_ip(request, proxy_trusted_ips=['84.193.158']) self.assertEqual(result, (None, False)) def test_meta_proxy_trusted_ips_proxy_count(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', } result = get_client_ip(request, proxy_count=2, proxy_trusted_ips=['198.84.193.158']) self.assertEqual(result, ("177.139.233.139", True)) def test_meta_proxy_trusted_ips_proxy_count_less_error(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.158', } result = get_client_ip(request, proxy_count=2, proxy_trusted_ips=['198.84.193.158']) self.assertEqual(result, (None, False)) def test_meta_proxy_trusted_ips_proxy_count_more_error(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', } result = get_client_ip(request, proxy_count=1, proxy_trusted_ips=['198.84.193.158']) self.assertEqual(result, (None, False)) def test_meta_proxy_trusted_ips_proxy_count_more_error_fallback(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', 'HTTP_X_REAL_IP': '177.139.233.139', } result = get_client_ip(request, proxy_count=1, proxy_trusted_ips=['198.84.193.158']) self.assertEqual(result, (None, False)) def test_best_matched_ip(self): request = HttpRequest() request.META = { 'HTTP_X_REAL_IP': '192.168.1.1', 'REMOTE_ADDR': '177.31.233.133', } ip = get_client_ip(request) self.assertEqual(ip, ("177.31.233.133", True)) def test_best_matched_ip_public(self): request = HttpRequest() request.META = { 'HTTP_X_REAL_IP': '177.31.233.122', 'REMOTE_ADDR': '177.31.233.133', } ip = get_client_ip(request) self.assertEqual(ip, ("177.31.233.122", True)) def test_best_matched_ip_private(self): request = HttpRequest() request.META = { 'HTTP_X_REAL_IP': '192.168.1.1', 'REMOTE_ADDR': '127.0.0.1', } ip = get_client_ip(request) self.assertEqual(ip, ("192.168.1.1", False)) def test_best_matched_ip_private_loopback_precedence(self): request = HttpRequest() request.META = { 'HTTP_X_REAL_IP': '127.0.0.1', 'REMOTE_ADDR': 
'192.168.1.1', } ip = get_client_ip(request) self.assertEqual(ip, ("192.168.1.1", False)) def test_best_matched_ip_private_precedence(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '172.25.0.1', 'REMOTE_ADDR': '172.25.0.3', } ip = get_client_ip(request) self.assertEqual(ip, ("172.25.0.3", False)) def test_100_low_range_public(self): request = HttpRequest() request.META = { 'HTTP_X_REAL_IP': '100.63.0.9', } ip = get_client_ip(request) self.assertEqual(ip, ("100.63.0.9", True)) def test_100_block_private(self): request = HttpRequest() request.META = { 'HTTP_X_REAL_IP': '100.76.0.9', } ip = get_client_ip(request) self.assertEqual(ip, ("100.76.0.9", False)) def test_100_high_range_public(self): request = HttpRequest() request.META = { 'HTTP_X_REAL_IP': '100.128.0.9', } ip = get_client_ip(request) self.assertEqual(ip, ("100.128.0.9", True)) def test_request_header_order_specific(self): request = HttpRequest() request.META = { 'HTTP_X_REAL_IP': '192.168.1.1', 'REMOTE_ADDR': '177.139.233.139', 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', } ip = get_client_ip(request, request_header_order=['HTTP_X_FORWARDED_FOR']) self.assertEqual(ip, ("177.139.233.139", True)) def test_request_header_order_multiple(self): request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_FOR': '177.139.233.139, 198.84.193.157, 198.84.193.158', 'X_FORWARDED_FOR': '177.139.233.138, 198.84.193.157, 198.84.193.158', 'REMOTE_ADDR': '177.139.233.133', } ip = get_client_ip(request, request_header_order=['X_FORWARDED_FOR', 'HTTP_X_FORWARDED_FOR']) self.assertEqual(ip, ("177.139.233.138", True))
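
# --- Illustrative sketch (assumption: not part of the test suite) -----------
# Typical use of the get_client_ip() helper exercised above inside a Django
# view.  The view itself is hypothetical; the keyword arguments shown
# (proxy_count, proxy_trusted_ips) are the same ones the tests pass.

def example_view(request):
    from django.http import HttpResponse, HttpResponseForbidden
    from ipware import get_client_ip

    client_ip, is_routable = get_client_ip(
        request, proxy_count=1, proxy_trusted_ips=['198.84.193.158'])
    if client_ip is None:
        # no usable address found (missing or malformed headers)
        return HttpResponseForbidden('Unable to determine client IP')
    if not is_routable:
        # private or loopback address: acceptable on internal networks only
        pass
    return HttpResponse('client ip: %s' % client_ip)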
# Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """GAX wrapper for Pubsub API requests.""" # pylint: disable=import-error from google.gax import CallOptions from google.gax import INITIAL_PAGE from google.gax.errors import GaxError from google.gax.grpc import exc_to_code from google.pubsub.v1.pubsub_pb2 import PubsubMessage from google.pubsub.v1.pubsub_pb2 import PushConfig from grpc.beta.interfaces import StatusCode # pylint: enable=import-error from gcloud.exceptions import Conflict from gcloud.exceptions import NotFound from gcloud._helpers import _to_bytes def _build_paging_options(page_token=None): """Helper for :meth:'_PublisherAPI.list_topics' et aliae.""" if page_token is None: page_token = INITIAL_PAGE options = {'page_token': page_token} return CallOptions(**options) class _PublisherAPI(object): """Helper mapping publisher-related APIs. :type gax_api: :class:`google.pubsub.v1.publisher_api.PublisherApi` :param gax_api: API object used to make GAX requests. """ def __init__(self, gax_api): self._gax_api = gax_api def list_topics(self, project, page_size=0, page_token=None): """List topics for the project associated with this API. See: https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/list :type project: string :param project: project ID :type page_size: int :param page_size: maximum number of topics to return, If not passed, defaults to a value set by the API. :type page_token: string :param page_token: opaque marker for the next "page" of topics. If not passed, the API will return the first page of topics. :rtype: tuple, (list, str) :returns: list of ``Topic`` resource dicts, plus a "next page token" string: if not None, indicates that more topics can be retrieved with another call (pass that value as ``page_token``). """ options = _build_paging_options(page_token) path = 'projects/%s' % (project,) page_iter = self._gax_api.list_topics( path, page_size=page_size, options=options) topics = [{'name': topic_pb.name} for topic_pb in page_iter.next()] token = page_iter.page_token or None return topics, token def topic_create(self, topic_path): """API call: create a topic See: https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/create :type topic_path: string :param topic_path: fully-qualified path of the new topic, in format ``projects/<PROJECT>/topics/<TOPIC_NAME>``. :rtype: dict :returns: ``Topic`` resource returned from the API. :raises: :exc:`gcloud.exceptions.Conflict` if the topic already exists """ try: topic_pb = self._gax_api.create_topic(topic_path) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION: raise Conflict(topic_path) raise return {'name': topic_pb.name} def topic_get(self, topic_path): """API call: retrieve a topic See: https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/get :type topic_path: string :param topic_path: fully-qualified path of the topic, in format ``projects/<PROJECT>/topics/<TOPIC_NAME>``. 
:rtype: dict :returns: ``Topic`` resource returned from the API. :raises: :exc:`gcloud.exceptions.NotFound` if the topic does not exist """ try: topic_pb = self._gax_api.get_topic(topic_path) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(topic_path) raise return {'name': topic_pb.name} def topic_delete(self, topic_path): """API call: delete a topic See: https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/create :type topic_path: string :param topic_path: fully-qualified path of the new topic, in format ``projects/<PROJECT>/topics/<TOPIC_NAME>``. :rtype: dict :returns: ``Topic`` resource returned from the API. """ try: self._gax_api.delete_topic(topic_path) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(topic_path) raise def topic_publish(self, topic_path, messages): """API call: publish one or more messages to a topic See: https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/publish :type topic_path: string :param topic_path: fully-qualified path of the topic, in format ``projects/<PROJECT>/topics/<TOPIC_NAME>``. :type messages: list of dict :param messages: messages to be published. :rtype: list of string :returns: list of opaque IDs for published messages. :raises: :exc:`gcloud.exceptions.NotFound` if the topic does not exist """ message_pbs = [_message_pb_from_dict(message) for message in messages] try: event = self._gax_api.publish(topic_path, message_pbs) if not event.is_set(): event.wait() except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(topic_path) raise return event.result.message_ids def topic_list_subscriptions(self, topic_path, page_size=0, page_token=None): """API call: list subscriptions bound to a topic See: https://cloud.google.com/pubsub/reference/rest/v1/projects.topics.subscriptions/list :type topic_path: string :param topic_path: fully-qualified path of the topic, in format ``projects/<PROJECT>/topics/<TOPIC_NAME>``. :type page_size: int :param page_size: maximum number of subscriptions to return, If not passed, defaults to a value set by the API. :type page_token: string :param page_token: opaque marker for the next "page" of subscriptions. If not passed, the API will return the first page of subscriptions. :rtype: list of strings :returns: fully-qualified names of subscriptions for the supplied topic. :raises: :exc:`gcloud.exceptions.NotFound` if the topic does not exist """ options = _build_paging_options(page_token) try: page_iter = self._gax_api.list_topic_subscriptions( topic_path, page_size=page_size, options=options) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(topic_path) raise subs = page_iter.next() token = page_iter.page_token or None return subs, token class _SubscriberAPI(object): """Helper mapping subscriber-related APIs. :type gax_api: :class:`google.pubsub.v1.publisher_api.SubscriberApi` :param gax_api: API object used to make GAX requests. """ def __init__(self, gax_api): self._gax_api = gax_api def list_subscriptions(self, project, page_size=0, page_token=None): """List subscriptions for the project associated with this API. See: https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/list :type project: string :param project: project ID :type page_size: int :param page_size: maximum number of subscriptions to return, If not passed, defaults to a value set by the API. 
:type page_token: string :param page_token: opaque marker for the next "page" of subscriptions. If not passed, the API will return the first page of subscriptions. :rtype: tuple, (list, str) :returns: list of ``Subscription`` resource dicts, plus a "next page token" string: if not None, indicates that more topics can be retrieved with another call (pass that value as ``page_token``). """ options = _build_paging_options(page_token) path = 'projects/%s' % (project,) page_iter = self._gax_api.list_subscriptions( path, page_size=page_size, options=options) subscriptions = [_subscription_pb_to_mapping(sub_pb) for sub_pb in page_iter.next()] token = page_iter.page_token or None return subscriptions, token def subscription_create(self, subscription_path, topic_path, ack_deadline=None, push_endpoint=None): """API call: create a subscription See: https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/create :type subscription_path: string :param subscription_path: the fully-qualified path of the new subscription, in format ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. :type topic_path: string :param topic_path: the fully-qualified path of the topic being subscribed, in format ``projects/<PROJECT>/topics/<TOPIC_NAME>``. :type ack_deadline: int, or ``NoneType`` :param ack_deadline: the deadline (in seconds) by which messages pulled from the back-end must be acknowledged. :type push_endpoint: string, or ``NoneType`` :param push_endpoint: URL to which messages will be pushed by the back-end. If not set, the application must pull messages. :rtype: dict :returns: ``Subscription`` resource returned from the API. """ if push_endpoint is not None: push_config = PushConfig(push_endpoint=push_endpoint) else: push_config = None if ack_deadline is None: ack_deadline = 0 try: sub_pb = self._gax_api.create_subscription( subscription_path, topic_path, push_config, ack_deadline) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION: raise Conflict(topic_path) raise return _subscription_pb_to_mapping(sub_pb) def subscription_get(self, subscription_path): """API call: retrieve a subscription See: https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/get :type subscription_path: string :param subscription_path: the fully-qualified path of the subscription, in format ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. :rtype: dict :returns: ``Subscription`` resource returned from the API. """ try: sub_pb = self._gax_api.get_subscription(subscription_path) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(subscription_path) raise return _subscription_pb_to_mapping(sub_pb) def subscription_delete(self, subscription_path): """API call: delete a subscription See: https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/delete :type subscription_path: string :param subscription_path: the fully-qualified path of the subscription, in format ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. 
""" try: self._gax_api.delete_subscription(subscription_path) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(subscription_path) raise def subscription_modify_push_config(self, subscription_path, push_endpoint): """API call: update push config of a subscription See: https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/modifyPushConfig :type subscription_path: string :param subscription_path: the fully-qualified path of the new subscription, in format ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. :type push_endpoint: string, or ``NoneType`` :param push_endpoint: URL to which messages will be pushed by the back-end. If not set, the application must pull messages. """ push_config = PushConfig(push_endpoint=push_endpoint) try: self._gax_api.modify_push_config(subscription_path, push_config) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(subscription_path) raise def subscription_pull(self, subscription_path, return_immediately=False, max_messages=1): """API call: retrieve messages for a subscription See: https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/modifyPushConfig :type subscription_path: string :param subscription_path: the fully-qualified path of the new subscription, in format ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. :type return_immediately: boolean :param return_immediately: if True, the back-end returns even if no messages are available; if False, the API call blocks until one or more messages are available. :type max_messages: int :param max_messages: the maximum number of messages to return. :rtype: list of dict :returns: the ``receivedMessages`` element of the response. """ try: response_pb = self._gax_api.pull( subscription_path, max_messages, return_immediately) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(subscription_path) raise return [_received_message_pb_to_mapping(rmpb) for rmpb in response_pb.received_messages] def subscription_acknowledge(self, subscription_path, ack_ids): """API call: acknowledge retrieved messages See: https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/modifyPushConfig :type subscription_path: string :param subscription_path: the fully-qualified path of the new subscription, in format ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. :type ack_ids: list of string :param ack_ids: ack IDs of messages being acknowledged """ try: self._gax_api.acknowledge(subscription_path, ack_ids) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(subscription_path) raise def subscription_modify_ack_deadline(self, subscription_path, ack_ids, ack_deadline): """API call: update ack deadline for retrieved messages See: https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/modifyAckDeadline :type subscription_path: string :param subscription_path: the fully-qualified path of the new subscription, in format ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. :type ack_ids: list of string :param ack_ids: ack IDs of messages being acknowledged :type ack_deadline: int :param ack_deadline: the deadline (in seconds) by which messages pulled from the back-end must be acknowledged. 
""" try: self._gax_api.modify_ack_deadline( subscription_path, ack_ids, ack_deadline) except GaxError as exc: if exc_to_code(exc.cause) == StatusCode.NOT_FOUND: raise NotFound(subscription_path) raise def _message_pb_from_dict(message): """Helper for :meth:`_PublisherAPI.topic_publish`.""" return PubsubMessage(data=_to_bytes(message['data']), attributes=message['attributes']) def _subscription_pb_to_mapping(sub_pb): """Helper for :meth:`list_subscriptions`, et aliae Ideally, would use a function from :mod:`protobuf.json_format`, but the right one isn't public. See: https://github.com/google/protobuf/issues/1351 """ mapping = { 'name': sub_pb.name, 'topic': sub_pb.topic, 'ackDeadlineSeconds': sub_pb.ack_deadline_seconds, } if sub_pb.push_config.push_endpoint != '': mapping['pushConfig'] = { 'pushEndpoint': sub_pb.push_config.push_endpoint, } return mapping def _message_pb_to_mapping(message_pb): """Helper for :meth:`pull`, et aliae Ideally, would use a function from :mod:`protobuf.json_format`, but the right one isn't public. See: https://github.com/google/protobuf/issues/1351 """ return { 'messageId': message_pb.message_id, 'data': message_pb.data, 'attributes': message_pb.attributes, } def _received_message_pb_to_mapping(received_message_pb): """Helper for :meth:`pull`, et aliae Ideally, would use a function from :mod:`protobuf.json_format`, but the right one isn't public. See: https://github.com/google/protobuf/issues/1351 """ return { 'ackId': received_message_pb.ack_id, 'message': _message_pb_to_mapping( received_message_pb.message), }
import socket import traceback import subprocess import json from datetime import datetime, timedelta from celery.utils.log import get_task_logger from pyquery import PyQuery as pq import requests from requests.adapters import HTTPAdapter from . import celery, db, app from .models import Instance, ssllabs_grade_int CONNECT_TIMEOUT, READ_TIMEOUT = 5.0, 30.0 MAX_RETRIES = 2 USER_AGENT = 'Meow' try: from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) except ImportError: pass logger = get_task_logger(__name__) class InstanceDown(Exception): pass def rsess(): s = requests.Session() # Just so one random 500 doesn't break an uptime; two consecutive errors are # worrying though. s.mount('http://', HTTPAdapter(max_retries=MAX_RETRIES)) s.mount('https://', HTTPAdapter(max_retries=MAX_RETRIES)) s.headers.update({ 'User-Agent': USER_AGENT, }) return s def try_get(sess, url): try: response = sess.get(url, timeout=(CONNECT_TIMEOUT, READ_TIMEOUT)) response.raise_for_status() return response.elapsed, response.text except: logger.exception("try_get") return None def try_get_multiproto(domain, path): sess = rsess() url = 'https://%s%s' % (domain, path) r = try_get(sess, url) if r: return 'https', url, r[0], r[1] url = 'http://%s%s' % (domain, path) r = try_get(sess, url) if r: return 'http', url, r[0], r[1] raise InstanceDown("No protocol available for %s/%s" % (domain, path)) def get_about_more_page(domain): return try_get_multiproto(domain, '/about/more') def get_about_page(domain): return try_get_multiproto(domain, '/about') def parse_n(s): s = s.replace(',', '') s = s.strip() return int(s) def parse_title(s): s = s.replace('\n', '') s = s.rsplit('-', 1)[1] s = s.strip() return s def fetch_instance_data(i): protocol, about_url, rtime1, about_page = get_about_page(i.domain) protocol, about_more_url, rtime2, about_more_page = get_about_more_page(i.domain) # Parse /about if 'Get started' in about_page or 'user_password_confirmation' in about_page: i.open = True elif 'registrations on this instance are closed' in about_page or 'closed-registrations-message' in about_page: i.open = False else: i.open = None # Parse /about/more try: tree = pq(about_more_page) title_q = tree('head title') if title_q: i.name = parse_title(title_q[0].text) ib = tree('.information-board strong') if ib and len(ib) == 3: i.users = parse_n(ib[0].text) i.statuses = parse_n(ib[1].text) i.connections = parse_n(ib[2].text) except: traceback.print_exc() pass i.https_available = (protocol == 'https') if not i.rules_url: i.rules_url = about_more_url # /about/more should be slightly more accurate in terms of server load return (rtime1.total_seconds() + rtime2.total_seconds() * 3) / 4 def check_ip_version(i): addrs = socket.getaddrinfo(i.domain, 443) ipv4_addrs = set(addr[4][0] for addr in addrs if addr[0] == socket.AF_INET) ipv6_addrs = set(addr[4][0] for addr in addrs if addr[0] == socket.AF_INET6) proto = 'https' if i.https_available else 'http' print(addrs) print(ipv4_addrs) print(ipv6_addrs) common = dict( timeout=(CONNECT_TIMEOUT, READ_TIMEOUT), verify=False, headers={ 'User-Agent': USER_AGENT, 'Host': i.domain, }, ) rtimes = [] if ipv4_addrs: while ipv4_addrs: url = '%s://%s/' % (proto, ipv4_addrs.pop()) try: print(url) response = requests.head(url, **common) response.raise_for_status() rtimes.append(response.elapsed) i.ipv4 = True break except: traceback.print_exc() i.ipv4 = False else: i.ipv4 = False if ipv6_addrs: while ipv6_addrs: url = 
'%s://[%s]/' % (proto, ipv6_addrs.pop()) try: response = requests.head(url, **common) response.raise_for_status() rtimes.append(response.elapsed) i.ipv6 = True break except: i.ipv6 = False else: i.ipv6 = False if not rtimes: raise Exception("Couldn't ping any host") return sum(t.total_seconds() for t in rtimes) / len(rtimes) @celery.task(autoretry_for=(Exception,)) def add_instance(domain, sec, notes, rules_url): # Just make sure we don't get duplicates with any retry if Instance.query.filter_by(domain=domain).count() > 0: return i = Instance() i.domain = domain i.sec = sec i.notes = notes i.rules_url = rules_url # Again, give more weight to /about/more but count in IPv4 vs IPv6 time rtime = 0 rtime += fetch_instance_data(i) * 3 rtime += check_ip_version(i) rtime = rtime / 4 rtime = int(round(rtime * 1000)) i.last_state = True i.last_state_change = datetime.utcnow() db.session.add(i) db.session.flush() # Stats have been updated on the Instance, we just archive them as a Ping i.update_state(True, response_time=rtime, users=i.users, statuses=i.statuses, connections=i.connections) db.session.commit() if i.https_available: ssltest_by_id.delay(i.id) @celery.task() def ssltest_by_id(instance_id): i = Instance.query.filter_by(id=instance_id).first() if not i: logger.warning("Unknown instance #%d", instance_id) return ssltest(i) def ssltest(i): bin_path = app.config['SSLLABS_SCAN_BIN'] cmd = [bin_path, '-usecache', '-quiet', i.domain] r = subprocess.check_output(cmd).decode('utf-8') d = json.loads(r) grades = [ssllabs_grade_int(e['grade']) for e in d[0]['endpoints'] if 'grade' in e] logger.debug("Got grades: %r", grades) i.https_grade = min(grades) logger.debug("score: %d", i.https_grade) db.session.commit() @celery.task() def update_instance_by_id(instance_id): i = Instance.query.filter_by(id=instance_id).first() if not i: logger.warning("Unknown instance #%d", instance_id) return return update_instance(i) @celery.task() def update_all_instances(): instances = list(Instance.query.order_by(Instance.id).all()) logger.info("Updating %d instances...", len(instances)) states = [update_instance(i) for i in instances] b_up = len([b for (b, a) in states if b is True]) a_up = len([a for (b, a) in states if a is True]) b_down = len([b for (b, a) in states if b is False]) a_down = len([a for (b, a) in states if a is False]) logger.info("up: %d -> %d (%+d)", b_up, a_up, (a_up - b_up)) logger.info("down: %d -> %d (%+d)", b_down, a_down, (a_down - b_down)) def update_instance(i): prev_state = i.last_state state = None try: rtime = 0 rtime += fetch_instance_data(i) * 3 rtime += check_ip_version(i) rtime = rtime / 4 rtime = int(round(rtime * 1000)) state = True i.update_state(True, response_time=rtime, users=i.users, statuses=i.statuses, connections=i.connections) logger.debug("updated instance '%s' #%d: UP %.2fms", i.domain, i.id, rtime) db.session.commit() except: state = False db.session.refresh(i) i.update_state(False, 0) logger.debug("updated instance '%s' #%d: DOWN", i.domain, i.id) db.session.commit() return (prev_state, state)
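
# --- Illustrative sketch (assumption: not part of this module) --------------
# add_instance() / update_instance() combine timings with fixed weights:
# inside fetch_instance_data(), /about/more is weighted 3:1 against /about,
# and that fetch result is then weighted 3:1 against the mean of the per-
# address HEAD checks from check_ip_version().  A worked example of that
# arithmetic with hypothetical timings:

def _response_time_example():
    about = 0.20          # seconds for GET /about
    about_more = 0.40     # seconds for GET /about/more
    ip_check = 0.65       # mean of the per-address HEAD requests

    fetch = (about + about_more * 3) / 4        # 0.35 s
    combined = (fetch * 3 + ip_check) / 4       # 0.425 s
    return int(round(combined * 1000))          # 425 ms, as stored on the Ping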
import functools import matplotlib.widgets import vaex.ui.plugin from vaex.ui import undo from vaex.ui.qt import * from vaex.ui.icons import iconfile import logging import vaex.ui.undo as undo import vaex.ui.qt as dialogs import re logger = logging.getLogger("vaex.plugin.tasks") def loglog(plot_window): for layer in plot_window.layers: for i, expression in enumerate(layer.state.expressions): layer.set_expression("log10(%s)" % expression, i) plot_window.queue_history_change("task: log/log") def removelog(plot_window): def remove_log(expression): return re.sub("^\s*(log|log2|log10)\((.*?)\)\s*$", "\\2", expression) for layer in plot_window.layers: for i, expression in enumerate(layer.state.expressions): layer.set_expression(remove_log(expression), i) plot_window.queue_history_change("task: remove log/log") def loglog_and_sigma3(plot_window): removelog(plot_window) loglog(plot_window) plot_window.queue_history_change(None) sigma3(plot_window) plot_window.queue_history_change("task: log/log and 3 sigma region") def sigma3(plot_window): if plot_window.layers: layer = plot_window.layers[0] if layer.dataset.is_local(): executor = vaex.execution.Executor() else: executor = vaex.remote.ServerExecutor() subspace = layer.dataset.subspace(*layer.state.expressions, executor=executor, delay=True) means = subspace.mean() with dialogs.ProgressExecution(plot_window, "Calculating mean", executor=executor) as progress: progress.add_task(means) progress.execute() logger.debug("get means") means = means.get() logger.debug("got means") vars = subspace.var(means=means) with dialogs.ProgressExecution(plot_window, "Calculating variance", executor=executor) as progress: progress.add_task(vars) progress.execute() #limits = limits.get() vars = vars.get() stds = vars**0.5 sigmas = 3 limits = list(zip(means-sigmas*stds, means+sigmas*stds)) #plot_window.ranges_show = limits plot_window.set_ranges(range(len(limits)), limits, add_to_history=True, reason="3 sigma region") #plot_window.update_all_layers() #for layer in plot_window.layers: # layer.flag_needs_update() logger.debug("means=%r", means) logger.debug("vars=%r", vars) logger.debug("limits=%r", limits) plot_window.queue_history_change("task: 3 sigma region") #plot_window.queue_update() def subtract_mean(plot_window): if plot_window.layers: layer = plot_window.layers[0] executor = vaex.execution.Executor() subspace = layer.dataset.subspace(*layer.state.expressions, executor=executor, delay=True) means = subspace.mean() with dialogs.ProgressExecution(plot_window, "Calculating mean", executor=executor): executor.execute() means = means.get() new_expressions = ["(%s) - %s" % (expression, mean) for expression, mean in zip(layer.state.expressions, means)] for i in range(len(new_expressions)): vmin, vmax = layer.plot_window.state.ranges_viewport[i] vmin -= means[i] vmax -= means[i] layer.plot_window.set_range(vmin, vmax, i) for i in range(len(new_expressions)): layer.set_expression(new_expressions[i], i) plot_window.update_all_layers() plot_window.queue_history_change("task: remove mean") @vaex.ui.plugin.pluginclass class TasksPlugin(vaex.ui.plugin.PluginPlot): name = "tasks" def __init__(self, dialog): super(TasksPlugin, self).__init__(dialog) dialog.plug_toolbar(self.plug_toolbar, 1.6) def plug_toolbar(self): logger.info("adding %s plugin" % self.name) self.menu = QtGui.QMenu("&Tasks") self.dialog.menu_bar.addMenu(self.menu) self.tasks_button = QtGui.QToolButton() self.tasks_button.setIcon(QtGui.QIcon(iconfile('gear'))) self.tasks_button.setText("Tasks") 
self.tasks_button.setPopupMode(QtGui.QToolButton.InstantPopup) self.tasks_button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.tasks_button.setMenu(self.menu) #self.action_tasks = QtGui.QAction(QtGui.QIcon(iconfile('gear')), 'Tasks', self.dialog) self.dialog.toolbar.addWidget(self.tasks_button) self.action_tasks_loglog = QtGui.QAction(QtGui.QIcon(iconfile('gear')), 'Log-Log', self.dialog) self.menu.addAction(self.action_tasks_loglog) self.action_tasks_loglog.triggered.connect(lambda *arg: loglog(self.dialog)) self.action_tasks_loglog = QtGui.QAction(QtGui.QIcon(iconfile('gear')), '3 sigma region', self.dialog) self.menu.addAction(self.action_tasks_loglog) self.action_tasks_loglog.triggered.connect(lambda *arg: sigma3(self.dialog)) self.action_tasks_loglog_3sigma = QtGui.QAction(QtGui.QIcon(iconfile('gear')), 'Log-Log and 3 sigma region', self.dialog) self.menu.addAction(self.action_tasks_loglog_3sigma) self.action_tasks_loglog_3sigma.triggered.connect(lambda *arg: loglog_and_sigma3(self.dialog)) self.action_tasks_removelog = QtGui.QAction(QtGui.QIcon(iconfile('gear')), 'Remove log', self.dialog) self.menu.addAction(self.action_tasks_removelog) self.action_tasks_removelog.triggered.connect(lambda *arg: removelog(self.dialog)) self.action_tasks_subtract_mean = QtGui.QAction(QtGui.QIcon(iconfile('gear')), 'Subtract mean', self.dialog) self.menu.addAction(self.action_tasks_subtract_mean) self.action_tasks_subtract_mean.triggered.connect(lambda *arg: subtract_mean(self.dialog)) def __(self): self.action_store = QtGui.QAction(QtGui.QIcon(iconfile('gear')), 'Store', self.dialog) self.action_store.setShortcut("Ctrl+B") self.action_store_toolbar = QtGui.QAction(QtGui.QIcon(iconfile('star')), 'Store', self.dialog) self.dialog.toolbar.addAction(self.action_store_toolbar) self.action_store.triggered.connect(self.on_store) self.action_store_toolbar.triggered.connect(self.on_store) self.changed_handle = storage_plots.changed.connect(self.load_options_menu) self.load_options_menu() self.dialog.menu_mode.addSeparator() self.action_zoom_rect = QtGui.QAction(QtGui.QIcon(iconfile('zoom')), '&Zoom to rect', self.dialog) self.action_zoom_rect.setShortcut("Ctrl+Alt+Z") self.dialog.menu_mode.addAction(self.action_zoom_rect) self.action_zoom_x = QtGui.QAction(QtGui.QIcon(iconfile('zoom_x')), '&Zoom x', self.dialog) self.action_zoom_y = QtGui.QAction(QtGui.QIcon(iconfile('zoom_y')), '&Zoom y', self.dialog) self.action_zoom = QtGui.QAction(QtGui.QIcon(iconfile('zoom')), '&Zoom(you should not read this)', self.dialog) self.action_zoom_x.setShortcut("Ctrl+Alt+X") self.action_zoom_y.setShortcut("Ctrl+Alt+Y") self.dialog.menu_mode.addAction(self.action_zoom_x) self.dialog.menu_mode.addAction(self.action_zoom_y) self.dialog.menu_mode.addSeparator() self.action_zoom_out = QtGui.QAction(QtGui.QIcon(iconfile('zoom_out')), '&Zoom out', self.dialog) self.action_zoom_in = QtGui.QAction(QtGui.QIcon(iconfile('zoom_in')), '&Zoom in', self.dialog) self.action_zoom_fit = QtGui.QAction(QtGui.QIcon(iconfile('arrow_out')), '&Reset view', self.dialog) #self.action_zoom_use = QtGui.QAction(QtGui.QIcon(iconfile('chart_bar')), '&Use zoom area', self.dialog) self.action_zoom_out.setShortcut("Ctrl+Alt+-") self.action_zoom_in.setShortcut("Ctrl+Alt++") self.action_zoom_fit.setShortcut("Ctrl+Alt+0") self.dialog.menu_mode.addAction(self.action_zoom_out) self.dialog.menu_mode.addAction(self.action_zoom_in) self.dialog.menu_mode.addAction(self.action_zoom_fit) self.dialog.action_group_main.addAction(self.action_zoom_rect) 
self.dialog.action_group_main.addAction(self.action_zoom_x) self.dialog.action_group_main.addAction(self.action_zoom_y) #self.dialog.toolbar.addAction(self.action_zoom_out) #self.dialog.add_shortcut(self.action_zoom_in,"+") #self.dialog.add_shortcut(self.action_zoom_out,"-") #self.dialog.add_shortcut(self.action_zoom_rect,"Z") #self.dialog.add_shortcut(self.action_zoom_x,"Alt+X") #self.dialog.add_shortcut(self.action_zoom_y,"Alt+Y") #self.dialog.add_shortcut(self.action_zoom_fit, "0") self.dialog.toolbar.addAction(self.action_zoom) self.zoom_menu = QtGui.QMenu() self.action_zoom.setMenu(self.zoom_menu) self.zoom_menu.addAction(self.action_zoom_rect) self.zoom_menu.addAction(self.action_zoom_x) self.zoom_menu.addAction(self.action_zoom_y) if self.dialog.dimensions == 1: self.lastActionZoom = self.action_zoom_x # this makes more sense for histograms as default else: self.lastActionZoom = self.action_zoom_rect self.dialog.toolbar.addSeparator() #self.dialog.toolbar.addAction(self.action_zoom_out) self.dialog.toolbar.addAction(self.action_zoom_fit) self.action_zoom.triggered.connect(self.onActionZoom) self.action_zoom_out.triggered.connect(self.onZoomOut) self.action_zoom_in.triggered.connect(self.onZoomIn) self.action_zoom_fit.triggered.connect(self.onZoomFit) #self.action_zoom_use.triggered.connect(self.onZoomUse) self.action_zoom.setCheckable(True) self.action_zoom_rect.setCheckable(True) self.action_zoom_x.setCheckable(True) self.action_zoom_y.setCheckable(True)
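# --- Illustrative sketch (not part of the plugin) ----------------------------
# The only non-obvious piece of the tasks plugin above is how loglog() and
# removelog() rewrite layer expressions as strings. The snippet below mirrors
# that string handling in isolation so it can be tried without a vaex plot
# window; add_log/remove_log are local names for this sketch only.
import re

def add_log(expression):
    # loglog() wraps each axis expression in log10(...)
    return "log10(%s)" % expression

def remove_log(expression):
    # removelog() strips one outer log/log2/log10 call and keeps its argument
    return re.sub(r"^\s*(log|log2|log10)\((.*?)\)\s*$", r"\2", expression)

if __name__ == "__main__":
    expr = "sqrt(x**2 + y**2)"
    assert remove_log(add_log(expr)) == expr
    print(add_log(expr))      # log10(sqrt(x**2 + y**2))
    print(remove_log(expr))   # unchanged: expr has no outer log call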
#! /Users/rkrsn/miniconda/bin/python from __future__ import print_function, division import os import sys # Update PYTHONPATH root = os.path.abspath(os.path.join(os.getcwd().split('src')[0], 'src')) if not root in sys.path: sys.path.append(root) import numpy as np import csv from random import seed as rseed # from cliffsDelta import cliffs from lib.dEvol import tuner from tools.sk import rdivDemo from numpy import sum from Prediction import rforest, Bugs from methods1 import * from Planner.CROSSTREES import xtrees from Planner.HOW import treatments as HOW from Planner.strawman import strawman def say(x): sys.stdout.write(str(x)) def write2file(data, fname='Untitled', ext='.txt'): with open('.temp/' + fname + ext, 'w') as fwrite: writer = csv.writer(fwrite, delimiter=',') if not isinstance(data[0], list): writer.writerow(data) else: for b in data: writer.writerow(b) def genTable(tbl, rows, name='tmp'): header = [h.name for h in tbl.headers[:-1]] with open(name + '.csv', 'w') as csvfile: writer = csv.writer(csvfile, delimiter=',') writer.writerow(header) for el in rows: if len(el[:-1]) < len(header): writer.writerow(el) else: writer.writerow(el[:-1]) return createTbl([name + '.csv']) class run(): def __init__( self, pred=rforest, _smoteit=True, _n=-1, _tuneit=False, dataName=None, reps=1): self.pred = pred self.dataName = dataName self.out, self.out_pred = [self.dataName], [] self._smoteit = _smoteit self.train, self.test = self.categorize() self.reps = reps self._n = _n self.tunedParams = None if not _tuneit \ else tuner(self.pred, self.train[_n]) self.headers = createTbl( self.train[ self._n], isBin=False, bugThres=1).headers def categorize(self): dir = os.path.join(root, 'Data/Jureczko') self.projects = [Name for _, Name, __ in walk(dir)][0] self.numData = len(self.projects) # Number of data one, two = explore(dir) data = [one[i] + two[i] for i in xrange(len(one))] def withinClass(data): N = len(data) return [(data[:n], [data[n]]) for n in range(1, N)] def whereis(): for indx, name in enumerate(self.projects): if name == self.dataName: return indx try: return [ dat[0] for dat in withinClass(data[whereis()])], [ dat[1] for dat in withinClass(data[whereis()])] # Train, Test except: set_trace() def logResults(self, *args): for a in args: print(a) def go(self): base = lambda X: sorted(X)[-1] - sorted(X)[0] newRows = lambda newTab: map(lambda Rows: Rows.cells[:-1], newTab._rows) after = lambda newTab: self.pred( train_DF, newTab, tunings=self.tunedParams, smoteit=True) frac = lambda aft: 1 - (sum([0 if a < 1 else 1 for a in aft]) \ / sum([0 if b < 1 else 1 for b in actual])) for planner in ['xtrees', 'CD']: out = [planner] for _ in xrange(self.reps): predRows = [] train_DF = createTbl(self.train[self._n], isBin=True) test_df = createTbl(self.test[self._n], isBin=True) actual = np.array(Bugs(test_df)) before = self.pred(train_DF, test_df, tunings=self.tunedParams, smoteit=True) predRows = [row.cells for row in createTbl( self.test[self._n], isBin=True)._rows if row.cells[-2] > 0] predTest = genTable(test_df, rows=predRows, name='Before_temp') "Apply Different Planners" if planner == 'xtrees': newTab = xtrees(train=self.train[-1], test_DF=predTest, bin=False, majority=True).main() genTable(test_df, rows=newRows(newTab), name='After_xtrees') # set_trace() elif planner == 'XTREE': newTab = xtrees(train=self.train[-1], test_DF=predTest, bin=False, majority=False).main() elif planner == 'BIC': newTab = HOW(train=self.train[-1], test=self.test[-1], test_df=predTest).main() elif planner == 'CD': 
newTab = strawman(train=self.train[-1], test=self.test[-1]).main() elif planner == 'CD+FS': newTab = strawman(train=self.train[-1], test=self.test[-1] , prune=True).main() out.append(frac(after(newTab))) # self.logResults(out) yield out # ---------- Debug ---------- # set_trace() def delta1(self, cDict, headers, norm): for el in cDict: D = len(headers[:-2]) * [0] for k in el.keys(): for i, n in enumerate(headers[:-1]): if n.name[1:] == k: D[i] += 100 yield D def delta0(self, headers, norm, Planner='xtrees'): before, after = open('.temp/before.txt'), open('.temp/' + Planner + '.txt') D = len(headers[:-1]) * [0] for line1, line2 in zip(before, after): row1 = np.array([float(l) for l in line1.strip().split(',')[:-2]]) row2 = np.array([float(l) for l in line2.strip().split(',')[:-1]]) changed = (row2 - row1).tolist() for i, c in enumerate(changed): if c > 0: D[i] += 100 return D def deltas(self, planner): delta = [] train_DF = createTbl(self.train[self._n], isBin=True, bugThres=1) test_df = createTbl(self.test[self._n], isBin=True, bugThres=1) actual = np.array(Bugs(test_df)) before = self.pred(train_DF, test_df, tunings=self.tunedParams, smoteit=True) allRows = np.array( map( lambda Rows: np.array( Rows.cells[ :- 1]), train_DF._rows + test_df._rows)) def min_max(): N = len(allRows[0]) base = lambda X: sorted(X)[-1] - sorted(X)[0] return [base([r[i] for r in allRows]) for i in xrange(N)] predRows = [row.cells for row in createTbl( self.test[self._n], isBin=True)._rows if row.cells[-2] > 0] write2file(predRows, fname='before') # save file """ Apply Learner """ for _ in xrange(1): predTest = genTable(test_df, rows=predRows) newRows = lambda newTab: map(lambda Rows: Rows.cells[:-1], newTab._rows) "Apply Different Planners" if planner == 'xtrees': xTrees = xtrees(train=self.train[-1], test_DF=predTest, bin=False, majority=True).main(justDeltas=True) delta.append( [d for d in self.delta1(xTrees, train_DF.headers, norm=len(predRows))]) return (np.sum( delta[0], axis=0) / np.array((len(predRows[0]) - 2) * [len(predRows)])).tolist() elif planner == 'XTREE' or planner == 'XTREE': C4_5 = xtrees(train=self.train[-1], test_DF=predTest, bin=False, majority=False).main(justDeltas=True) delta.append( [d for d in self.delta1(C4_5, train_DF.headers, norm=len(predRows))]) return (np.sum( delta[0], axis=0) / np.array((len(predRows[0]) - 2) * [len(predRows)])).tolist() elif planner == 'BIC': how = HOW(train=self.train[-1], test=self.test[-1], test_df=predTest).main(justDeltas=True) delta.append( [d for d in self.delta1(how, train_DF.headers, norm=len(predRows))]) return (np.sum( delta[0], axis=0) / np.array((len(predRows[0]) - 2) * [len(predRows)])).tolist() elif planner == 'CD': baseln = strawman( train=self.train[-1], test=self.test[-1]).main(justDeltas=True) delta.append( [d for d in self.delta1(baseln, train_DF.headers, norm=len(predRows))]) return (np.sum( delta[0], axis=0) / np.array((len(predRows[0]) - 2) * [len(predRows)])).tolist() elif planner == 'CD+FS': baselnFss = strawman( train=self.train[-1], test=self.test[-1], prune=True).main(justDeltas=True) delta.append( [d for d in self.delta1(baselnFss, train_DF.headers, norm=len(predRows))]) return (np.sum( delta[0], axis=0) / np.array((len(predRows[0]) - 2) * [len(predRows)])).tolist() # -------- DEBUG! -------- # set_trace() def deltaCSVwriter0(): Planners = ['XTREE', 'BIC', 'CD', 'CD+FS'] print(',%s,%s,%s,%s' % tuple(Planners)) for name in ['ant', 'ivy', 'jedit', 'lucene', 'poi']: say(name) delta = [] R = run(dataName=name, reps=1) # Setup Files. 
for p in Planners: delta.append(R.deltas(planner=p)) D = np.mean(delta, axis=1).tolist() for n in D: say(',%0.2f' % (n)) print('') # set_trace() def deltaCSVwriter(type='Indv'): if type == 'Indv': for name in ['lucene']: print('##', name) delta = [] Planners = ['XTREE', 'BIC', 'CD', 'CD+FS'] R = run(dataName=name, reps=1) # Setup Files. for p in Planners: delta.append(R.deltas(planner=p)) def getRow(i): for d in delta: yield d[i] # set_trace() with open('/Users/rkrsn/git/GNU-Plots/rkrsn/errorbar/%s.csv' % (name), 'w') as csvfile: writer = csv.writer(csvfile, delimiter=' ') writer.writerow(["Features"] + Planners) for i, h in enumerate(run(dataName=name).headers[:-2]): writer.writerow([h.name[1:]] + [el for el in getRow(i)]) # set_trace() elif type == 'All': delta = [] for name in ['ivy', 'jedit', 'lucene', 'poi', 'ant']: print('##', name) delta.extend(run(dataName=name, reps=4).deltas()) y = np.median(delta, axis=0) yhi, ylo = np.percentile(delta, q=[75, 25], axis=0) dat1 = sorted([(h.name[1:], a, b, c) for h, a, b, c in zip( run(dataName=name).headers[:-2], y, ylo, yhi)], key=lambda F: F[1]) dat = np.asarray([(d[0], n, d[1], d[2], d[3]) for d, n in zip(dat1, range(1, 21))]) with open('/Users/rkrsn/git/GNU-Plots/rkrsn/errorbar/all.csv', 'w') as csvfile: writer = csv.writer(csvfile, delimiter=' ') for el in dat[()]: writer.writerow(el) def rdiv(): lst = [] def striplines(line): listedline = line.strip().split(',') # split around the = sign listedline[0] = listedline[0][2:-1] lists = [listedline[0]] for ll in listedline[1:-1]: lists.append(float(ll)) return lists f = open('./jedit.dat') for line in f: lst.append(striplines(line[:-1])) rdivDemo(lst, isLatex=False) set_trace() def deltaTest(): for file in ['ivy', 'poi', 'jedit', 'ant', 'lucene']: print('##', file) R = run(dataName=file, reps=12).deltas() def _test(file='ant'): rseed(1) for file in ['ivy', 'lucene', 'jedit', 'poi', 'ant']: print('## %s\n' % (file)) R = [r for r in run(dataName=file, reps=10, _tuneit=False).go()] rdivDemo(R, isLatex=False) if __name__ == '__main__': _test() # deltaTest() # rdiv() # deltaCSVwriter(type='All') # deltaCSVwriter(type='Indv') # deltaCSVwriter0() # eval(cmd())
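# --- Illustrative sketch (not part of the experiment script) ------------------
# The headline number reported by run.go() above is the frac(...) lambda: one
# minus the ratio of rows still predicted defective after a planner to the rows
# predicted defective before it. The standalone version below reproduces that
# arithmetic on made-up bug-count lists; improvement_fraction and the sample
# data are illustrative names only.
from __future__ import division

def improvement_fraction(before_counts, after_counts):
    # count rows with at least one predicted bug
    defective = lambda counts: sum(0 if c < 1 else 1 for c in counts)
    return 1 - defective(after_counts) / defective(before_counts)

if __name__ == '__main__':
    actual = [2, 0, 1, 3, 0, 1]   # bug counts predicted before planning (4 defective rows)
    aft = [0, 0, 1, 0, 0, 1]      # bug counts predicted after planning (2 defective rows)
    print(improvement_fraction(actual, aft))  # 0.5 -> half the defective rows were resolved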
# Copyright 2013 Brocade Communications System, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # (Some parts adapted from LinuxBridge Plugin) # TODO(shiv) need support for security groups """Implentation of Brocade Neutron Plugin.""" from oslo.config import cfg from oslo import messaging from oslo.utils import importutils from oslo_context import context as oslo_context from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import l3_rpc from neutron.api.rpc.handlers import metadata_rpc from neutron.api.rpc.handlers import securitygroups_rpc from neutron.common import constants as q_const from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import api as db from neutron.db import db_base_plugin_v2 from neutron.db import external_net_db from neutron.db import extraroute_db from neutron.db import l3_agentschedulers_db from neutron.db import portbindings_base from neutron.db import securitygroups_rpc_base as sg_db_rpc from neutron.extensions import portbindings from neutron.extensions import securitygroup as ext_sg from neutron.i18n import _LE, _LI from neutron.openstack.common import log as logging from neutron.plugins.brocade.db import models as brocade_db from neutron.plugins.brocade import vlanbm as vbm from neutron.plugins.common import constants as svc_constants LOG = logging.getLogger(__name__) PLUGIN_VERSION = 0.88 AGENT_OWNER_PREFIX = "network:" NOS_DRIVER = 'neutron.plugins.brocade.nos.nosdriver.NOSdriver' SWITCH_OPTS = [cfg.StrOpt('address', default='', help=_('The address of the host to SSH to')), cfg.StrOpt('username', default='', help=_('The SSH username to use')), cfg.StrOpt('password', default='', secret=True, help=_('The SSH password to use')), cfg.StrOpt('ostype', default='NOS', help=_('Currently unused')) ] PHYSICAL_INTERFACE_OPTS = [cfg.StrOpt('physical_interface', default='eth0', help=_('The network interface to use when creating' 'a port')) ] cfg.CONF.register_opts(SWITCH_OPTS, "SWITCH") cfg.CONF.register_opts(PHYSICAL_INTERFACE_OPTS, "PHYSICAL_INTERFACE") class BridgeRpcCallbacks(object): """Agent callback.""" target = messaging.Target(version='1.2') # Device names start with "tap" # history # 1.1 Support Security Group RPC # 1.2 Support get_devices_details_list def get_device_details(self, rpc_context, **kwargs): """Agent requests device details.""" agent_id = kwargs.get('agent_id') device = kwargs.get('device') LOG.debug("Device %(device)s details requested from %(agent_id)s", {'device': device, 'agent_id': agent_id}) port = brocade_db.get_port(rpc_context, device[len(q_const.TAP_DEVICE_PREFIX):]) if port: entry = {'device': device, 'vlan_id': port.vlan_id, 'network_id': port.network_id, 'port_id': port.port_id, 
'physical_network': port.physical_interface, 'admin_state_up': port.admin_state_up } else: entry = {'device': device} LOG.debug("%s can not be found in database", device) return entry def get_devices_details_list(self, rpc_context, **kwargs): return [ self.get_device_details( rpc_context, device=device, **kwargs ) for device in kwargs.pop('devices', []) ] def update_device_down(self, rpc_context, **kwargs): """Device no longer exists on agent.""" device = kwargs.get('device') port = self.get_port_from_device(device) if port: entry = {'device': device, 'exists': True} # Set port status to DOWN port_id = port['port_id'] brocade_db.update_port_state(rpc_context, port_id, False) else: entry = {'device': device, 'exists': False} LOG.debug("%s can not be found in database", device) return entry class SecurityGroupServerRpcMixin(sg_db_rpc.SecurityGroupServerRpcMixin): @classmethod def get_port_from_device(cls, device): """Get port from the brocade specific db.""" # TODO(shh) context is not being passed as # an argument to this function; # # need to be fixed in: # file: neutron/db/securtygroups_rpc_base.py # function: securitygroup_rules_for_devices() # which needs to pass context to us # Doing what other plugins are doing session = db.get_session() port = brocade_db.get_port_from_device( session, device[len(q_const.TAP_DEVICE_PREFIX):]) # TODO(shiv): need to extend the db model to include device owners # make it appears that the device owner is of type network if port: port['device'] = device port['device_owner'] = AGENT_OWNER_PREFIX port['binding:vif_type'] = 'bridge' return port class AgentNotifierApi(sg_rpc.SecurityGroupAgentRpcApiMixin): """Agent side of the linux bridge rpc API. API version history: 1.0 - Initial version. 1.1 - Added get_active_networks_info, create_dhcp_port, and update_dhcp_port methods. """ def __init__(self, topic): self.topic = topic target = messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) self.topic_network_delete = topics.get_topic_name(topic, topics.NETWORK, topics.DELETE) self.topic_port_update = topics.get_topic_name(topic, topics.PORT, topics.UPDATE) def network_delete(self, context, network_id): cctxt = self.client.prepare(topic=self.topic_network_delete, fanout=True) cctxt.cast(context, 'network_delete', network_id=network_id) def port_update(self, context, port, physical_network, vlan_id): cctxt = self.client.prepare(topic=self.topic_port_update, fanout=True) cctxt.cast(context, 'port_update', port=port, physical_network=physical_network, vlan_id=vlan_id) class BrocadePluginV2(db_base_plugin_v2.NeutronDbPluginV2, external_net_db.External_net_db_mixin, extraroute_db.ExtraRoute_db_mixin, SecurityGroupServerRpcMixin, l3_agentschedulers_db.L3AgentSchedulerDbMixin, agentschedulers_db.DhcpAgentSchedulerDbMixin, portbindings_base.PortBindingBaseMixin): """BrocadePluginV2 is a Neutron plugin. Provides L2 Virtual Network functionality using VDX. Upper layer driver class that interfaces to NETCONF layer below. """ def __init__(self): """Initialize Brocade Plugin. Specify switch address and db configuration. """ super(BrocadePluginV2, self).__init__() self.supported_extension_aliases = ["binding", "security-group", "external-net", "router", "extraroute", "agent", "l3_agent_scheduler", "dhcp_agent_scheduler"] self.physical_interface = (cfg.CONF.PHYSICAL_INTERFACE. 
physical_interface) self.base_binding_dict = self._get_base_binding_dict() portbindings_base.register_port_dict_function() self.ctxt = oslo_context.get_admin_context() self.ctxt.session = db.get_session() self._vlan_bitmap = vbm.VlanBitmap(self.ctxt) self._setup_rpc() self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver ) self.router_scheduler = importutils.import_object( cfg.CONF.router_scheduler_driver ) self.brocade_init() def brocade_init(self): """Brocade specific initialization.""" self._switch = {'address': cfg.CONF.SWITCH.address, 'username': cfg.CONF.SWITCH.username, 'password': cfg.CONF.SWITCH.password } self._driver = importutils.import_object(NOS_DRIVER) def _setup_rpc(self): # RPC support self.service_topics = {svc_constants.CORE: topics.PLUGIN, svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} self.rpc_context = oslo_context.RequestContext('neutron', 'neutron', is_admin=False) self.conn = n_rpc.create_connection(new=True) self.endpoints = [BridgeRpcCallbacks(), securitygroups_rpc.SecurityGroupServerRpcCallback(), dhcp_rpc.DhcpRpcCallback(), l3_rpc.L3RpcCallback(), agents_db.AgentExtRpcCallback(), metadata_rpc.MetadataRpcCallback()] for svc_topic in self.service_topics.values(): self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) # Consume from all consumers in threads self.conn.consume_in_threads() self.notifier = AgentNotifierApi(topics.AGENT) self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( l3_rpc_agent_api.L3AgentNotifyAPI() ) def create_network(self, context, network): """Create network. This call to create network translates to creation of port-profile on the physical switch. """ with context.session.begin(subtransactions=True): net = super(BrocadePluginV2, self).create_network(context, network) net_uuid = net['id'] vlan_id = self._vlan_bitmap.get_next_vlan(None) switch = self._switch try: self._driver.create_network(switch['address'], switch['username'], switch['password'], vlan_id) except Exception: # Proper formatting LOG.exception(_LE("Brocade NOS driver error")) LOG.debug("Returning the allocated vlan (%d) to the pool", vlan_id) self._vlan_bitmap.release_vlan(int(vlan_id)) raise Exception(_("Brocade plugin raised exception, " "check logs")) brocade_db.create_network(context, net_uuid, vlan_id) self._process_l3_create(context, net, network['network']) LOG.info(_LI("Allocated vlan (%d) from the pool"), vlan_id) return net def delete_network(self, context, net_id): """Delete network. This call to delete the network translates to removing the port-profile on the physical switch. 
""" with context.session.begin(subtransactions=True): self._process_l3_delete(context, net_id) result = super(BrocadePluginV2, self).delete_network(context, net_id) # we must delete all ports in db first (foreign key constraint) # there is no need to delete port in the driver (its a no-op) # (actually: note there is no such call to the driver) bports = brocade_db.get_ports(context, net_id) for bport in bports: brocade_db.delete_port(context, bport['port_id']) # find the vlan for this network net = brocade_db.get_network(context, net_id) vlan_id = net['vlan'] # Tell hw to do remove PP switch = self._switch try: self._driver.delete_network(switch['address'], switch['username'], switch['password'], vlan_id) except Exception: # Proper formatting LOG.exception(_LE("Brocade NOS driver error")) raise Exception(_("Brocade plugin raised exception, " "check logs")) # now ok to delete the network brocade_db.delete_network(context, net_id) # relinquish vlan in bitmap self._vlan_bitmap.release_vlan(int(vlan_id)) return result def update_network(self, context, id, network): session = context.session with session.begin(subtransactions=True): net = super(BrocadePluginV2, self).update_network(context, id, network) self._process_l3_update(context, net, network['network']) return net def create_port(self, context, port): """Create logical port on the switch.""" tenant_id = port['port']['tenant_id'] network_id = port['port']['network_id'] admin_state_up = port['port']['admin_state_up'] physical_interface = self.physical_interface with context.session.begin(subtransactions=True): bnet = brocade_db.get_network(context, network_id) vlan_id = bnet['vlan'] neutron_port = super(BrocadePluginV2, self).create_port(context, port) self._process_portbindings_create_and_update(context, port['port'], neutron_port) interface_mac = neutron_port['mac_address'] port_id = neutron_port['id'] switch = self._switch # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx mac = self.mac_reformat_62to34(interface_mac) try: self._driver.associate_mac_to_network(switch['address'], switch['username'], switch['password'], vlan_id, mac) except Exception: # Proper formatting LOG.exception(_LE("Brocade NOS driver error")) raise Exception(_("Brocade plugin raised exception, " "check logs")) # save to brocade persistent db brocade_db.create_port(context, port_id, network_id, physical_interface, vlan_id, tenant_id, admin_state_up) # apply any extensions return neutron_port def delete_port(self, context, port_id): with context.session.begin(subtransactions=True): neutron_port = self.get_port(context, port_id) interface_mac = neutron_port['mac_address'] # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx mac = self.mac_reformat_62to34(interface_mac) brocade_port = brocade_db.get_port(context, port_id) vlan_id = brocade_port['vlan_id'] switch = self._switch try: self._driver.dissociate_mac_from_network(switch['address'], switch['username'], switch['password'], vlan_id, mac) except Exception: LOG.exception(_LE("Brocade NOS driver error")) raise Exception( _("Brocade plugin raised exception, check logs")) super(BrocadePluginV2, self).delete_port(context, port_id) brocade_db.delete_port(context, port_id) def update_port(self, context, port_id, port): original_port = self.get_port(context, port_id) session = context.session port_updated = False with session.begin(subtransactions=True): # delete the port binding and read it with the new rules if ext_sg.SECURITYGROUPS in port['port']: port['port'][ext_sg.SECURITYGROUPS] = ( 
self._get_security_groups_on_port(context, port)) self._delete_port_security_group_bindings(context, port_id) # process_port_create_security_group also needs port id port['port']['id'] = port_id self._process_port_create_security_group( context, port['port'], port['port'][ext_sg.SECURITYGROUPS]) port_updated = True port_data = port['port'] port = super(BrocadePluginV2, self).update_port( context, port_id, port) self._process_portbindings_create_and_update(context, port_data, port) if original_port['admin_state_up'] != port['admin_state_up']: port_updated = True if (original_port['fixed_ips'] != port['fixed_ips'] or not utils.compare_elements( original_port.get(ext_sg.SECURITYGROUPS), port.get(ext_sg.SECURITYGROUPS))): self.notifier.security_groups_member_updated( context, port.get(ext_sg.SECURITYGROUPS)) if port_updated: self._notify_port_updated(context, port) return port def _notify_port_updated(self, context, port): port_id = port['id'] bport = brocade_db.get_port(context, port_id) self.notifier.port_update(context, port, bport.physical_interface, bport.vlan_id) def _get_base_binding_dict(self): binding = { portbindings.VIF_TYPE: portbindings.VIF_TYPE_BRIDGE, portbindings.VIF_DETAILS: { # TODO(rkukura): Replace with new VIF security details portbindings.CAP_PORT_FILTER: 'security-group' in self.supported_extension_aliases}} return binding def get_plugin_version(self): """Get version number of the plugin.""" return PLUGIN_VERSION @staticmethod def mac_reformat_62to34(interface_mac): """Transform MAC address format. Transforms from 6 groups of 2 hexadecimal numbers delimited by ":" to 3 groups of 4 hexadecimals numbers delimited by ".". :param interface_mac: MAC address in the format xx:xx:xx:xx:xx:xx :type interface_mac: string :returns: MAC address in the format xxxx.xxxx.xxxx :rtype: string """ mac = interface_mac.replace(":", "") mac = mac[0:4] + "." + mac[4:8] + "." + mac[8:12] return mac
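# --- Illustrative sketch (not part of the plugin) -----------------------------
# BrocadePluginV2.mac_reformat_62to34() above converts the colon-separated MAC
# address that Neutron stores (xx:xx:xx:xx:xx:xx) into the dotted three-group
# form (xxxx.xxxx.xxxx) expected by the NOS NETCONF driver. The copy below only
# demonstrates that transformation; the plugin calls its own staticmethod.
def mac_reformat_62to34(interface_mac):
    mac = interface_mac.replace(":", "")
    return mac[0:4] + "." + mac[4:8] + "." + mac[8:12]

assert mac_reformat_62to34("fa:16:3e:1b:2c:3d") == "fa16.3e1b.2c3d"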
# Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Exceptions that can be thrown by calliope tools. The exceptions in this file, and those that extend them, can be thrown by the Run() function in calliope tools without worrying about stack traces littering the screen in CLI mode. In interpreter mode, they are not caught from within calliope. """ from functools import wraps import os import sys from googlecloudsdk.api_lib.util import exceptions as api_exceptions from googlecloudsdk.core import exceptions as core_exceptions from googlecloudsdk.core import log from googlecloudsdk.core.console import console_attr from googlecloudsdk.core.console import console_attr_os def NewErrorFromCurrentException(error, *args): """Creates a new error based on the current exception being handled. If no exception is being handled, a new error with the given args is created. If there is a current exception, the original exception is first logged (to file only). A new error is then created with the same args as the current one. Args: error: The new error to create. *args: The standard args taken by the constructor of Exception for the new exception that is created. If None, the args from the exception currently being handled will be used. Returns: The generated error exception. """ (_, current_exception, _) = sys.exc_info() # Log original exception details and traceback to the log file if we are # currently handling an exception. if current_exception: file_logger = log.file_only_logger file_logger.error('Handling the source of a tool exception, ' 'original details follow.') file_logger.exception(current_exception) if args: return error(*args) elif current_exception: return error(*current_exception.args) return error('An unknown error has occurred') # TODO(b/32328530): Remove ToolException when the last ref is gone class ToolException(core_exceptions.Error): """ToolException is for Run methods to throw for non-code-bug errors. Attributes: command_name: The dotted group and command name for the command that threw this exception. This value is set by calliope. """ @staticmethod def FromCurrent(*args): return NewErrorFromCurrentException(ToolException, *args) class ExitCodeNoError(core_exceptions.Error): """A special exception for exit codes without error messages. If this exception is raised, it's identical in behavior to returning from the command code, except the overall exit code will be different. """ class FailedSubCommand(core_exceptions.Error): """Exception capturing a subcommand which did sys.exit(code).""" def __init__(self, cmd, code): super(FailedSubCommand, self).__init__( 'Failed command: [{0}] with exit code [{1}]'.format( ' '.join(cmd), code), exit_code=code) def RaiseErrorInsteadOf(error, *error_types): """A decorator that re-raises as an error. If any of the error_types are raised in the decorated function, this decorator will re-raise as an error. Args: error: Exception, The new exception to raise. 
*error_types: [Exception], A list of exception types that this decorator will watch for. Returns: The decorated function. """ def Wrap(func): """Wrapper function for the decorator.""" @wraps(func) def TryFunc(*args, **kwargs): try: return func(*args, **kwargs) except error_types: (_, _, exc_traceback) = sys.exc_info() # The 3 element form takes (type, instance, traceback). If the first # element is an instance, it is used as the type and instance and the # second element must be None. This preserves the original traceback. # pylint:disable=nonstandard-exception, ToolException is an Exception. raise NewErrorFromCurrentException(error), None, exc_traceback return TryFunc return Wrap # TODO(b/32328530): Remove RaiseToolExceptionInsteadOf when the last ref is gone def RaiseToolExceptionInsteadOf(*error_types): """A decorator that re-raises as ToolException.""" return RaiseErrorInsteadOf(ToolException, *error_types) def _TruncateToLineWidth(string, align, width, fill=''): """Truncate string to line width, right aligning at align. Examples (assuming a screen width of 10): >>> _TruncateToLineWidth('foo', 0) 'foo' >>> # Align to the beginning. Should truncate the end. ... _TruncateToLineWidth('0123456789abcdef', 0) '0123456789' >>> _TruncateToLineWidth('0123456789abcdef', 0, fill='...') '0123456...' >>> # Align to the end. Should truncate the beginning. ... _TruncateToLineWidth('0123456789abcdef', 16) '6789abcdef' >>> _TruncateToLineWidth('0123456789abcdef', 16, fill='...') '...9abcdef' >>> # Align to the middle (note: the index is toward the end of the string, ... # because this function right-aligns to the given index). ... # Should truncate the begnining and end. ... _TruncateToLineWidth('0123456789abcdef', 12) '23456789ab' >>> _TruncateToLineWidth('0123456789abcdef', 12, fill='...') '...5678...' Args: string: string to truncate align: index to right-align to width: maximum length for the resulting string fill: if given, indicate truncation with this string. Must be shorter than terminal width / 2. Returns: str, the truncated string Raises: ValueError, if provided fill is too long for the terminal. """ if len(fill) >= width / 2: # Either the caller provided a fill that's way too long, or the user has a # terminal that's way too narrow. In either case, we aren't going to be able # to make this look nice, but we don't want to throw an error because that # will mask the original error. log.warn('Screen not wide enough to display correct error message.') return string if len(string) <= width: return string if align > width: string = fill + string[align-width+len(fill):] if len(string) <= width: return string string = string[:width-len(fill)] + fill return string _MARKER = '^ invalid character' # pylint: disable=g-doc-bad-indent def _FormatNonAsciiMarkerString(args): u"""Format a string that will mark the first non-ASCII character it contains. Example: >>> args = ['command.py', '--foo=\xce\x94'] >>> _FormatNonAsciiMarkerString(args) == ( ... 'command.py --foo=\u0394\n' ... ' ^ invalid character' ... ) True Args: args: The arg list for the command executed Returns: unicode, a properly formatted string with two lines, the second of which indicates the non-ASCII character in the first. 
Raises: ValueError: if the given string is all ASCII characters """ # nonascii will be True if at least one arg contained a non-ASCII character nonascii = False # pos is the position of the first non-ASCII character in ' '.join(args) pos = 0 for arg in args: try: # idx is the index of the first non-ASCII character in arg for idx, char in enumerate(arg): char.decode('ascii') except UnicodeError: # idx will remain set, indicating the first non-ASCII character pos += idx nonascii = True break # this arg was all ASCII; add 1 for the ' ' between args pos += len(arg) + 1 if not nonascii: raise ValueError('The command line is composed entirely of ASCII ' 'characters.') # Make a string that, when printed in parallel, will point to the non-ASCII # character marker_string = ' ' * pos + _MARKER # Make sure that this will still print out nicely on an odd-sized screen align = len(marker_string) args_string = u' '.join([console_attr.EncodeForOutput(arg) for arg in args]) width, _ = console_attr_os.GetTermSize() fill = '...' if width < len(_MARKER) + len(fill): # It's hopeless to try to wrap this and make it look nice. Preserve it in # full for logs and so on. return '\n'.join((args_string, marker_string)) # If len(args_string) < width < len(marker_string) (ex:) # # args_string = 'command BAD' # marker_string = ' ^ invalid character' # width = len('----------------') # # then the truncation can give a result like the following: # # args_string = 'command BAD' # marker_string = ' ^ invalid character' # # (This occurs when args_string is short enough to not be truncated, but # marker_string is long enough to be truncated.) # # ljust args_string to make it as long as marker_string before passing to # _TruncateToLineWidth, which will yield compatible truncations. rstrip at the # end to get rid of the new trailing spaces. formatted_args_string = _TruncateToLineWidth(args_string.ljust(align), align, width, fill=fill).rstrip() formatted_marker_string = _TruncateToLineWidth(marker_string, align, width) return u'\n'.join((formatted_args_string, formatted_marker_string)) class InvalidCharacterInArgException(ToolException): """InvalidCharacterInArgException is for non-ASCII CLI arguments.""" def __init__(self, args, invalid_arg): self.invalid_arg = invalid_arg cmd = os.path.basename(args[0]) if cmd.endswith('.py'): cmd = cmd[:-3] args = [cmd] + args[1:] super(InvalidCharacterInArgException, self).__init__( u'Failed to read command line argument [{0}] because it does ' u'not appear to be valid 7-bit ASCII.\n\n' u'{1}'.format( console_attr.EncodeForOutput(self.invalid_arg), _FormatNonAsciiMarkerString(args))) # TODO(user): Eventually use api_exceptions.HttpException exclusively. class HttpException(api_exceptions.HttpException): """HttpException is raised whenever the Http response status code != 200. See api_lib.util.exceptions.HttpException for full documentation. 
""" def __init__(self, error, error_format='{message}'): super(HttpException, self).__init__(error, error_format) class InvalidArgumentException(ToolException): """InvalidArgumentException is for malformed arguments.""" def __init__(self, parameter_name, message): super(InvalidArgumentException, self).__init__( u'Invalid value for [{0}]: {1}'.format(parameter_name, message)) self.parameter_name = parameter_name class ConflictingArgumentsException(ToolException): """ConflictingArgumentsException arguments that are mutually exclusive.""" def __init__(self, *parameter_names): super(ConflictingArgumentsException, self).__init__( u'arguments not allowed simultaneously: ' + ', '.join(parameter_names)) self.parameter_names = parameter_names class UnknownArgumentException(ToolException): """UnknownArgumentException is for arguments with unexpected values.""" def __init__(self, parameter_name, message): super(UnknownArgumentException, self).__init__( u'Unknown value for [{0}]: {1}'.format(parameter_name, message)) self.parameter_name = parameter_name class RequiredArgumentException(ToolException): """An exception for when a usually optional argument is required in this case. """ def __init__(self, parameter_name, message): super(RequiredArgumentException, self).__init__( 'Missing required argument [{0}]: {1}'.format(parameter_name, message)) self.parameter_name = parameter_name class MinimumArgumentException(ToolException): """An exception for when one of several arguments is required.""" def __init__(self, parameter_names, message): super(MinimumArgumentException, self).__init__( 'One of [{0}] must be supplied: {1}'.format( ', '.join(['{0}'.format(p) for p in parameter_names]), message) ) class BadFileException(ToolException): """BadFileException is for problems reading or writing a file.""" # pylint: disable=g-import-not-at-top, Delay the import of this because # importing store is relatively expensive. def _GetTokenRefreshError(exc): from googlecloudsdk.core.credentials import store return store.TokenRefreshError(exc) # In general, lower level libraries should be catching exceptions and re-raising # exceptions that extend core.Error so nice error messages come out. There are # some error classes that want to be handled as recoverable errors, but cannot # import the core_exceptions module (and therefore the Error class) for various # reasons (e.g. circular dependencies). To work around this, we keep a list of # known "friendly" error types, which we handle in the same way as core.Error. # Additionally, we provide an alternate exception class to convert the errors # to which may add additional information. We use strings here so that we don't # have to import all these libraries all the time, just to be able to handle the # errors when they come up. Only add errors here if there is no other way to # handle them. _KNOWN_ERRORS = { 'apitools.base.py.exceptions.HttpError': HttpException, 'googlecloudsdk.core.util.files.Error': lambda x: None, 'httplib.ResponseNotReady': core_exceptions.NetworkIssueError, 'oauth2client.client.AccessTokenRefreshError': _GetTokenRefreshError, 'ssl.SSLError': core_exceptions.NetworkIssueError, } def _GetExceptionName(exc): """Returns the exception name used as index into _KNOWN_ERRORS.""" if isinstance(exc, type): name = exc.__module__ + '.' + exc.__name__ else: name = exc.__class__.__module__ + '.' + exc.__class__.__name__ return name def ConvertKnownError(exc): """Convert the given exception into an alternate type if it is known. 
Args: exc: Exception, the exception to convert. Returns: None if this is not a known type, otherwise a new exception that should be logged. """ convert_to_known_err = _KNOWN_ERRORS.get(_GetExceptionName(exc)) if not convert_to_known_err: # This is not a known error type return None # If there is no known exception just return the original exception. return convert_to_known_err(exc) or exc
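# --- Illustrative sketch (not part of the module) -----------------------------
# ConvertKnownError() above keys a table of converter callables by the
# fully-qualified exception name that _GetExceptionName() builds
# (module + '.' + class name). The self-contained sketch below reproduces that
# dispatch pattern with an invented exception and converter table; the names
# FakeNetworkError, exception_name and _CONVERTERS exist only in this example.
class FakeNetworkError(Exception):
    pass

def exception_name(exc):
    # works for both exception classes and exception instances
    cls = exc if isinstance(exc, type) else exc.__class__
    return cls.__module__ + '.' + cls.__name__

_CONVERTERS = {
    __name__ + '.FakeNetworkError': lambda exc: RuntimeError('network issue: %s' % exc),
}

def convert_known_error(exc):
    convert = _CONVERTERS.get(exception_name(exc))
    return convert(exc) if convert else None

print(convert_known_error(FakeNetworkError('timed out')))  # converted to RuntimeError
print(convert_known_error(ValueError('unrelated')))        # None: not a known error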
# -*- coding: utf-8 -*- from __future__ import print_function import threading # Python3 queue support. try: import Queue except ImportError: import queue as Queue import time import re from telebot import apihelper, types """ Module : telebot """ API_URL = r"https://api.telegram.org/" class ThreadPool: class WorkerThread(threading.Thread): count = 0 def __init__(self, queue): threading.Thread.__init__(self, name="WorkerThread{0}".format(self.__class__.count + 1)) self.__class__.count += 1 self.queue = queue self.daemon = True self._running = True self.start() def run(self): while self._running: try: task, args, kwargs = self.queue.get() task(*args, **kwargs) except Queue.Empty: time.sleep(0) pass def stop(self): self._running = False def __init__(self, num_threads=4): self.tasks = Queue.Queue() self.workers = [self.WorkerThread(self.tasks) for _ in range(num_threads)] self.num_threads = num_threads def put(self, func, *args, **kwargs): self.tasks.put((func, args, kwargs)) def close(self): for worker in self.workers: worker.stop() for worker in self.workers: worker.join() class TeleBot: """ This is TeleBot Class Methods: getMe sendMessage forwardMessage sendPhoto sendAudio sendDocument sendSticker sendVideo sendLocation sendChatAction getUserProfilePhotos getUpdates """ def __init__(self, token, create_threads=True, num_threads=4): """ :param token: bot API token :param create_threads: Create thread for message handler :param num_threads: Number of worker in thread pool. :return: """ self.token = token self.update_listener = [] self.polling_thread = None self.__stop_polling = False self.last_update_id = 0 self.num_threads = num_threads self.__create_threads = create_threads self.message_handlers = [] if self.__create_threads: self.worker_pool = ThreadPool(num_threads) def get_update(self): """ Retrieves any updates from the Telegram API. Registered listeners and applicable message handlers will be notified when a new message arrives. :raises ApiException when a call has failed. """ updates = apihelper.get_updates(self.token, offset=(self.last_update_id + 1), timeout=20) new_messages = [] for update in updates: if update['update_id'] > self.last_update_id: self.last_update_id = update['update_id'] msg = types.Message.de_json(update['message']) new_messages.append(msg) if len(new_messages) > 0: self.process_new_messages(new_messages) def process_new_messages(self, new_messages): self.__notify_update(new_messages) self._notify_command_handlers(new_messages) def __notify_update(self, new_messages): for listener in self.update_listener: if self.__create_threads: self.worker_pool.put(listener, new_messages) else: listener(new_messages) def polling(self, none_stop=False): """ This function creates a new Thread that calls an internal __polling function. This allows the bot to retrieve Updates automagically and notify listeners and message handlers accordingly. Do not call this function more than once! Always get updates. :param none_stop: Do not stop polling when Exception occur. :return: """ self.__stop_polling = False self.polling_thread = threading.Thread(target=self.__polling, args=([none_stop])) self.polling_thread.daemon = True self.polling_thread.start() def __polling(self, none_stop): print('TeleBot: Started polling.') while not self.__stop_polling: try: self.get_update() except Exception as e: if not none_stop: self.__stop_polling = True print("TeleBot: Exception occurred. 
Stopping.") print(e) print('TeleBot: Stopped polling.') def stop_polling(self): self.__stop_polling = True def set_update_listener(self, listener): self.update_listener.append(listener) def get_me(self): result = apihelper.get_me(self.token) return types.User.de_json(result) def get_user_profile_photos(self, user_id, offset=None, limit=None): """ Retrieves the user profile photos of the person with 'user_id' See https://core.telegram.org/bots/api#getuserprofilephotos :param user_id: :param offset: :param limit: :return: API reply. """ result = apihelper.get_user_profile_photos(self.token, user_id, offset, limit) return types.UserProfilePhotos.de_json(result) def send_message(self, chat_id, text, disable_web_page_preview=None, reply_to_message_id=None, reply_markup=None): """ Use this method to send text messages. :param chat_id: :param text: :param disable_web_page_preview: :param reply_to_message_id: :param reply_markup: :return: API reply. """ return types.Message.de_json( apihelper.send_message(self.token, chat_id, text, disable_web_page_preview, reply_to_message_id, reply_markup)) def forward_message(self, chat_id, from_chat_id, message_id): """ Use this method to forward messages of any kind. :param chat_id: which chat to forward :param from_chat_id: which chat message from :param message_id: message id :return: API reply. """ return types.Message.de_json(apihelper.forward_message(self.token, chat_id, from_chat_id, message_id)) def send_photo(self, chat_id, photo, caption=None, reply_to_message_id=None, reply_markup=None): """ Use this method to send photos. :param chat_id: :param photo: :param caption: :param reply_to_message_id: :param reply_markup: :return: API reply. """ return types.Message.de_json( apihelper.send_photo(self.token, chat_id, photo, caption, reply_to_message_id, reply_markup)) def send_audio(self, chat_id, data, reply_to_message_id=None, reply_markup=None): """ Use this method to send audio files, if you want Telegram clients to display the file as a playable voice message. For this to work, your audio must be in an .ogg file encoded with OPUS :param chat_id: :param data: :param reply_to_message_id: :param reply_markup: :return: API reply. """ return types.Message.de_json( apihelper.send_data(self.token, chat_id, data, 'audio', reply_to_message_id, reply_markup)) def send_document(self, chat_id, data, reply_to_message_id=None, reply_markup=None): """ Use this method to send general files. :param chat_id: :param data: :param reply_to_message_id: :param reply_markup: :return: API reply. """ return types.Message.de_json( apihelper.send_data(self.token, chat_id, data, 'document', reply_to_message_id, reply_markup)) def send_sticker(self, chat_id, data, reply_to_message_id=None, reply_markup=None): """ Use this method to send .webp stickers. :param chat_id: :param data: :param reply_to_message_id: :param reply_markup: :return: API reply. """ return types.Message.de_json( apihelper.send_data(self.token, chat_id, data, 'sticker', reply_to_message_id, reply_markup)) def send_video(self, chat_id, data, reply_to_message_id=None, reply_markup=None): """ Use this method to send video files, Telegram clients support mp4 videos. :param chat_id: :param data: :param reply_to_message_id: :param reply_markup: :return: API reply. 
""" return types.Message.de_json( apihelper.send_data(self.token, chat_id, data, 'video', reply_to_message_id, reply_markup)) def send_location(self, chat_id, latitude, longitude, reply_to_message_id=None, reply_markup=None): """ Use this method to send point on the map. :param chat_id: :param latitude: :param longitude: :param reply_to_message_id: :param reply_markup: :return: API reply. """ return types.Message.de_json( apihelper.send_location(self.token, chat_id, latitude, longitude, reply_to_message_id, reply_markup)) def send_chat_action(self, chat_id, action): """ Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status). :param chat_id: :param action: One of the following strings: 'typing', 'upload_photo', 'record_video', 'upload_video', 'record_audio', 'upload_audio', 'upload_document', 'find_location'. :return: API reply. :type: boolean """ return apihelper.send_chat_action(self.token, chat_id, action) def reply_to(self, message, text, **kwargs): return self.send_message(message.chat.id, text, reply_to_message_id=message.message_id, **kwargs) def message_handler(self, commands=None, regexp=None, func=None, content_types=['text']): """ Message handler decorator. This decorator can be used to decorate functions that must handle certain types of messages. All message handlers are tested in the order they were added. Example: bot = TeleBot('TOKEN') # Handles all messages which text matches regexp. @bot.message_handler(regexp='someregexp') def command_help(message): bot.send_message(message.chat.id, 'Did someone call for help?') # Handle all sent documents of type 'text/plain'. @bot.message_handler(func=lambda message: message.document.mime_type == 'text/plain', content_types=['document']) def command_handle_document(message): bot.send_message(message.chat.id, 'Document received, sir!') # Handle all other commands. @bot.message_handler(func=lambda message: True, content_types=['audio', 'video', 'document', 'text', 'location', 'contact', 'sticker']) def default_command(message): bot.send_message(message.chat.id, "This is the default command handler.") :param regexp: Optional regular expression. :param func: Optional lambda function. The lambda receives the message to test as the first parameter. It must return True if the command should handle the message. :param content_types: This commands' supported content types. Must be a list. Defaults to ['text']. """ def decorator(fn): func_dict = {'function': fn, 'content_types': content_types} if regexp: func_dict['regexp'] = regexp if 'text' in content_types else None if func: func_dict['lambda'] = func if commands: func_dict['commands'] = commands if 'text' in content_types else None self.message_handlers.append(func_dict) return fn return decorator @staticmethod def is_command(text): """ Checks if `text` is a command. Telegram chat commands start with the '/' character. :param text: Text to check. :return: True if `text` is a command, else False. """ return text.startswith('/') @staticmethod def extract_command(text): """ Extracts the command from `text` (minus the '/') if `text` is a command (see is_command). If `text` is not a command, this function returns None. 
Examples: extract_command('/help'): 'help' extract_command('/help@BotName'): 'help' extract_command('/search black eyed peas'): 'search' extract_command('Good day to you'): None :param text: String to extract the command from :return: the command if `text` is a command, else None. """ return text.split()[0].split('@')[0][1:] if TeleBot.is_command(text) else None @staticmethod def _test_message_handler(message_handler, message): if message.content_type not in message_handler['content_types']: return False if 'commands' in message_handler and message.content_type == 'text': return TeleBot.extract_command(message.text) in message_handler['commands'] if 'regexp' in message_handler and message.content_type == 'text' and re.search(message_handler['regexp'], message.text): return False if 'lambda' in message_handler: return message_handler['lambda'](message) return False def _notify_command_handlers(self, new_messages): for message in new_messages: for message_handler in self.message_handlers: if self._test_message_handler(message_handler, message): if self.__create_threads: self.worker_pool.put(message_handler['function'], message) # t = threading.Thread(target=message_handler['function'], args=(message,)) # t.start() else: message_handler['function'](message) break class AsyncTask: def __init__(self, target, *args, **kwargs): self.target = target self.args = args self.kwargs = kwargs self.done = False self.thread = threading.Thread(target=self._run) self.thread.start() def _run(self): try: self.result = self.target(*self.args, **self.kwargs) except Exception as e: self.result = e self.done = True def wait(self): if not self.done: self.thread.join() if isinstance(self.result, Exception): raise self.result else: return self.result def async(): def decorator(fn): def wrapper(*args, **kwargs): return AsyncTask(fn, *args, **kwargs) return wrapper return decorator class AsyncTeleBot(TeleBot): def __init__(self, *args, **kwargs): TeleBot.__init__(self, *args, **kwargs) @async() def get_me(self): return TeleBot.get_me(self) @async() def get_user_profile_photos(self, *args, **kwargs): return TeleBot.get_user_profile_photos(self, *args, **kwargs) @async() def send_message(self, *args, **kwargs): return TeleBot.send_message(self, *args, **kwargs) @async() def forward_message(self, *args, **kwargs): return TeleBot.forward_message(self, *args, **kwargs) @async() def send_photo(self, *args, **kwargs): return TeleBot.send_photo(self, *args, **kwargs) @async() def send_audio(self, *args, **kwargs): return TeleBot.send_audio(self, *args, **kwargs) @async() def send_document(self, *args, **kwargs): return TeleBot.send_document(self, *args, **kwargs) @async() def send_sticker(self, *args, **kwargs): return TeleBot.send_sticker(self, *args, **kwargs) @async() def send_video(self, *args, **kwargs): return TeleBot.send_video(self, *args, **kwargs) @async() def send_location(self, *args, **kwargs): return TeleBot.send_location(self, *args, **kwargs) @async() def send_chat_action(self, *args, **kwargs): return TeleBot.send_chat_action(self, *args, **kwargs)
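# --- Illustrative usage (not part of the module) ------------------------------
# Typical wiring for the TeleBot class above, following the pattern shown in
# the message_handler docstring: create a bot, register handlers, and start the
# background polling thread. 'YOUR-BOT-TOKEN' is a placeholder; polling()
# returns immediately because the work happens in a daemon thread, so the main
# thread is kept alive explicitly.
import time
import telebot

bot = telebot.TeleBot('YOUR-BOT-TOKEN')

@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
    bot.reply_to(message, 'Hi there, how can I help?')

@bot.message_handler(func=lambda message: True)
def echo_all(message):
    bot.send_message(message.chat.id, message.text)

if __name__ == '__main__':
    bot.polling(none_stop=True)
    while True:
        time.sleep(60)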
# -*- coding: utf-8 -*- import logging import logging.handlers import requests import json import re from bs4 import BeautifulSoup import lxml import time from datetime import datetime as dt import os import sys print os.path.dirname(sys.executable) # Suppress urrlib3 https warnings import urllib3 DISCOVER_URL = 'https://www.kickstarter.com/discover/advanced' CATEGORY_URL = 'https://www.kickstarter.com/discover' PROJECT_REGEX = re.compile(r'window.current_project = \"(.+)\"') TIMEOUT = 10. REQUEST_LIMIT = 10 class Pykick(object): ''' A simple module to access the kickstarter API and handle the pagination Functions: Pykick.get_newest will return an iterator to the 4000 newest individual projects. Pykick.get will return an iterator to 4000 projects Pykick.get_creator_data will scrape data directly from a user page on kickstarter.com. These urls are in the project json dicts. ''' def __init__(self, loglevel = logging.INFO, logfile = './logs/pykick.log'): ''' Module to access kickstarter projects ''' # create the logger instance and set loglevel self.logger = logging.getLogger("pykick.Pykick") self.logger.setLevel(loglevel) # check if the log folder exists, if not make it if not os.path.exists(os.path.dirname(logfile)): os.mkdir(os.path.dirname(logfile)) # Lets make one log file per day and keep backups for a week fh = logging.handlers.TimedRotatingFileHandler(logfile, when='D', interval=1, backupCount=7) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') fh.setFormatter(formatter) fh.setLevel(loglevel) ch = logging.StreamHandler() ch.setLevel(loglevel) ch.setFormatter(formatter) self.logger.addHandler(fh) self.logger.addHandler(ch) self.requests_counter = 0 def __iter_pages(self, options): # Set the counters to zero counter = 0 total_hits = 0 running = True while running: # Try to get a response using requests from the discover url using options set above r = requests.get(DISCOVER_URL, params=options) if r.status_code==200: # Got a response, convert it to json! resp = r.json() # There is always a key total_hits, even if we exceeded the page limit, in that case projects is an empty array total_hits = resp['total_hits'] # Increase the counter by the number of found projects counter += len(resp['projects']) # logging self.logger.info('total_hits: %s', total_hits) self.logger.info("Scanning page: %s" % options['page']) self.logger.info("Project: %s out of %s" %(counter, total_hits)) yield resp['projects'] # stop the loop if there was an error, e.g. 
the url might be broken (in the future this should be # fixed the in the db) else: self.logger.critical("requests error, status code: %s" % r.status_code) running = False # stop the while loop if we reached the end (page 200) or found all projects, whatever is reached first if (counter == total_hits) or (options['page'] == 200): running = False # go to the next page options['page'] += 1 def __iter_projects(self, options={}): # the default starting page is 1, this could also be changed by hand to start at a later page (maximum 200) options.setdefault('page', 1) options['format'] = 'json' # go through all the pages and emit individual projects for page in self.__iter_pages(options): for project in page: yield project def __handle_request(self, url): # try to contact the url try: r = requests.get(url, timeout = TIMEOUT) except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e: self.logger.warning("No response, url: %s \n Error: %s" % (url, e)) return None # if the status code is not 200, log the error if r.status_code!=200: self.logger.critical("No response, url: %s, status code: %s", url, r.status_code) return None else: return r def __extract_data(self, r): # Search for the project data in the response text and extract the json data if there is one project_text = PROJECT_REGEX.search(r.text).groups() if project_text: # fix some html chars for quotes, replace excess slashs project_text = project_text[0].replace('&quot;','"').replace('\\\\','\\') # if we can convert the response into a json, return it, otherwise raise an error and return None try: resp = json.loads(project_text) return resp except ValueError, e: self.logger.critical("Error in loading request into JSON") self.logger.critical(e) return None # if there was no project, return none else: self.logger.critical("No project text found on project page") return None def __extract_creator_data(self, r, url): # load the url text into beautifulsoup print url # print r.text i = 0 try: soup = BeautifulSoup(r.text, 'html.parser') print 'no error' i = 1 except: print 'error' if i == 1: try: # Try to find the nav lists and all 'a' elements within list_items = soup.findAll('li', {'class', 'nav--subnav__item'}) except IndexError as e: # if we can't find project_nav, we raise an error self.logger.info('Couldnt get user page, user page deleted? %s', url) return None # create a dict for the listed data found in project_nav. This should be: backed, created and comments counts try: creator_data = {item.text.split()[0]:item.text.split()[1] for item in list_items if len(item.text.split())>1} self.logger.info('Updated creator data: %s', creator_data) except AttributeError as e: self.logger.warning('Failed to extract creator data: %s', list_items) return creator_data return None def get_newest(self,options={}): ''' Returns an iterator for the newest projects at kickstarter ''' options.setdefault('sort', 'newest') return self.get(options) def get(self, options = {}): ''' Returns an iterator for projects that returns dicts of individual project records. By default the format is 'json'. Change this by calling the function with a dictionary with keys 'sort' and 'format' or any other option needed, e.g. 'category_id'. E.g.: options = {'sort' : 'most_funded', 'format' : 'json', 'category_id' : 3} to find projects from the category 'comics' (3), sorted by funding and in json format. 
        '''
        return self.__iter_projects(options)

    def get_categories(self):
        '''
        returns an updated dictionary with the main Kickstarter categories (not including
        subcategories) and the count of live projects in them. Scraped from the mainpage
        kickstarter.com
        '''
        categories = {}
        r = requests.get(CATEGORY_URL)
        if r.status_code==200:
            soup = BeautifulSoup(r.text)
        else:
            self.logger.critical("Couldn't soupify category page %s" % CATEGORY_URL)
            self.logger.critical('requests status code: %s' % r.status_code)
            # Bail out here; otherwise 'soup' would be referenced before assignment below.
            return categories
        counts = soup.find_all('div', {'class' : 'h4 bold'})
        names = soup.find_all('div', {'class' : 'js-category-name category-name mobile-table full-height'})
        assert len(counts) == len(names)
        for name, count in zip(names, counts):
            c = count.contents[0].split(" ")[0].replace(",","")
            n = name.find('div', {'class' : 'h3'}).contents[0]
            categories.setdefault(n, c)
        return categories

    def get_project(self, project_url):
        '''
        Scrapes from an individual kickstarter project page what it can get. The project data
        is hidden in the response in json format. The function will search for this data,
        reformat and return it as a dictionary.

        Input: the project url. The format is
        https://www.kickstarter.com/projects/[project-id]/[project-name/slug?]
        Returns: a python dictionary with the project data
        '''
        # Let's try to get the project data
        r = self.__handle_request(project_url)
        if r:
            # If we got an answer, reset the requests counter and return the project
            self.requests_counter = 0
            project = self.__extract_data(r)
            return project
        else:
            # We got no response, if the request counter is still smaller than request limit, try again
            self.logger.critical('received empty project! url, trying again: %s' % project_url)
            if self.requests_counter < REQUEST_LIMIT:
                self.logger.critical("attempt %i out of %i" % (self.requests_counter, REQUEST_LIMIT))
                self.requests_counter += 1
                # Return the retry result; without 'return' the recursive call's value was lost.
                return self.get_project(project_url)
            else:
                self.logger.critical("gave up trying to get project %s" % project_url)
                return None

    def get_creator_data(self, creator_url):
        '''
        Scans a kickstarter user page and returns information about the creator as a dictionary:

        'Backed'   : Number of projects backed by the user
        'Comments' : Number of comments on the personal page
        'Created'  : Number of projects created by the user
        '''
        r = self.__handle_request(creator_url)
        return self.__extract_creator_data(r, creator_url)
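# ---------------------------------------------------------------------------
# Usage sketch (added): a minimal, hedged example of driving the Pykick
# scraper above. It only uses methods defined in this module (get_newest,
# get_project); the JSON keys 'name' and 'urls' are assumptions about the
# Kickstarter discover payload and may need adjusting against a live response.
# ---------------------------------------------------------------------------
def _pykick_demo(max_projects=5):
    kick = Pykick()
    for i, project in enumerate(kick.get_newest()):
        if i >= max_projects:
            break
        # 'name' and 'urls' are assumed keys of the discover JSON records.
        print(project.get('name'))
        project_url = project.get('urls', {}).get('web', {}).get('project')
        if project_url:
            # Re-scrape the individual project page for the full record.
            full_record = kick.get_project(project_url)
            print(full_record is not None)

if __name__ == '__main__':
    _pykick_demo()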
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import argparse from knack.util import CLIError from azure.mgmt.eventgrid.models import ( NumberGreaterThanAdvancedFilter, NumberGreaterThanOrEqualsAdvancedFilter, NumberInAdvancedFilter, NumberLessThanAdvancedFilter, NumberLessThanOrEqualsAdvancedFilter, NumberNotInAdvancedFilter, StringBeginsWithAdvancedFilter, StringContainsAdvancedFilter, StringEndsWithAdvancedFilter, StringInAdvancedFilter, StringNotInAdvancedFilter, BoolEqualsAdvancedFilter, StringNotBeginsWithAdvancedFilter, StringNotContainsAdvancedFilter, StringNotEndsWithAdvancedFilter, IsNullOrUndefinedAdvancedFilter, IsNotNullAdvancedFilter, NumberInRangeAdvancedFilter, NumberNotInRangeAdvancedFilter) NUMBERIN = "NumberIn" NUMBERNOTIN = "NumberNotIn" STRINGIN = "StringIn" STRINGNOTIN = "StringNotIn" STRINGBEGINSWITH = "StringBeginsWith" STRINGCONTAINS = "StringContains" STRINGENDSWITH = "StringEndsWith" NUMBERGREATERTHAN = "NumberGreaterThan" NUMBERGREATERTHANOREQUALS = "NumberGreaterThanOrEquals" NUMBERLESSTHAN = "NumberLessThan" NUMBERLESSTHANOREQUALS = "NumberLessThanOrEquals" BOOLEQUALS = "BoolEquals" NUMBERINRANGE = "NumberInRange" NUMBERNOTINRANGE = "NumberNotInRange" STRINGNOTBEGINSWITH = "StringNotBeginsWith" STRINGNOTENDSWITH = "StringNotEndsWith" STRINGNOTCONTAINS = "StringNotContains" ISNULLORUNDEFINED = "IsNullOrUndefined" ISNOTNULL = "IsNotNull" # pylint: disable=protected-access # pylint: disable=too-few-public-methods class EventSubscriptionAddFilter(argparse._AppendAction): def __call__(self, parser, namespace, values, option_string=None): _validate_min_values_len(values) key = values[0] operator = values[1] # operators that support no value if operator.lower() == ISNULLORUNDEFINED.lower(): advanced_filter = _get_zero_value_advanced_filter(key, operator, values) elif operator.lower() == ISNOTNULL.lower(): advanced_filter = _get_zero_value_advanced_filter(key, operator, values) # operators that support single value elif operator.lower() == NUMBERLESSTHAN.lower(): advanced_filter = _get_single_value_advanced_filter(key, operator, values) elif operator.lower() == NUMBERLESSTHANOREQUALS.lower(): advanced_filter = _get_single_value_advanced_filter(key, operator, values) elif operator.lower() == NUMBERGREATERTHAN.lower(): advanced_filter = _get_single_value_advanced_filter(key, operator, values) elif operator.lower() == NUMBERGREATERTHANOREQUALS.lower(): advanced_filter = _get_single_value_advanced_filter(key, operator, values) elif operator.lower() == BOOLEQUALS.lower(): advanced_filter = _get_single_value_advanced_filter(key, operator, values) # operators that support multiple values elif operator.lower() == NUMBERIN.lower() or operator.lower() == NUMBERNOTIN.lower(): advanced_filter = _get_multi_value_advanced_filter(key, operator, values) elif operator.lower() == STRINGIN.lower(): advanced_filter = _get_multi_value_advanced_filter(key, operator, values) elif operator.lower() == STRINGNOTIN.lower(): advanced_filter = _get_multi_value_advanced_filter(key, operator, values) elif operator.lower() == STRINGBEGINSWITH.lower(): advanced_filter = _get_multi_value_advanced_filter(key, operator, values) elif operator.lower() == STRINGNOTBEGINSWITH.lower(): advanced_filter = 
_get_multi_value_advanced_filter(key, operator, values) elif operator.lower() == STRINGENDSWITH.lower(): advanced_filter = _get_multi_value_advanced_filter(key, operator, values) elif operator.lower() == STRINGNOTENDSWITH.lower(): advanced_filter = _get_multi_value_advanced_filter(key, operator, values) elif operator.lower() == STRINGCONTAINS.lower(): advanced_filter = _get_multi_value_advanced_filter(key, operator, values) elif operator.lower() == STRINGNOTCONTAINS.lower(): advanced_filter = _get_multi_value_advanced_filter(key, operator, values) # operators that support range of values elif operator.lower() == NUMBERINRANGE.lower(): advanced_filter = _get_range_advanced_filter(key, operator, values) elif operator.lower() == NUMBERNOTINRANGE.lower(): advanced_filter = _get_range_advanced_filter(key, operator, values) else: raise CLIError("--advanced-filter: The specified filter operator '{}' is not" " a valid operator. Supported values are ".format(operator) + NUMBERIN + "," + NUMBERNOTIN + "," + STRINGIN + "," + STRINGNOTIN + "," + STRINGBEGINSWITH + "," + STRINGCONTAINS + "," + STRINGENDSWITH + "," + NUMBERGREATERTHAN + "," + NUMBERGREATERTHANOREQUALS + "," + NUMBERLESSTHAN + "," + NUMBERLESSTHANOREQUALS + "," + BOOLEQUALS + "," + NUMBERINRANGE + "," + NUMBERNOTINRANGE + "," + ISNULLORUNDEFINED + "," + ISNOTNULL + "," + STRINGNOTBEGINSWITH + "," + STRINGNOTENDSWITH + "," + STRINGNOTCONTAINS + ".") if namespace.advanced_filter is None: namespace.advanced_filter = [] namespace.advanced_filter.append(advanced_filter) def _get_zero_value_advanced_filter(key, operator, values): if len(values) != 2: raise CLIError("--advanced-filter: For '{}' operator no filter value " "must be specified.".format(operator)) if operator.lower() == ISNULLORUNDEFINED.lower(): advanced_filter = IsNullOrUndefinedAdvancedFilter(key=key) elif operator.lower() == ISNOTNULL.lower(): advanced_filter = IsNotNullAdvancedFilter(key=key) else: raise CLIError("--advanced-filter: The specified filter operator '{}' is not" " a zero value operator. Supported operators are ".format(operator) + ISNULLORUNDEFINED + "," + ISNOTNULL + ".") return advanced_filter def _get_single_value_advanced_filter(key, operator, values): if len(values) != 3: raise CLIError("--advanced-filter: For '{}' operator only one filter value " "must be specified.".format(operator)) if operator.lower() == NUMBERLESSTHAN.lower(): advanced_filter = NumberLessThanAdvancedFilter(key=key, value=float(values[2])) elif operator.lower() == NUMBERLESSTHANOREQUALS.lower(): advanced_filter = NumberLessThanOrEqualsAdvancedFilter(key=key, value=float(values[2])) elif operator.lower() == NUMBERGREATERTHAN.lower(): advanced_filter = NumberGreaterThanAdvancedFilter(key=key, value=float(values[2])) elif operator.lower() == NUMBERGREATERTHANOREQUALS.lower(): advanced_filter = NumberGreaterThanOrEqualsAdvancedFilter(key=key, value=float(values[2])) elif operator.lower() == BOOLEQUALS.lower(): advanced_filter = BoolEqualsAdvancedFilter(key=key, value=bool(values[2])) else: raise CLIError("--advanced-filter: The specified filter operator '{}' is not" " a single value operator. 
Supported operators are ".format(operator) + NUMBERLESSTHAN + "," + NUMBERLESSTHANOREQUALS + "," + NUMBERGREATERTHAN + "," + NUMBERGREATERTHANOREQUALS + "," + BOOLEQUALS + ".") return advanced_filter def _get_multi_value_advanced_filter(key, operator, values): if len(values) < 3: raise CLIError("--advanced-filter: For '{}' operator at least one filter value " "must be specified.".format(operator)) if operator.lower() == NUMBERIN.lower(): float_values = [float(i) for i in values[2:]] advanced_filter = NumberInAdvancedFilter(key=key, values=float_values) elif operator.lower() == NUMBERNOTIN.lower(): float_values = [float(i) for i in values[2:]] advanced_filter = NumberNotInAdvancedFilter(key=key, values=float_values) elif operator.lower() == STRINGIN.lower(): advanced_filter = StringInAdvancedFilter(key=key, values=values[2:]) elif operator.lower() == STRINGNOTIN.lower(): advanced_filter = StringNotInAdvancedFilter(key=key, values=values[2:]) elif operator.lower() == STRINGBEGINSWITH.lower(): advanced_filter = StringBeginsWithAdvancedFilter(key=key, values=values[2:]) elif operator.lower() == STRINGNOTBEGINSWITH.lower(): advanced_filter = StringNotBeginsWithAdvancedFilter(key=key, values=values[2:]) elif operator.lower() == STRINGENDSWITH.lower(): advanced_filter = StringEndsWithAdvancedFilter(key=key, values=values[2:]) elif operator.lower() == STRINGNOTENDSWITH.lower(): advanced_filter = StringNotEndsWithAdvancedFilter(key=key, values=values[2:]) elif operator.lower() == STRINGCONTAINS.lower(): advanced_filter = StringContainsAdvancedFilter(key=key, values=values[2:]) elif operator.lower() == STRINGNOTCONTAINS.lower(): advanced_filter = StringNotContainsAdvancedFilter(key=key, values=values[2:]) else: raise CLIError("--advanced-filter: The specified filter operator '{}' is not " " a multi-value operator. Supported operators are ".format(operator) + NUMBERIN + "," + NUMBERNOTIN + "," + STRINGIN + "," + STRINGNOTIN + "," + STRINGBEGINSWITH + "," + STRINGNOTBEGINSWITH + "," + STRINGENDSWITH + "," + STRINGNOTENDSWITH + "," + STRINGCONTAINS + "," + STRINGNOTCONTAINS + ".") return advanced_filter def _get_range_advanced_filter(key, operator, values): if len(values) < 3: raise CLIError("--advanced-filter: For '{}' operator at least one range filter value " "like 'value1,value2' must be specified.".format(operator)) result = [] for value in values[2:]: float_value = [float(i) for i in value.split(',')] result.append(float_value) if operator.lower() == NUMBERINRANGE.lower(): advanced_filter = NumberInRangeAdvancedFilter(key=key, values=result) elif operator.lower() == NUMBERNOTINRANGE.lower(): advanced_filter = NumberNotInRangeAdvancedFilter(key=key, values=result) else: raise CLIError("--advanced-filter: The specified filter operator '{}' is not " " a range value operator. Supported operators are ".format(operator) + NUMBERINRANGE + "," + NUMBERNOTINRANGE + ".") return advanced_filter def _validate_min_values_len(values): valuesLen = len(values) if valuesLen < 2: raise CLIError("usage error: --advanced-filter KEY[.INNERKEY] FILTEROPERATOR VALUE [VALUE...]")
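# ---------------------------------------------------------------------------
# Usage sketch (added): a standalone illustration of how the
# EventSubscriptionAddFilter action above consumes
#   --advanced-filter KEY[.INNERKEY] FILTEROPERATOR VALUE [VALUE ...]
# In the real CLI this action is registered through the command's argument
# context; the bare argparse wiring below is only an assumption, used to show
# the parsing and validation behaviour in isolation.
# ---------------------------------------------------------------------------
def _advanced_filter_demo():
    parser = argparse.ArgumentParser()
    parser.add_argument('--advanced-filter', nargs='+',
                        action=EventSubscriptionAddFilter)
    ns = parser.parse_args([
        '--advanced-filter', 'data.key1', 'NumberIn', '2', '3', '4.5',
        '--advanced-filter', 'subject', 'StringBeginsWith', 'blue', 'red',
    ])
    # Each --advanced-filter occurrence appends one advanced filter model.
    for f in ns.advanced_filter:
        print(type(f).__name__, f.key)

if __name__ == '__main__':
    _advanced_filter_demo()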
# # qc.py - Quantum Computing Library for Python # # by Pius Fischer, February 13-20, 2016 # # Various functions for mathematically simulating the quantum circuit model of computation. # # Example 1 - Superdense coding (sending two classical bits a1 and a2 from Alice to Bob via # an entangled pair of qubits) # # from qc import * <-- Import this library. # qA, qB = 'A', 'B' <-- Define the names to be used for the qubits. # a1, a2 = 1, 0 <-- Initialize the bits to be sent. # prepareBell(qA, qB) <-- Create the entangled pair qA and qB. # encodeBell(a1, a2, qA) <-- Alice encodes a1 and a2 onto qA (which also affects qB). # b1, b2 = measureBell(qA, qB) <-- Bob recovers b1 and b2 by Bell measurement of qA and qB. # # See the sendSuperdense() function below. # # Example 2 - Quantum teleportation (transferring the state of a qubit qC from Alice to Bob # via an entangled pair of qubits qA and qB and two classical bits b1 and b2) # # from qc import * <-- Import this library. # qA, qB, qC = 'A', 'B', 'C' <-- Define the names to be used for the qubits. # createQubit(qC, 0.8, 0.6) <-- Initialize the qubit to be teleported. # prepareBell(qA, qB) <-- Create the entangled pair qA and qB. # b1, b2 = measureBell(qC, qA) <-- Alice gets b1 and b2 by Bell measurement of qC and qA. # encodeBell(b1, b2, qB) <-- Bob encodes b1 and b2 onto qB. Now qB is in the same # state that qC was in before Alice's Bell measurement. # # See the teleportQubit() function below. # import math import random __all__ = ( 'IdentityMatrix', 'IdentityMatrixN', 'HadamardGate', 'XGate', 'YGate', 'ZGate', 'PhaseShiftGate', 'ControlledGate', 'ControlledNotGate', 'SwapGate', 'SqrtSwapGate', 'FourierTransform', 'multiplyMatrixByScalar', 'multiplyMatrixByMatrix', 'combineTransforms', 'roundedMatrix', 'compareMatrices', 'compareVectors', 'roundedStateVector', 'compareStateVectors', 'clearSystem', 'printSystem', 'printQubit', 'createQubit', 'removeQubit', 'measureQubit', 'applyGate', 'qubitArray', 'prepareBell', 'encodeBell', 'measureBell', 'sendSuperdense', 'teleportQubit', 'quantumFourierTransform', ) def validState(V, minSize=2): vlen = len(V) assert vlen & (vlen - 1) == 0 assert vlen >= minSize return vlen def validMatrix(U, size=None): numRows = len(U) if size is None: assert numRows & (numRows - 1) == 0 else: assert numRows == size for row in U: assert len(row) == numRows return numRows def multiplyMatrixByScalar(scalar, U): validMatrix(U) return [[scalar * element for element in row] for row in U] def multiplyMatrixByMatrix(U1, U2): validMatrix(U2, validMatrix(U1)) U2 = zip(*U2) return [[sum([e1 * e2 for e1, e2 in zip(row1, row2)]) for row2 in U2] for row1 in U1] def simplify(N): real = round(N.real, 14) imag = round(N.imag, 14) if real == 0: # Sometimes real is rounded to -0 ... Really set it to 0. 
real = 0 if imag == 0: return real return complex(real, imag) def roundedMatrix(U): return [[simplify(col) for col in row] for row in U] def compareMatrices(X, Y, tolerance=1.5e-14, verbose=True): if len(X) != len(Y): if verbose: print "numRows(X) ({}) != numRows(Y) ({})".format(len(X), len(Y)) return False rX = roundedMatrix(X) rY = roundedMatrix(Y) equal = True for row, (rowX, rowY) in enumerate(zip(rX, rY)): if len(rowX) != len(rowY): if verbose: print "numCols(X[{0}]) ({1}) != numCols(Y[{0}]) ({2})".format(row, len(rowX), len(rowY)) return False for col, (a, b) in enumerate(zip(rowX, rowY)): if a != b and abs(a - b) > tolerance: equal = False if verbose: print "X[{0},{1}] != Y[{0},{1}]:".format(row, col) print "X[{},{}] = {: .16f}".format(row, col, a) print "Y[{},{}] = {: .16f}".format(row, col, b) print return equal IdentityMatrix = ((1, 0), (0, 1)) def IdentityMatrixN(N): return [[1 if row == col else 0 for col in xrange(N)] for row in xrange(N)] HadamardGate = multiplyMatrixByScalar(1/math.sqrt(2), ((1, 1), (1, -1))) XGate = ((0, 1), (1, 0)) YGate = ((0, -1J), (1J, 0)) ZGate = ((1, 0), (0, -1)) def PhaseShiftGate(phi): return ((1, 0), (0, complex(math.cos(phi), math.sin(phi)))) def ControlledGate(U): validMatrix(U, 2) return ((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, U[0][0], U[0][1]), (0, 0, U[1][0], U[1][1])) ControlledNotGate = ControlledGate(XGate) SwapGate = ( (1, 0, 0, 0), (0, 0, 1, 0), (0, 1, 0, 0), (0, 0, 0, 1)) SqrtSwapGate = ( (1, 0, 0, 0), (0, 0.5 + 0.5J, 0.5 - 0.5J, 0), (0, 0.5 - 0.5J, 0.5 + 0.5J, 0), (0, 0, 0, 1)) def FourierTransform(N): phi = 2 * math.pi / N w = complex(math.cos(phi), math.sin(phi)) sqrtN = math.sqrt(N) return [[(w ** (row * col) / sqrtN) for col in xrange(N)] for row in xrange(N)] def changeState(U, V): # Input: # U is a unitary matrix (n x n) # V is a state vector (n x 1) # Output: # V which is overwritten by the product of U and V validMatrix(U, validState(V)) newState = [sum([ue * ve for ue, ve in zip(row, V)]) for row in U] for i, element in enumerate(newState): V[i] = element return V def combineStates(V1, V2): # Input: # V1 is a state vector (m x 1) # V2 is a state vector (n x 1) # Output: # A state vector (mn x 1) that is the Kronecker product of V1 and V2 validState(V1) validState(V2) return [e1 * e2 for e1 in V1 for e2 in V2] def combineTransforms(U1, U2): # Input: # U1 is a unitary matrix (m x m) # U2 is a unitary matrix (n x n) # Output: # A unitary matrix (mn x mn) that is the Kronecker product of U1 and U2 validMatrix(U1) validMatrix(U2) return [[e1 * e2 for e1 in row1 for e2 in row2] for row1 in U1 for row2 in U2] def changeLeadingState(U, V): vlen = validState(V) ulen = validMatrix(U) while vlen > ulen: U = combineTransforms(U, IdentityMatrix) ulen <<= 1 changeState(U, V) qubitStateMap = {} class QubitState(object): def __init__(self, name, pa0, pa1): # # pa0 is the probability amplitude for the 0 state # pa1 is the probability amplitude for the 1 state # assert name not in qubitStateMap assert abs(1.0 - (abs(pa0)**2 + abs(pa1)**2)) < 1e-14 self.stateVector = [pa0, pa1] self.qubitNames = [name] qubitStateMap[name] = self def extend(self, otherState): if self is otherState: return self.stateVector = combineStates(self.stateVector, otherState.stateVector) self.qubitNames.extend(otherState.qubitNames) for name in otherState.qubitNames: qubitStateMap[name] = self def length(self): return len(self.qubitNames) def reorder(self, newOrder): if self.qubitNames[:len(newOrder)] == newOrder: return nameMap = {name: i for i, name in 
enumerate(reversed(self.qubitNames))} newOrder = [(name, nameMap.pop(name)) for name in newOrder] newOrder.extend([(name, nameMap.pop(name)) for name in self.qubitNames if name in nameMap]) bitShift = [(newPos, oldPos) for newPos, (name, oldPos) in enumerate(reversed(newOrder))] self.stateVector = [ self.stateVector[ sum([(((state >> newPos) & 1) << oldPos) for newPos, oldPos in bitShift]) ] for state in xrange(len(self.stateVector)) ] self.qubitNames = [name for name, i in newOrder] def transform(self, unitaryMatrix): changeLeadingState(unitaryMatrix, self.stateVector) def measure(self, name): self.reorder([name]) # The probability that the measurement will be 0 is the sum of the squares of # the absolute values of the first half of the elements of the state vector. V = self.stateVector vlen = len(V) prob0 = sum([abs(pa)**2 for pa in V[:vlen/2]]) prob1 = sum([abs(pa)**2 for pa in V[vlen/2:]]) assert abs(1.0 - (prob0 + prob1)) < 1e-14 if random.random() < prob0: measurement = 0 V[vlen/2:] = [] norm = math.sqrt(prob0) prob0, prob1 = 1, 0 else: measurement = 1 V[:vlen/2] = [] norm = math.sqrt(prob1) prob0, prob1 = 0, 1 for i, pa in enumerate(V): V[i] = pa / norm del qubitStateMap[name] del self.qubitNames[0] QubitState(name, prob0, prob1) return measurement def roundedStateVector(self): return [simplify(pa) for pa in self.stateVector] def printState(self): stateFormat = ' {:0' + str(len(self.qubitNames)) + 'b} -> {: } p={}' print ','.join(self.qubitNames), '= [' for state, pa in enumerate(self.stateVector): print stateFormat.format(state, simplify(pa), simplify(abs(pa)**2)) print ']' def clearSystem(): qubitStateMap.clear() def createQubit(name, pa0, pa1): QubitState(name, pa0, pa1) def removeQubit(name): assert qubitStateMap[name].length() == 1 del qubitStateMap[name] def printQubit(name): qubitStateMap[name].printState() def printSystem(): printed = {} for name, state in qubitStateMap.iteritems(): if state not in printed: state.printState() printed[state] = True def applyGate(gate, *qubits): ulen = validMatrix(gate) qlen = 1 << len(qubits) assert ulen == qlen >= 2 # Combine state vectors as necessary so all the qubits are in the same state vector: qState = qubitStateMap[qubits[0]] for name in qubits[1:]: qState.extend(qubitStateMap[name]) qState.reorder(qubits) qState.transform(gate) def measureQubit(name): return qubitStateMap[name].measure(name) def roundedStateVector(name): return qubitStateMap[name].roundedStateVector() def compareVectors(v1, v2, name1='X', name2='Y', tolerance=1.5e-14, verbose=True): if len(v1) != len(v2): if verbose: print "Length of {} ({}) != Length of {} ({})".format(name1, len(v1), name2, len(v2)) return False equal = True for i, (a, b) in enumerate(zip(v1, v2)): if a != b and abs(a - b) > tolerance: equal = False if verbose: print "{0}[{2}] != {1}[{2}]:".format(name1, name2, i) print "{}[{}] = {: .16f}".format(name1, i, a) print "{}[{}] = {: .16f}".format(name2, i, b) print return equal def compareStateVectors(qX, qY, tolerance=1.5e-14, verbose=True): vX = roundedStateVector(qX) vY = roundedStateVector(qY) return compareVectors(vX, vY, qX, qY, tolerance, verbose) def qubitArray(namePrefix, size): return [namePrefix + str(i + 1) for i in xrange(size)] def prepareBell(q1, q2, initialState=0): # Input: # q1 and q2 are the names to be given to the two entangled qubits. # # initialState is an optional integer between 0 and 3 representing the # initial (pre-entangled) state of the two qubits. The default is 0. 
# # Output: None # # The two qubits q1 and q2 are created and entangled with each other in # one of the four Bell states (depending on the initial state): # # Initial state Bell state # ------------------------ ---------- # 00 => q1=[1,0], q2=[1,0] --> 00+11 => (|00> + |11>)/sqrt(2) # 01 => q1=[1,0], q2=[0,1] --> 01+10 => (|01> + |10>)/sqrt(2) # 10 => q1=[0,1], q2=[1,0] --> 00-11 => (|00> - |11>)/sqrt(2) # 11 => q1=[0,1], q2=[0,1] --> 01-10 => (|01> - |10>)/sqrt(2) assert 0 <= initialState <= 3 if (initialState & 2) == 0: createQubit(q1, 1, 0) else: createQubit(q1, 0, 1) if (initialState & 1) == 0: createQubit(q2, 1, 0) else: createQubit(q2, 0, 1) applyGate(HadamardGate, q1) applyGate(ControlledNotGate, q1, q2) def encodeBell(bit1, bit2, qubit): # Input: # bit1 and bit2 are classical bits that determine which quantum gate(s) # to apply to the input qubit. # # Output: None # assert bit1 == 0 or bit1 == 1 assert bit2 == 0 or bit2 == 1 if bit1 == 0: if bit2 == 0: U = IdentityMatrix else: U = XGate else: if bit2 == 0: U = ZGate else: U = multiplyMatrixByMatrix(ZGate, XGate) applyGate(U, qubit) def measureBell(q1, q2): # Input: # q1 and q2 are the two qubits to be measured "in the Bell basis" # Output: # The tuple (b1, b2): # b1 is the result of the measurement on q1 # b2 is the result of the measurement on q2 # Apply a controlled NOT gate on q1 and q2, with q1 as the control bit. # Then apply a Hadamard gate on q1. applyGate(ControlledNotGate, q1, q2) applyGate(HadamardGate, q1) b1 = measureQubit(q1) b2 = measureQubit(q2) return b1, b2 def sendSuperdense(a1, a2, senderQubit, receiverQubit): prepareBell(senderQubit, receiverQubit) encodeBell(a1, a2, senderQubit) return measureBell(senderQubit, receiverQubit) def teleportQubit(fromQubit, viaQubit, toQubit): prepareBell(viaQubit, toQubit) b1, b2 = measureBell(fromQubit, viaQubit) encodeBell(b1, b2, toQubit) def quantumFourierTransform(x): # # Apply the quantum Fourier transform on x by applying Hadamard gates and controlled phase gates. # # This is equivalent to applyGate(FourierTransform(1 << len(x)), *x) # n = len(x) for i in xrange(n): for j in xrange(i, 0, -1): R = ControlledGate(PhaseShiftGate(math.pi / (1 << j))) applyGate(R, x[i], x[i - j]) applyGate(HadamardGate, x[i]) for i in xrange(n / 2): applyGate(SwapGate, x[i], x[n - 1 - i]) applyGate(IdentityMatrixN(1 << n), *x) # Reorder the state vector to be in the same order as the input x
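# ---------------------------------------------------------------------------
# Demo sketch (added): exercising the library above end-to-end. It only
# re-uses functions defined in this file (clearSystem, createQubit,
# sendSuperdense, teleportQubit, compareStateVectors); the qubit names and
# the 0.8/0.6 amplitudes are arbitrary choices for illustration.
# ---------------------------------------------------------------------------
def _qc_demo():
    # Superdense coding: two classical bits travel via one entangled pair.
    clearSystem()
    b1, b2 = sendSuperdense(1, 0, 'A', 'B')
    assert (b1, b2) == (1, 0)

    # Teleportation: the state of qubit 'C' is transferred onto qubit 'B'.
    clearSystem()
    createQubit('C', 0.8, 0.6)   # state to be teleported
    createQubit('R', 0.8, 0.6)   # reference copy, only for comparison
    teleportQubit('C', 'A', 'B')
    assert compareStateVectors('B', 'R', verbose=False)

if __name__ == '__main__':
    _qc_demo()
    print('qc demo passed')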
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the currently experimental in-graph batch ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading import time import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import function from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.framework.errors import InvalidArgumentError from tensorflow.python.ops import array_ops from tensorflow.python.ops import batch_ops from tensorflow.python.ops import gen_batch_ops from tensorflow.python.ops import gen_functional_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import script_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test def delayed_plus1(x): """Sleeps for 100ms then returns x+1.""" time.sleep(0.1) return x + 1 @test_util.run_all_in_graph_and_eager_modes class BatchOpsTest(test.TestCase): """Tests for batch_ops.{un,}batch.""" # Test for only non eager mode as batching in eager context as a functionality # is TBD. def testBasicBatch(self): """Tests that a single batched tensor executes together and only once.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, index, _ = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=2, batch_timeout_micros=36000000, grad_timeout_micros=0, batching_queue="") thread_results = [] def worker(): thread_results.extend( sess.run([batched, index], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([batched, index], feed_dict={inp: [2]}) worker_thread.join() # At this point either the thread or the main did the batch and the other # should have empty results. if list(thread_results[0][0]): batch_t = thread_results[0][0] index_t = thread_results[1] empty_b = main_results[0][0] empty_m = main_results[1] else: batch_t = main_results[0][0] index_t = main_results[1] empty_b = thread_results[0][0] empty_m = thread_results[1] # Check that both the inputs made it out exactly once. self.assertAllEqual(sorted(batch_t), (1, 2)) # Check that we get 2 rows in the index tensor. self.assertEqual(len(index_t), 2) # Check that the other ones are empty. 
self.assertEqual(len(empty_b), 0) self.assertEqual(len(empty_m), 0) def testBatchWithPadding(self): """Test that batching with padding up to an allowed batch size works.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[2]) batched, index, _ = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=10, batch_timeout_micros=100000, # 100ms allowed_batch_sizes=[5, 10], grad_timeout_micros=0, batching_queue="") thread_results = [] def worker(): thread_results.extend( sess.run([batched, index], feed_dict={inp: [1, 3]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([batched, index], feed_dict={inp: [2, 4]}) worker_thread.join() # At this point either the thread or the main did the batch and the other # should have empty results. if list(thread_results[0][0]): batch_t = thread_results[0][0] else: batch_t = main_results[0][0] # Check that the batch tensor incorporates the padding. self.assertEqual(len(batch_t), 5) def testMultipleBatch(self): """Tests that multiple batched tensors execute together.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, _, _ = batch_ops.batch( [inp0, inp1], num_batch_threads=1, max_batch_size=2, batch_timeout_micros=36000000, grad_timeout_micros=0, batching_queue="") thread_results = [] def worker(): thread_results.extend( sess.run([batched], feed_dict={inp0: [1], inp1: [2]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([batched], feed_dict={inp0: [2], inp1: [3]}) worker_thread.join() # At this point either the thread or the main did the batch and the other # should have empty results. if list(thread_results[0][0]): batch_t = thread_results[0] empty_t = main_results[0] else: batch_t = main_results[0] empty_t = thread_results[0] # Assert that the tensors were batched together. 
self.assertAllEqual(sorted(batch_t[0]), [1, 2]) self.assertAllEqual(sorted(batch_t[1]), [2, 3]) self.assertAllEqual(empty_t[0], []) self.assertAllEqual(empty_t[1], []) def testIllegalBatchDifferentDim0Sizes(self): """Tests illegally feeding tensors with different dim0 sizes.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2]) batched, index, _ = batch_ops.batch( [inp0, inp1], num_batch_threads=1, max_batch_size=2, batch_timeout_micros=0, grad_timeout_micros=0, batching_queue="") with self.assertRaises(Exception) as raised: _ = sess.run([batched, index], feed_dict={inp0: [0], inp1: [1, 2]}) self.assertGreater( raised.exception.message.find("must have equal 0th-dimension size"), 0) def testBasicUnbatch(self): """Tests that batch and unbatch work together.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, index, id_t = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=10, batch_timeout_micros=100000, # 100ms allowed_batch_sizes=[3, 10], grad_timeout_micros=0, batching_queue="") computation = batched[0] + 1 result = batch_ops.unbatch(computation, index, id_t, timeout_micros=1000000, shared_name="unbatch") thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [2]}) worker_thread.join() self.assertEqual(thread_results[0], [2]) self.assertEqual(main_results[0], [3]) def testBasicUnbatchDecorated(self): """Tests that the batch_function decorator works.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: # TODO(apassos): Removing this line causes test flakiness! Ideally should # be investigated. default_inp = array_ops.placeholder_with_default(2, shape=[]) # pylint: disable=unused-variable @batch_ops.batch_function(1, 10, 100000) def computation(in_t): self.assertTrue(in_t.shape is not None) return in_t + 1 inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) result = computation(inp) thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [2]}) worker_thread.join() self.assertEqual(thread_results[0], [2]) self.assertEqual(main_results[0], [3]) def testBatchDecoratedWithCapturedInput(self): """Tests that the batch_function decorator works.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: captured_inp0 = array_ops.placeholder_with_default(2., shape=[]) captured_inp1 = resource_variable_ops.ResourceVariable(3.) with ops.device("/cpu:0"): captured_inp2 = resource_variable_ops.ResourceVariable(4.) 
@batch_ops.batch_function(1, 10, 100000) def computation(in_t): return in_t + captured_inp0 + captured_inp1 + captured_inp2 inp = array_ops.placeholder(dtype=dtypes.float32, shape=[1]) result = computation(inp) thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [1]})) sess.run(variables.global_variables_initializer()) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [2]}) worker_thread.join() self.assertEqual(thread_results[0], [10]) self.assertEqual(main_results[0], [11]) @test_util.disable_xla("DeviceIndex returns sentinel value with XLA") def testBatchDecoratedGpu(self): if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: @batch_ops.batch_function(1, 10, 100000) def computation(in_t): # index is 0 on CPU and 1 on GPU index = gen_functional_ops.DeviceIndex(device_names=["CPU", "GPU"]) return in_t + math_ops.cast(index, dtypes.float32) inp = array_ops.placeholder(dtype=dtypes.float32, shape=[1]) result = computation(inp) thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [10.]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [20.]}) worker_thread.join() self.assertEqual(thread_results[0], [10 + test_util.is_gpu_available()]) self.assertEqual(main_results[0], [20 + test_util.is_gpu_available()]) def testBatchFunctionOp(self): """Tests that the batch_function op works.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: @function.Defun(dtypes.int32) def computation(in_t): return in_t + 1 inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) result = gen_batch_ops.batch_function( [inp], num_batch_threads=1, max_batch_size=10, batch_timeout_micros=100000, Tout=[dtypes.int32], f=computation, captured_tensors=computation.captured_inputs) thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [2]}) worker_thread.join() self.assertEqual(thread_results[0], [2]) self.assertEqual(main_results[0], [3]) def testBatchFunctionOpWithCapturedInput(self): """Tests that batch_function op works with captured input.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: captured_inp0 = array_ops.placeholder_with_default(2, shape=[]) captured_inp1 = array_ops.placeholder_with_default(1, shape=[]) inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) @function.Defun(dtypes.int32) def computation(inp): return inp + captured_inp0 - captured_inp1 result = gen_batch_ops.batch_function( num_batch_threads=1, max_batch_size=10, batch_timeout_micros=100000, # 100ms allowed_batch_sizes=[3, 10], batching_queue="", f=computation, in_tensors=[inp], captured_tensors=computation.captured_inputs, Tout=[o.type for o in computation.definition.signature.output_arg]) thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [2]}) worker_thread.join() self.assertEqual(thread_results[0], [2]) self.assertEqual(main_results[0], [3]) def testBatchFunctionOpWithInputError(self): """Tests that batch_function op works with error in the inputs.""" if context.executing_eagerly(): return 
with self.cached_session(use_gpu=True) as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) @function.Defun(dtypes.int32, dtypes.int32) def computation(in0, in1): return in0 + in1 result = gen_batch_ops.batch_function( [inp], # computation actually expects 2 inputs. num_batch_threads=1, max_batch_size=10, batch_timeout_micros=100000, # 100ms batching_queue="", f=computation, captured_tensors=computation.captured_inputs, Tout=[o.type for o in computation.definition.signature.output_arg]) with self.assertRaisesRegex( InvalidArgumentError, r"Function takes 2 argument\(s\) but 1 argument\(s\) were passed"): sess.run([result], feed_dict={inp: [2]}) def testBatchFunctionOpWithLargeBatchSplitted(self): """Tests that the batch_function op works with large batch splitted.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: @function.Defun(dtypes.int32) def computation(in_t): return in_t + 3 inp = array_ops.placeholder(dtype=dtypes.int32) result = gen_batch_ops.batch_function( [inp], num_batch_threads=2, # enable_large_batch_splitting is True, so it's valid as long as # max('allowed_batch_sizes') <= 'max_batch_size'. allowed_batch_sizes=[1, 2], max_batch_size=5, batch_timeout_micros=100000, # 100ms Tout=[dtypes.int32], enable_large_batch_splitting=True, f=computation, captured_tensors=computation.captured_inputs) thread1_results = [] thread2_results = [] # Input sizes of worker1 and main thread are larger than # max(allowed_batch_sizes), while input size of worker2 is smaller. def worker1(): thread1_results.extend( sess.run([result], feed_dict={inp: [5, 6, 7, 8, 9]})) worker_thread1 = threading.Thread(target=worker1) worker_thread1.start() def worker2(): thread2_results.extend(sess.run([result], feed_dict={inp: [10]})) worker_thread2 = threading.Thread(target=worker2) worker_thread2.start() main_results = sess.run([result], feed_dict={inp: [2, 3, 4]}) worker_thread1.join() worker_thread2.join() self.assertTrue( np.all(np.equal(thread2_results[0], np.array([13], dtype=np.int32)))) self.assertTrue( np.all( np.equal(thread1_results[0], np.array([8, 9, 10, 11, 12], dtype=np.int32)))) self.assertTrue( np.all( np.equal(main_results[0], np.array([5, 6, 7], dtype=np.int32)))) def testBasicUnbatchDecoratedWithReshape(self): """Tests that the batch_function decorator works.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: @batch_ops.batch_function(1, 10, 100000) def computation(in_t): return array_ops.reshape(in_t, [-1]) + 1 inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1, 1]) result = computation(inp) thread_results = [] def worker(): thread_results.extend(sess.run([result], feed_dict={inp: [[1]]})) worker_thread = threading.Thread(target=worker) worker_thread.start() main_results = sess.run([result], feed_dict={inp: [[2]]}) worker_thread.join() self.assertEqual(thread_results[0], [2]) self.assertEqual(main_results[0], [3]) def testUnbatchTimeout(self): """Tests that the unbatch timeout works.""" if context.executing_eagerly(): return with self.cached_session(use_gpu=True) as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, index, id_t = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=2, batch_timeout_micros=36000000, grad_timeout_micros=0, batching_queue="") computation = batched[0] + 1 timeout_micros = 10 result = batch_ops.unbatch(computation, index, id_t, timeout_micros, shared_name="shared_unbatch") # Set up a parallel pipeline that delays the computation, but uses 
the # same unbatch resource object as the non-delayed pipeline. computation_delayed = script_ops.py_func(delayed_plus1, [batched[0]], dtypes.int32) result_delayed = batch_ops.unbatch(computation_delayed, index, id_t, timeout_micros, shared_name="shared_unbatch") thread_results = [] def worker(): # A first call using the non-delayed pipeline. The batcher will send an # empty tensor along the non-delayed pipeline. thread_results.extend(sess.run([result], feed_dict={inp: [1]})) worker_thread = threading.Thread(target=worker) worker_thread.start() time.sleep(0.1) # Ensure the thread's call starts first. # A second call using the delayed pipeline. The batcher will send the # batched tensor along the delayed pipeline, thus delaying the arrival of # the batched tensor at the unbatch op, relative to the empty tensor. # # TODO(olston, apassos): Avoid relying on the order in which the batch op # emits the empty tensor versus the batched one. _ = sess.run([result_delayed], feed_dict={inp: [2]}) worker_thread.join() # The thread's call should hit the timeout, and thus get 0 results. self.assertEqual(len(thread_results), 0) if __name__ == "__main__": test.main()
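# -----------------------------------------------------------------------------
# Standalone sketch (added): the batch_function pattern from the tests above,
# pulled out of the test harness. Graph mode only; thread counts, batch sizes
# and timeouts are illustrative values, and the extra session import is used
# only because this sketch does not have self.cached_session available.
# -----------------------------------------------------------------------------
def _batch_function_demo():
  from tensorflow.python.client import session as session_lib  # sketch-only import

  with ops.Graph().as_default():
    @batch_ops.batch_function(1, 10, 100000)  # threads, max batch size, timeout (us)
    def plus_one(in_t):
      return in_t + 1

    inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
    result = plus_one(inp)

    with session_lib.Session() as sess:
      thread_out = []
      def worker():
        thread_out.extend(sess.run([result], feed_dict={inp: [1]}))
      t = threading.Thread(target=worker)
      t.start()
      main_out = sess.run([result], feed_dict={inp: [2]})
      t.join()
      # Both calls are served by a single underlying batch of size 2.
      print(thread_out[0], main_out[0])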
from flexx.app._component2 import PyComponent, JsComponent from flexx.app._component2 import BaseAppComponent, LocalComponent, ProxyComponent from flexx.event import Component from flexx import event, app from flexx.util.testing import run_tests_if_main, raises, skip class StubSession: id = 'y' status = 2 app = None def _register_component(self, c, id=None): id = id or 'x' c._id = id c._uid = self.id + '_' + id def _unregister_component(self, c): pass def send_command(self, *command): pass def keep_alive(self, ob): pass class MyPComponent1(PyComponent): CSS = "xx" foo = event.IntProp() foo2 = event.IntProp() @event.action def increase_foo(self): self._mutate_foo(self.foo + 1) @event.reaction('foo') def track_foo(self, *events): pass class MyJComponent1(JsComponent): CSS = "xx" foo = event.IntProp() foo2 = event.IntProp() @event.action def increase_foo(self): self._mutate_foo(self.foo + 1) @event.reaction('foo') def track_foo(self, *events): pass class MyPComponent2(MyPComponent1): pass class MyJComponent2(MyJComponent1): pass all_classes = [MyPComponent2, MyJComponent2, MyPComponent2.JS, MyJComponent2.JS, MyPComponent1, MyJComponent1, MyPComponent1.JS, MyJComponent1.JS, PyComponent, JsComponent, PyComponent.JS, JsComponent.JS, LocalComponent, ProxyComponent, BaseAppComponent, Component] def test_pycomponent_heritage(): C = MyPComponent2 # Names and repr assert C.__name__ == C.JS.__name__ assert 'PyComponent' in repr(C) and 'PyComponent' in repr(C.JS) assert not 'proxy' in repr(C) and 'proxy' in repr(C.JS) assert not 'JS' in repr(C) and 'for JS' in repr(C.JS) mro = [MyPComponent2, MyPComponent1, PyComponent, LocalComponent, BaseAppComponent, Component, object] # Validate inheritance of py class assert C.mro() == mro # Also check issubclass() for cls in mro: assert issubclass(C, cls) for cls in all_classes: if cls not in mro: assert not issubclass(C, cls) # Also check isinstance() foo = C(flx_session=StubSession()) for cls in mro: assert isinstance(foo, cls) for cls in all_classes: if cls not in mro: assert not isinstance(foo, cls) mro = [MyPComponent2.JS, MyPComponent1.JS, PyComponent.JS, ProxyComponent, BaseAppComponent, Component, object] # Validate inheritance of JS class assert C.JS.mro() == mro # Also check issubclass() for cls in mro: assert issubclass(C.JS, cls) for cls in all_classes: if cls not in mro: assert not issubclass(C.JS, cls) def test_jscomponent_heritage(): session = app.manager.get_default_session() if session is None: session = app.manager.create_default_session() C = MyJComponent2 # Names and repr assert C.__name__ == C.JS.__name__ assert 'JsComponent' in repr(C) and 'JsComponent' in repr(C.JS) assert 'proxy' in repr(C) and 'proxy' not in repr(C.JS) assert not 'JS' in repr(C) and 'for JS' in repr(C.JS) mro = [MyJComponent2, MyJComponent1, JsComponent, ProxyComponent, BaseAppComponent, Component, object] # Validate inheritance of py class assert C.mro() == mro # Also check issubclass() for cls in mro: assert issubclass(C, cls) for cls in all_classes: if cls not in mro: assert not issubclass(C, cls) # Also check isinstance() foo = C(flx_session=session) for cls in mro: assert isinstance(foo, cls) for cls in all_classes: if cls not in mro: assert not isinstance(foo, cls) mro = [MyJComponent2.JS, MyJComponent1.JS, JsComponent.JS, LocalComponent, BaseAppComponent, Component, object] # Validate inheritance of JS class assert C.JS.mro() == mro # Also check issubclass() for cls in mro: assert issubclass(C.JS, cls) for cls in all_classes: if cls not in mro: assert not 
issubclass(C.JS, cls) def test_properties(): assert MyPComponent2.__properties__ == ['foo', 'foo2'] assert MyPComponent2.JS.__properties__ == ['foo', 'foo2'] assert MyJComponent2.__properties__ == ['foo', 'foo2'] assert MyJComponent2.JS.__properties__ == ['foo', 'foo2'] assert MyPComponent2.__actions__ == ['increase_foo'] assert MyPComponent2.JS.__actions__ == ['_emit_at_proxy'] assert MyJComponent2.__actions__ == ['_emit_at_proxy'] assert MyJComponent2.JS.__actions__ == ['increase_foo'] assert MyPComponent2.__reactions__ == ['track_foo'] assert MyPComponent2.JS.__reactions__ == [] assert MyJComponent2.__reactions__ == [] assert MyJComponent2.JS.__reactions__ == ['track_foo'] def test_cannot_instantiate_without_session(): app.manager.remove_default_session() with raises(RuntimeError) as err: PyComponent() assert 'needs a session!' in str(err.value) with raises(RuntimeError) as err: JsComponent() assert 'needs a session!' in str(err.value) def test_generated_js1(): m = app.assets.modules['flexx.app._component2'] js = m.get_js() classes = [] for line in js.splitlines(): if '._base_class =' in line: classes.append(line.split('.')[0]) assert classes == ['LocalProperty', 'BaseAppComponent', 'LocalComponent', 'ProxyComponent', 'StubComponent', 'JsComponent', 'PyComponent'] print(classes) def test_generated_js2(): js = MyPComponent2.JS.CODE assert '__properties__ = ["foo", "foo2"]' in js assert js.count('foo2') == 1 # in __properties__ assert js.count('increase_foo') == 0 assert js.count('_mutate_') == 0 js = MyJComponent2.JS.CODE assert '__properties__ = ["foo", "foo2"]' in js assert js.count('foo2') == 2 # in __properties__ and __proxy_properties__ assert js.count('increase_foo') == 1 assert js.count('_mutate_') == 0 def test_generated_css1(): assert not hasattr(MyPComponent1.JS, 'CSS') assert not hasattr(MyJComponent1.JS, 'CSS') assert not hasattr(MyPComponent2.JS, 'CSS') assert not hasattr(MyJComponent2.JS, 'CSS') assert MyPComponent1.CSS == 'xx' assert MyJComponent1.CSS == 'xx' assert MyPComponent2.CSS == '' assert MyJComponent2.CSS == '' def test_misc(): clss = app.get_component_classes() assert PyComponent in clss and JsComponent in clss assert LocalComponent not in clss and ProxyComponent not in clss assert BaseAppComponent not in clss # Assert that the list is a copy clss.remove(PyComponent) assert PyComponent in app.get_component_classes() run_tests_if_main()
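# ---------------------------------------------------------------------------
# Sketch (added): a small illustration of the Py/JS split exercised above.
# It only touches attributes the tests already use (__properties__,
# __actions__, JS, JS.CODE); the component and its expected values below are
# hypothetical and follow the pattern of test_properties/test_generated_js2.
# ---------------------------------------------------------------------------
class CounterExample(PyComponent):
    """Toy component, defined only to illustrate the Py/JS split tested above."""

    count = event.IntProp()

    @event.action
    def increment(self):
        self._mutate_count(self.count + 1)


def _component_split_demo():
    # The Python class carries the real property and action machinery, while
    # CounterExample.JS is the generated proxy class for the browser side.
    print(CounterExample.__properties__)     # expected: ['count']
    print(CounterExample.__actions__)        # expected: ['increment']
    print(CounterExample.JS.__properties__)  # mirrors the Python side
    print(CounterExample.JS.CODE[:200])      # first lines of the generated JS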
import os import tempfile import zipfile import shutil from dateutil import parser from unittest import TestCase import datetime as dtime from django.contrib.auth.models import Group, User from django.utils import timezone from hs_core.hydroshare import resource, get_resource_by_shortkey from hs_core.tests.api.utils import MyTemporaryUploadedFile from hs_core.models import GenericResource from hs_core.testing import MockIRODSTestCaseMixin from hs_core import hydroshare class TestCreateResource(MockIRODSTestCaseMixin, TestCase): def setUp(self): super(TestCreateResource, self).setUp() self.tmp_dir = tempfile.mkdtemp() self.hs_group, _ = Group.objects.get_or_create(name='Hydroshare Author') # create a user self.user = hydroshare.create_account( 'test_user@email.com', username='mytestuser', first_name='some_first_name', last_name='some_last_name', superuser=False, groups=[self.hs_group] ) # create files file_one = "test1.txt" file_two = "test2.tif" open(file_one, "w").close() open(file_two, "w").close() # open files for read and upload self.file_one = open(file_one, "r") self.file_two = open(file_two, "r") # Make a text file self.txt_file_path = os.path.join(self.tmp_dir, 'text.txt') txt = open(self.txt_file_path, 'w') txt.write("Hello World\n") txt.close() self.raster_file_path = 'hs_core/tests/data/cea.tif' def tearDown(self): super(TestCreateResource, self).tearDown() shutil.rmtree(self.tmp_dir) self.user.uaccess.delete() self.user.delete() self.hs_group.delete() User.objects.all().delete() Group.objects.all().delete() GenericResource.objects.all().delete() self.file_one.close() os.remove(self.file_one.name) self.file_two.close() os.remove(self.file_two.name) def test_create_resource_without_content_files(self): res = resource.create_resource( 'GenericResource', self.user, 'My Test Resource' ) self.assertEqual(res.resource_type, 'GenericResource') self.assertTrue(isinstance(res, GenericResource)) self.assertTrue(res.metadata.title.value == 'My Test Resource') self.assertTrue(res.created.strftime('%m/%d/%Y') == dtime.datetime.today().strftime('%m/%d/%Y')) self.assertTrue(res.creator == self.user) self.assertTrue(res.short_id is not None, 'Short ID has not been created!') self.assertEqual(res.files.all().count(), 0, 'Resource has content files') if res: res.delete() def test_create_resource_with_content_files(self): new_res = resource.create_resource( 'GenericResource', self.user, 'My Test Resource', files=(self.file_one,) ) # test resource has one file self.assertEqual(new_res.files.all().count(), 1) # test the mime_type of the content file res_file = new_res.files.all().first() self.assertEqual(res_file.mime_type, "text/plain") # test the extension of the content file self.assertEqual(res_file.extension, ".txt") self.assertEqual(new_res.resource_type, 'GenericResource') self.assertTrue(isinstance(new_res, GenericResource), type(new_res)) self.assertTrue(new_res.metadata.title.value == 'My Test Resource') self.assertTrue(new_res.created.strftime('%m/%d/%Y') == dtime.datetime.today().strftime('%m/%d/%Y')) self.assertTrue(new_res.creator == self.user) self.assertTrue(new_res.short_id is not None, 'Short ID has not been created!') self.assertEqual(new_res.files.all().count(), 1, msg="Number of content files is not equal to 1") if new_res: new_res.delete() # test creating resource with multiple files new_res = resource.create_resource( 'GenericResource', self.user, 'My Test Resource', files=(self.file_one, self.file_two) ) # test resource has 2 files 
self.assertEquals(new_res.files.all().count(), 2, msg="Number of content files is not equal to 2") if new_res: new_res.delete() def test_create_resource_with_metadata(self): # Note: if element 'type' or 'format' is added to the following dictionary, they will be ignored # see: 'test_create_resource_with_metadata_for_type' and 'test_create_resource_with_metadata_for_format' # element 'publisher' can't be part of the following dictionary - resource creation will fail otherwise # 'publisher' element can be created only after the resource is published # see: 'test_create_resource_with_metadata_for_publisher' # only date element of type 'valid' is honored. Other date types metadata is ignored # see: 'test_create_resource_with_metadata_for_date' metadata_dict = [ {'description': {'abstract': 'My test abstract'}}, {'creator': {'name': 'John Smith', 'email': 'jsmith@gmail.com'}}, {'creator': {'name': 'Lisa Molley', 'email': 'lmolley@gmail.com'}}, {'contributor': {'name': 'Kelvin Marshal', 'email': 'kmarshal@yahoo.com', 'organization': 'Utah State University', 'profile_links': [{'type': 'yahooProfile', 'url': 'http://yahoo.com/LH001'}]}}, {'coverage': {'type': 'period', 'value': {'name': 'Name for period coverage', 'start': '1/1/2000', 'end': '12/12/2012'}}}, {'coverage': {'type': 'point', 'value': {'name': 'Name for point coverage', 'east': '56.45678', 'north': '12.6789', 'units': 'deg'}}}, {'identifier': {'name': 'someIdentifier', 'url':"http://some.org/001"}}, {'relation': {'type': 'isPartOf', 'value': 'http://hydroshare.org/resource/001'}}, {'rights': {'statement': 'This is the rights statement for this resource', 'url': 'http://rights.org/001'}}, {'source': {'derived_from': 'http://hydroshare.org/resource/0001'}}, {'subject': {'value': 'sub-1'}}, {'subject': {'value': 'sub-2'}}, {'language': {'code': 'fre'}}, {'date': {'type': 'valid', 'start_date': parser.parse('01/20/2016'), 'end_date': parser.parse('02/20/2016')}}, ] res = resource.create_resource( resource_type='GenericResource', owner=self.user, title='My Test Resource', metadata=metadata_dict ) # title element is created as part of resource creation self.assertEqual(res.metadata.title.value, 'My Test Resource', msg='resource title did not match') # resource description element is created as part of resource creation self.assertEqual(res.metadata.description.abstract, 'My test abstract') # the following 3 date elements should have been created as part of resource creation self.assertEqual(res.metadata.dates.all().count(), 3, msg="Number of date elements not equal to 3.") self.assertIn('created', [dt.type for dt in res.metadata.dates.all()], msg="Date element type 'Created' does not exist") self.assertIn('modified', [dt.type for dt in res.metadata.dates.all()], msg="Date element type 'Modified' does not exist") self.assertIn('valid', [dt.type for dt in res.metadata.dates.all()], msg="Date element type 'Modified' does not exist") # number of creators at this point should be 3 (2 are created based on supplied metadata and one is # automatically generated as part of the resource creation self.assertEqual(res.metadata.creators.all().count(), 3, msg='Number of creators not equal to 3') self.assertIn('John Smith', [cr.name for cr in res.metadata.creators.all()], msg="Creator 'John Smith' was not found") self.assertIn('Lisa Molley', [cr.name for cr in res.metadata.creators.all()], msg="Creator 'Lisa Molley' was not found") # number of contributors at this point should be 1 self.assertEqual(res.metadata.contributors.all().count(), 1, msg='Number 
of contributors not equal to 1') # there should be now 2 coverage elements as per the supplied metadata self.assertEqual(res.metadata.coverages.all().count(), 2, msg="Number of coverages not equal to 2.") # there should be no format elements self.assertEqual(res.metadata.formats.all().count(), 0, msg="Number of format elements not equal to 0.") # there should be now 2 identifier elements (one was created from the supplied metadat and the # other one was auto generated at the time of resource creation) self.assertEqual(res.metadata.identifiers.all().count(), 2, msg="Number of identifier elements not equal to 1.") # Language element created based on supplied metadata self.assertEqual(res.metadata.language.code, 'fre', msg="Resource has a language that is not French.") self.assertEqual(res.metadata.relations.all().count(), 1, msg="Number of relation elements is not equal to 1") self.assertEqual(res.metadata.rights.statement, 'This is the rights statement for this resource', msg="Statement of rights did not match.") self.assertEqual(res.metadata.rights.url, 'http://rights.org/001', msg="URL of rights did not match.") self.assertEqual(res.metadata.sources.all().count(), 1, msg="Number of sources is not equal to 1.") self.assertIn('http://hydroshare.org/resource/0001', [src.derived_from for src in res.metadata.sources.all()], msg="Source element with derived from value of %s does not exist." % 'http://hydroshare.org/resource/0001') # there should be 2 subject elements for this resource self.assertEqual(res.metadata.subjects.all().count(), 2, msg="Number of subject elements found not be 1.") self.assertIn('sub-1', [sub.value for sub in res.metadata.subjects.all()], msg="Subject element with value of %s does not exist." % 'sub-1') self.assertIn('sub-2', [sub.value for sub in res.metadata.subjects.all()], msg="Subject element with value of %s does not exist." 
% 'sub-1') # valid date should have been created self.assertEquals(res.metadata.dates.filter(type='valid').count(), 1) valid_date_element = res.metadata.dates.filter(type='valid').first() valid_start_date = timezone.make_aware(dtime.datetime.strptime('01/20/2016', "%m/%d/%Y"), timezone.get_default_timezone()) valid_end_date = timezone.make_aware(dtime.datetime.strptime('02/20/2016', "%m/%d/%Y"), timezone.get_default_timezone()) self.assertEquals(valid_date_element.start_date, valid_start_date) self.assertEquals(valid_date_element.end_date, valid_end_date) if res: res.delete() def test_create_resource_with_metadata_for_publisher(self): # trying to create a resource with metadata for publisher should fail due to the fact that the # resource is not yet published metadata_dict = [{'publisher': {'name': 'HydroShare', 'url': 'https://hydroshare.org'}}, ] with self.assertRaises(Exception): resource.create_resource(resource_type='GenericResource', owner=self.user, title='My Test Resource', metadata=metadata_dict ) def test_create_resource_with_metadata_for_type(self): # trying to create a resource with metadata for type element should ignore the provided type element data # and create the system generated type element metadata_dict = [{'type': {'url': 'https://hydroshare.org/GenericResource'}}, ] res = resource.create_resource( resource_type='GenericResource', owner=self.user, title='My Test Resource', metadata=metadata_dict ) type_url = '{0}/terms/{1}'.format(hydroshare.utils.current_site_url(), 'GenericResource') self.assertEqual(res.metadata.type.url, type_url, msg='type element url is wrong') if res: res.delete() def test_create_resource_with_metadata_for_format(self): # trying to create a resource with metadata for format element should ignore the provided format element data # as format elements are system generated based on resource content files metadata_dict = [{'format': {'value': 'plain/text'}}, {'format': {'value': 'image/tiff'}}] res = resource.create_resource( resource_type='GenericResource', owner=self.user, title='My Test Resource', metadata=metadata_dict ) self.assertEqual(res.metadata.formats.all().count(), 0, msg="Number of format elements not equal to 0.") if res: res.delete() def test_create_resource_with_metadata_for_date(self): # trying to create a resource with metadata for 'date' element of type 'created' or 'modified' should ignore # the provided date metadata as date of type created and modified are system generated based on resource # creation time. # trying to create a resource with metadata for 'date' element of type 'published' should ignore the provided # metadata as date of type published is created when the resource is published # trying to create a resource with metadata for 'date' element of type 'available' should ignore the provided # metadata as date of type available is created when the resource is made public. 
# the only date element that can be created at the time of resource creation by specifying necessary data is # of the type 'valid' metadata_dict = [{'date': {'type': 'created', 'start_date': parser.parse('01/16/2016')}}, {'date': {'type': 'modified', 'start_date': parser.parse('01/16/2016')}}, {'date': {'type': 'published', 'start_date': parser.parse('01/16/2016')}}, {'date': {'type': 'available', 'start_date': parser.parse('01/16/2016')}}, {'date': {'type': 'valid', 'start_date': parser.parse('01/20/2016'), 'end_date': parser.parse('02/20/2016')}}] res = resource.create_resource( resource_type='GenericResource', owner=self.user, title='My Test Resource', metadata=metadata_dict ) self.assertIn('created', [dt.type for dt in res.metadata.dates.all()], msg="Date element type 'Created' does not exist") self.assertIn('modified', [dt.type for dt in res.metadata.dates.all()], msg="Date element type 'Modified' does not exist") # skipped dates are created, modified, published, and available skipped_date = timezone.make_aware(dtime.datetime.strptime('01/16/2016', "%m/%d/%Y"), timezone.get_default_timezone()) self.assertNotIn(skipped_date, [dat.start_date for dat in res.metadata.dates.all()], msg="Matching date value was found") self.assertEquals(res.metadata.dates.filter(type='publisher').count(), 0, msg="Publisher date was found.") self.assertEquals(res.metadata.dates.filter(type='available').count(), 0, msg="Available date was found.") # valid date should have been created self.assertEquals(res.metadata.dates.filter(type='valid').count(), 1) valid_start_date = timezone.make_aware(dtime.datetime.strptime('01/20/2016', "%m/%d/%Y"), timezone.get_default_timezone()) valid_end_date = timezone.make_aware(dtime.datetime.strptime('02/20/2016', "%m/%d/%Y"), timezone.get_default_timezone()) self.assertIn(valid_start_date, [dt.start_date for dt in res.metadata.dates.all()], msg="Matching date value was not found") self.assertIn(valid_end_date, [dt.end_date for dt in res.metadata.dates.all()], msg="Matching date value was not found") if res: res.delete() def test_create_resource_with_file(self): raster = open(self.raster_file_path) res = resource.create_resource('GenericResource', self.user, 'My Test resource', files=(raster,)) pid = res.short_id # get the resource by pid res = get_resource_by_shortkey(pid) self.assertEqual(res.resource_type, 'GenericResource') self.assertTrue(isinstance(res, GenericResource), type(res)) self.assertEqual(res.metadata.title.value, 'My Test resource') self.assertEquals(res.files.all().count(), 1) if res: res.delete() def test_create_resource_with_two_files(self): raster = MyTemporaryUploadedFile(open(self.raster_file_path, 'rb'), name=self.raster_file_path, content_type='image/tiff', size=os.stat(self.raster_file_path).st_size) text = MyTemporaryUploadedFile(open(self.txt_file_path, 'r'), name=self.txt_file_path, content_type='text/plain', size=os.stat(self.txt_file_path).st_size) res = resource.create_resource('GenericResource', self.user, 'My Test resource', files=(raster, text)) pid = res.short_id # get the resource by pid res = get_resource_by_shortkey(pid) self.assertEqual(res.resource_type, 'GenericResource') self.assertTrue(isinstance(res, GenericResource), type(res)) self.assertEqual(res.metadata.title.value, 'My Test resource') self.assertEquals(res.files.all().count(), 2) if res: res.delete() def test_create_resource_with_zipfile(self): # Make a zip file zip_path = os.path.join(self.tmp_dir, 'test.zip') with zipfile.ZipFile(zip_path, 'w') as zfile: 
zfile.write(self.raster_file_path) zfile.write(self.txt_file_path) # Create a resource with zipfile, do not un-pack payload = MyTemporaryUploadedFile(open(zip_path, 'rb'), name=zip_path, content_type='application/zip', size=os.stat(zip_path).st_size) res = resource.create_resource('GenericResource', self.user, 'My Test resource', files=(payload,)) pid = res.short_id # get the resource by pid res = get_resource_by_shortkey(pid) self.assertEquals(res.files.all().count(), 1) # Create a resource with zipfile, un-pack payload2 = MyTemporaryUploadedFile(open(zip_path, 'rb'), name=zip_path, content_type='application/zip', size=os.stat(zip_path).st_size) res = resource.create_resource('GenericResource', self.user, 'My Test resource', files=(payload2,), unpack_file=True) pid = res.short_id res = get_resource_by_shortkey(pid) self.assertEquals(res.files.all().count(), 2) if res: res.delete()
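# ---------------------------------------------------------------------------
# Illustrative sketch only, not part of the test cases above: the zipfile test
# hands create_resource() a file-like payload whose size is known up front.
# The standalone helper below shows one way to build such an archive entirely
# in memory with the standard library; the helper name and the member names
# are hypothetical and simply mirror the raster/text pair used in the tests.

import io
import zipfile


def build_zip_payload(named_contents):
    """Return (bytes, size) for an in-memory zip archive.

    ``named_contents`` is an iterable of (archive_name, bytes) pairs.
    """
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        for arcname, data in named_contents:
            zf.writestr(arcname, data)
    payload = buf.getvalue()
    return payload, len(payload)


if __name__ == '__main__':
    # Two small members, analogous to the raster + text pair used above.
    data, size = build_zip_payload([('example.tif', b'\x00' * 16),
                                    ('example.txt', b'hello world')])
    print('zip payload: %d bytes, %d members'
          % (size, len(zipfile.ZipFile(io.BytesIO(data)).namelist())))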
#!/usr/bin/env python2.7 # Copyright 2017 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import ctypes import math import sys import yaml import json with open('src/core/lib/debug/stats_data.yaml') as f: attrs = yaml.load(f.read()) REQUIRED_FIELDS = ['name', 'doc'] def make_type(name, fields): return (collections.namedtuple(name, ' '.join(list(set(REQUIRED_FIELDS + fields)))), []) def c_str(s, encoding='ascii'): if isinstance(s, unicode): s = s.encode(encoding) result = '' for c in s: if not (32 <= ord(c) < 127) or c in ('\\', '"'): result += '\\%03o' % ord(c) else: result += c return '"' + result + '"' types = ( make_type('Counter', []), make_type('Histogram', ['max', 'buckets']), ) inst_map = dict((t[0].__name__, t[1]) for t in types) stats = [] for attr in attrs: found = False for t, lst in types: t_name = t.__name__.lower() if t_name in attr: name = attr[t_name] del attr[t_name] lst.append(t(name=name, **attr)) found = True break assert found, "Bad decl: %s" % attr def dbl2u64(d): return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value def shift_works_until(mapped_bounds, shift_bits): for i, ab in enumerate(zip(mapped_bounds, mapped_bounds[1:])): a, b = ab if (a >> shift_bits) == (b >> shift_bits): return i return len(mapped_bounds) def find_ideal_shift(mapped_bounds, max_size): best = None for shift_bits in reversed(range(0,64)): n = shift_works_until(mapped_bounds, shift_bits) if n == 0: continue table_size = mapped_bounds[n-1] >> shift_bits if table_size > max_size: continue if table_size > 65535: continue if best is None: best = (shift_bits, n, table_size) elif best[1] < n: best = (shift_bits, n, table_size) print best return best def gen_map_table(mapped_bounds, shift_data): tbl = [] cur = 0 print mapped_bounds mapped_bounds = [x >> shift_data[0] for x in mapped_bounds] print mapped_bounds for i in range(0, mapped_bounds[shift_data[1]-1]): while i > mapped_bounds[cur]: cur += 1 tbl.append(cur) return tbl static_tables = [] def decl_static_table(values, type): global static_tables v = (type, values) for i, vp in enumerate(static_tables): if v == vp: return i print "ADD TABLE: %s %r" % (type, values) r = len(static_tables) static_tables.append(v) return r def type_for_uint_table(table): mv = max(table) if mv < 2**8: return 'uint8_t' elif mv < 2**16: return 'uint16_t' elif mv < 2**32: return 'uint32_t' else: return 'uint64_t' def gen_bucket_code(histogram): bounds = [0, 1] done_trivial = False done_unmapped = False first_nontrivial = None first_unmapped = None while len(bounds) < histogram.buckets + 1: if len(bounds) == histogram.buckets: nextb = int(histogram.max) else: mul = math.pow(float(histogram.max) / bounds[-1], 1.0 / (histogram.buckets + 1 - len(bounds))) nextb = int(math.ceil(bounds[-1] * mul)) if nextb <= bounds[-1] + 1: nextb = bounds[-1] + 1 elif not done_trivial: done_trivial = True first_nontrivial = len(bounds) bounds.append(nextb) bounds_idx = decl_static_table(bounds, 'int') if done_trivial: first_nontrivial_code = dbl2u64(first_nontrivial) 
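# Note on the bit trick used here: dbl2u64() reinterprets a double's IEEE-754
# bit pattern as an unsigned 64-bit integer.  For non-negative finite doubles
# that bit pattern increases monotonically with the value, so the generated C
# fast path can select a histogram bucket with integer comparisons and a right
# shift into a small lookup table instead of a floating-point search.
# find_ideal_shift() chooses a shift that keeps as many consecutive shifted
# bounds distinct as possible (preferring larger shifts and rejecting tables
# that would exceed the size limit), and gen_map_table() then builds the table
# indexed by the shifted, offset bit pattern computed below.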
code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds] shift_data = find_ideal_shift(code_bounds[first_nontrivial:], 256 * histogram.buckets) #print first_nontrivial, shift_data, bounds #if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]] code = '\n/* Automatically generated by tools/codegen/core/gen_stats_data.py */\n' code += 'value = GPR_CLAMP(value, 0, %d);\n' % histogram.max map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data) if first_nontrivial is None: code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, value);\n' % histogram.name.upper()) else: code += 'if (value < %d) {\n' % first_nontrivial code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, value);\n' % histogram.name.upper()) code += 'return;\n' code += '}' first_nontrivial_code = dbl2u64(first_nontrivial) if shift_data is not None: map_table_idx = decl_static_table(map_table, type_for_uint_table(map_table)) code += 'union { double dbl; uint64_t uint; } _val, _bkt;\n' code += '_val.dbl = value;\n' code += 'if (_val.uint < %dull) {\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code) code += 'int bucket = ' code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d;\n' % (map_table_idx, first_nontrivial_code, shift_data[0], first_nontrivial) code += '_bkt.dbl = grpc_stats_table_%d[bucket];\n' % bounds_idx code += 'bucket -= (_val.uint < _bkt.uint);\n' code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, bucket);\n' % histogram.name.upper() code += 'return;\n' code += '}\n' code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, '% histogram.name.upper() code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_%d, %d));\n' % (bounds_idx, histogram.buckets) return (code, bounds_idx) # utility: print a big comment block into a set of files def put_banner(files, banner): for f in files: print >>f, '/*' for line in banner: print >>f, ' * %s' % line print >>f, ' */' print >>f with open('src/core/lib/debug/stats_data.h', 'w') as H: # copy-paste copyright notice from this file with open(sys.argv[0]) as my_source: copyright = [] for line in my_source: if line[0] != '#': break for line in my_source: if line[0] == '#': copyright.append(line) break for line in my_source: if line[0] != '#': break copyright.append(line) put_banner([H], [line[2:].rstrip() for line in copyright]) put_banner([H], ["Automatically generated by tools/codegen/core/gen_stats_data.py"]) print >>H, "#ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H" print >>H, "#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H" print >>H print >>H, "#include <inttypes.h>" print >>H, "#include \"src/core/lib/iomgr/exec_ctx.h\"" print >>H for typename, instances in sorted(inst_map.items()): print >>H, "typedef enum {" for inst in instances: print >>H, " GRPC_STATS_%s_%s," % (typename.upper(), inst.name.upper()) print >>H, " GRPC_STATS_%s_COUNT" % (typename.upper()) print >>H, "} grpc_stats_%ss;" % (typename.lower()) print >>H, "extern const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT];" % ( typename.lower(), typename.upper()) print >>H, "extern const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT];" % ( typename.lower(), typename.upper()) histo_start = [] histo_buckets = [] histo_bucket_boundaries = [] print >>H, "typedef enum {" first_slot = 0 for histogram in inst_map['Histogram']: histo_start.append(first_slot) histo_buckets.append(histogram.buckets) print >>H, " GRPC_STATS_HISTOGRAM_%s_FIRST_SLOT = %d," % 
(histogram.name.upper(), first_slot) print >>H, " GRPC_STATS_HISTOGRAM_%s_BUCKETS = %d," % (histogram.name.upper(), histogram.buckets) first_slot += histogram.buckets print >>H, " GRPC_STATS_HISTOGRAM_BUCKETS = %d" % first_slot print >>H, "} grpc_stats_histogram_constants;" for ctr in inst_map['Counter']: print >>H, ("#define GRPC_STATS_INC_%s(exec_ctx) " + "GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_%s)") % ( ctr.name.upper(), ctr.name.upper()) for histogram in inst_map['Histogram']: print >>H, "#define GRPC_STATS_INC_%s(exec_ctx, value) grpc_stats_inc_%s((exec_ctx), (int)(value))" % ( histogram.name.upper(), histogram.name.lower()) print >>H, "void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, int x);" % histogram.name.lower() for i, tbl in enumerate(static_tables): print >>H, "extern const %s grpc_stats_table_%d[%d];" % (tbl[0], i, len(tbl[1])) print >>H, "extern const int grpc_stats_histo_buckets[%d];" % len(inst_map['Histogram']) print >>H, "extern const int grpc_stats_histo_start[%d];" % len(inst_map['Histogram']) print >>H, "extern const int *const grpc_stats_histo_bucket_boundaries[%d];" % len(inst_map['Histogram']) print >>H, "extern void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, int x);" % len(inst_map['Histogram']) print >>H print >>H, "#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */" with open('src/core/lib/debug/stats_data.c', 'w') as C: # copy-paste copyright notice from this file with open(sys.argv[0]) as my_source: copyright = [] for line in my_source: if line[0] != '#': break for line in my_source: if line[0] == '#': copyright.append(line) break for line in my_source: if line[0] != '#': break copyright.append(line) put_banner([C], [line[2:].rstrip() for line in copyright]) put_banner([C], ["Automatically generated by tools/codegen/core/gen_stats_data.py"]) print >>C, "#include \"src/core/lib/debug/stats_data.h\"" print >>C, "#include \"src/core/lib/debug/stats.h\"" print >>C, "#include \"src/core/lib/iomgr/exec_ctx.h\"" print >>C, "#include <grpc/support/useful.h>" histo_code = [] for histogram in inst_map['Histogram']: code, bounds_idx = gen_bucket_code(histogram) histo_bucket_boundaries.append(bounds_idx) histo_code.append(code) for typename, instances in sorted(inst_map.items()): print >>C, "const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT] = {" % ( typename.lower(), typename.upper()) for inst in instances: print >>C, " %s," % c_str(inst.name) print >>C, "};" print >>C, "const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT] = {" % ( typename.lower(), typename.upper()) for inst in instances: print >>C, " %s," % c_str(inst.doc) print >>C, "};" for i, tbl in enumerate(static_tables): print >>C, "const %s grpc_stats_table_%d[%d] = {%s};" % ( tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1])) for histogram, code in zip(inst_map['Histogram'], histo_code): print >>C, ("void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, int value) {%s}") % ( histogram.name.lower(), code) print >>C, "const int grpc_stats_histo_buckets[%d] = {%s};" % ( len(inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets)) print >>C, "const int grpc_stats_histo_start[%d] = {%s};" % ( len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start)) print >>C, "const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % ( len(inst_map['Histogram']), ','.join('grpc_stats_table_%d' % x for x in histo_bucket_boundaries)) print >>C, "void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, int x) = {%s};" % ( len(inst_map['Histogram']), 
','.join('grpc_stats_inc_%s' % histogram.name.lower() for histogram in inst_map['Histogram'])) # patch qps_test bigquery schema RECORD_EXPLICIT_PERCENTILES = [50, 95, 99] with open('tools/run_tests/performance/scenario_result_schema.json', 'r') as f: qps_schema = json.loads(f.read()) def FindNamed(js, name): for el in js: if el['name'] == name: return el def RemoveCoreFields(js): new_fields = [] for field in js['fields']: if not field['name'].startswith('core_'): new_fields.append(field) js['fields'] = new_fields RemoveCoreFields(FindNamed(qps_schema, 'clientStats')) RemoveCoreFields(FindNamed(qps_schema, 'serverStats')) def AddCoreFields(js): for counter in inst_map['Counter']: js['fields'].append({ 'name': 'core_%s' % counter.name, 'type': 'INTEGER', 'mode': 'NULLABLE' }) for histogram in inst_map['Histogram']: js['fields'].append({ 'name': 'core_%s' % histogram.name, 'type': 'STRING', 'mode': 'NULLABLE' }) js['fields'].append({ 'name': 'core_%s_bkts' % histogram.name, 'type': 'STRING', 'mode': 'NULLABLE' }) for pctl in RECORD_EXPLICIT_PERCENTILES: js['fields'].append({ 'name': 'core_%s_%dp' % (histogram.name, pctl), 'type': 'FLOAT', 'mode': 'NULLABLE' }) AddCoreFields(FindNamed(qps_schema, 'clientStats')) AddCoreFields(FindNamed(qps_schema, 'serverStats')) with open('tools/run_tests/performance/scenario_result_schema.json', 'w') as f: f.write(json.dumps(qps_schema, indent=2, sort_keys=True)) # and generate a helper script to massage scenario results into the format we'd # like to query with open('tools/run_tests/performance/massage_qps_stats.py', 'w') as P: with open(sys.argv[0]) as my_source: for line in my_source: if line[0] != '#': break for line in my_source: if line[0] == '#': print >>P, line.rstrip() break for line in my_source: if line[0] != '#': break print >>P, line.rstrip() print >>P print >>P, '# Autogenerated by tools/codegen/core/gen_stats_data.py' print >>P print >>P, 'import massage_qps_stats_helpers' print >>P, 'def massage_qps_stats(scenario_result):' print >>P, ' for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:' print >>P, ' if "coreStats" not in stats: return' print >>P, ' core_stats = stats["coreStats"]' print >>P, ' del stats["coreStats"]' for counter in inst_map['Counter']: print >>P, ' stats["core_%s"] = massage_qps_stats_helpers.counter(core_stats, "%s")' % (counter.name, counter.name) for i, histogram in enumerate(inst_map['Histogram']): print >>P, ' h = massage_qps_stats_helpers.histogram(core_stats, "%s")' % histogram.name print >>P, ' stats["core_%s"] = ",".join("%%f" %% x for x in h.buckets)' % histogram.name print >>P, ' stats["core_%s_bkts"] = ",".join("%%f" %% x for x in h.boundaries)' % histogram.name for pctl in RECORD_EXPLICIT_PERCENTILES: print >>P, ' stats["core_%s_%dp"] = massage_qps_stats_helpers.percentile(h.buckets, %d, h.boundaries)' % ( histogram.name, pctl, pctl) with open('src/core/lib/debug/stats_data_bq_schema.sql', 'w') as S: columns = [] for counter in inst_map['Counter']: columns.append(('%s_per_iteration' % counter.name, 'FLOAT')) print >>S, ',\n'.join('%s:%s' % x for x in columns)
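# ---------------------------------------------------------------------------
# Standalone sketch, not used by the generator above: the generated C fast
# path approximates the plain-Python bucket search below.  Bucket bounds grow
# roughly geometrically from 1 up to the histogram maximum, with a linear
# prefix where consecutive bounds would otherwise differ by less than one.
# The helper names are illustrative only.

import bisect


def sketch_histogram_bounds(max_value, buckets):
    # Mirrors the bound-generation loop in gen_bucket_code(), minus the
    # first_nontrivial bookkeeping.
    bounds = [0, 1]
    while len(bounds) < buckets + 1:
        if len(bounds) == buckets:
            nextb = int(max_value)
        else:
            mul = math.pow(float(max_value) / bounds[-1],
                           1.0 / (buckets + 1 - len(bounds)))
            nextb = int(math.ceil(bounds[-1] * mul))
        if nextb <= bounds[-1] + 1:
            nextb = bounds[-1] + 1
        bounds.append(nextb)
    return bounds


def sketch_find_bucket_slow(value, bounds):
    # Index of the largest bound that is <= value; this is what the generated
    # shift-table lookup computes on its fast path.
    return max(0, bisect.bisect_right(bounds, value) - 1)

# Example usage (values depend on the histogram declaration):
#   bounds = sketch_histogram_bounds(histogram_max, histogram_buckets)
#   bucket = sketch_find_bucket_slow(observed_value, bounds)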
#!/usr/bin/python import unittest from parse import parse class TestCases(unittest.TestCase): def test_one(self): self.assertEquals( ['ARM:primary:1', 'ARM:primary:argent', 'ARM:primary:embowed', 'HORN AND ATTIRES:1:or'], list(parse(u"(Fieldless) An arm embowed argent sustaining a stag's attire Or.").describe())) self.assertEquals( ['CRAC:primary:1', 'CRAC:primary:or', 'CRAC:primary:plain cross', 'FDL:1:sable'], list(parse(u'(Fieldless) On a cross couped Or, a fleur-de-lys sable').describe())) self.assertEquals( ['AR', 'DOG:primary:1', 'DOG:primary:sable', 'DOG:primary:courant', 'ESCALLOP:3:gules'], list(parse(u'Argent, a greyhound courant sable between three escallops gules.').describe())) self.assertEquals( ['AR', 'SPINDLE:primary:1', 'SPINDLE:primary:purpure', '?TOOL-SEWING AND WEAVING:1:purpure:primary', 'CHIEF:1:purpure', 'POLE-AXE:1:argent'], list(parse(u'Argent, an empty bottom-whorl drop spindle and on a chief purpure a glaive argent.').describe())) self.assertEquals( ['AZ', 'CRAC:primary:1', 'CRAC:primary:or', 'CRAC:primary:moline', 'CHIEF:1:or', 'BIRD:3:azure:volant to dexter', '?BIRD9DEMI:3:azure:volant to dexter'], list(parse(u'Azure, a cross moline and on a chief Or three martlets volant azure').describe())) self.assertEquals( ['OR', 'FIELD TREATMENT-SEME (CRUSILLY):sable', '?CRAC:sable', 'CROSS:primary:1', 'CROSS:primary:gules', 'BIRD:1:or:eagle:displayed', '?BIRD9DEMI:1:or:eagle:displayed'], list(parse(u'Or, crusilly sable, on a cross throughout gules, an eagle displayed Or.\n\n').describe())) self.assertEquals( ['OR', 'ARROW:primary:2', 'ARROW:primary:vert', 'INSA:2:vert:primary', 'JEWELS AND JEWELRY:1:gules', '?CROWN:1:gules'], list(parse(u'Or, two arrows in saltire vert within a rosary gules.\n').describe())) self.assertEquals( ['PB:purpure:~and sable', 'HEADDOG:primary:1', 'HEADDOG:primary:argent', 'FLOWER-IRIS AND ORCHID:primary:1', 'FLOWER-IRIS AND ORCHID:primary:argent', 'FLOWER-IRIS AND ORCHID:primary:bendwise'], list(parse(u"Per bend purpure and sable, a wolf's head erased and an iris bendwise slipped and leaved argent").describe())) self.assertEquals( ['PCI:sable:~and purpure', 'CASTLE:primary:1', 'CASTLE:primary:or', '?BEACON AND BRAZIER:1:or:primary', 'INPALE:or', 'LW:primary:1', 'LW:primary:or'], list(parse(u'Per chevron inverted sable and purpure, in pale a lighthouse and a laurel wreath Or\n').describe())) self.assertEquals( ['PU', 'CASTLE:primary:1', 'CASTLE:primary:or', '?BEACON AND BRAZIER:1:or:primary'], list(parse(u'Purpure, a lighthouse Or').describe())) self.assertEquals( ['PU', 'HEADDOG:primary:1', 'HEADDOG:primary:argent', 'FLOWER-IRIS AND ORCHID:1:argent:maintained'], list(parse(u"Purpure, a wolf's head erased maintaining an iris slipped and leaved argent").describe())) self.assertEquals( ['MONSTER-SPHINX:primary:1', 'MONSTER-SPHINX:primary:or', 'MONSTER-SPHINX:primary:couchant'], list(parse(u'(Fieldless) An Egyptian sphinx couchant Or.').describe())) self.assertEquals( ['AZ', 'BEAST-BEAR:primary:1', 'BEAST-BEAR:primary:or', 'BEAST-BEAR:primary:rampant', 'KNOT AND ROPE:3:or'], list(parse(u'Azure, a bear rampant and in chief three quatrefoil knots Or.').describe())) self.assertEquals( ['AZ', 'DOG:primary:1', 'DOG:primary:argent', 'DOG:primary:sejant', 'CHIEF:1:argent', 'LETTERS,RUNES AND SYMBOLS:azure'], list(parse(u'Azure, a wolf sejant erect and on a chief argent the Elder Futhark runes laguz, dagaz, jera, ansuz, laguz, teiwaz, and jera azure.').describe())) self.assertEquals( ['FIELD DIV.-BARRY:or:~and azure', 'CHIEF:1:gules:indented', 'CAT:3:or'], 
list(parse(u'Barry Or and azure, on a chief indented gules three lions queue-forchy Or.').describe())) self.assertEquals( ['ER', 'PEN:primary:1', 'PEN:primary:purpure', 'PEN:primary:bendwise sinister'], list(parse(u'Ermine, a quill pen bendwise sinister purpure.\n').describe())) self.assertEquals( ['GU', 'FDL:primary:3', 'FDL:primary:or', 'ARRANGEMENT-IN BEND:or', 'BORDURE:1:or:embattled'], list(parse(u'Gules, in bend three fleurs-de-lis within a bordure embattled Or.\n').describe())) self.assertEquals( ['GYRONNY:argent:~and gules', 'CUP:primary:1', 'CUP:primary:purpure', 'BEAST-WEASEL AND OTTER:1:sable', 'BEAST9DEMI:1:sable'], list(parse(u'Gyronny argent and gules, issuant from a mug purpure a demi-weasel sable.\n\n').describe())) self.assertEquals( ['PB:azure:~and argent', 'ROSE:primary:1', 'ROSE:primary:argent', 'DOG:primary:1', 'DOG:primary:courant', 'DOG:primary:bendwise', 'CHIEF:1:argent', 'BIRD:3:sable', '?BIRD9DEMI:3:sable'], list(parse(u'Per bend azure and argent, a rose argent and a fox courant bendwise proper, on a chief argent three martlets sable').describe())) self.assertEquals( ['FIELD TREATMENT-VAIRY', 'PALL*7:primary:1', 'PALL*7:primary:gules'], list(parse(u'Vair, a pall inverted gules\n').describe())) self.assertEquals( ['VT', 'ARROW:primary:2', 'ARROW:primary:or', 'INSA:2:or:primary', 'CHIEF:1:or', 'LEAF:3:vert'], list(parse(u'Vert, two arrows in saltire and on a chief Or, three poplar leaves vert.\n\n').describe())) self.assertEquals( ['BIRD:primary:1', 'BIRD:primary:multicolor', 'BIRD:primary:volant to dexter', '?BIRD9DEMI:1:multicolor:primary:volant to dexter'], list(parse(u'(Fieldless) A swallow volant per fess azure and argent\n\n').describe())) self.assertEquals( ['AR', 'FESS:primary:2', 'FESS:primary:azure', 'FESS:primary:wavy', 'ROUNDEL:1:sable', 'CRESCENT:2:sable'], list(parse(u'Argent, two bars wavy azure and in chief a roundel between an increscent and a decrescent sable\n\n').describe())) self.assertEquals( ['HEART:primary:1', 'HEART:primary:gules', '?LEAF:1:gules:primary', 'WINGED OBJECT'], list(parse(u'(Fieldless) A heart gules winged argent\n\n').describe())) self.assertEquals( ['VT', 'BEAST-BULL AND BISON:primary:1', 'BEAST-BULL AND BISON:primary:argent', 'BEAST-BULL AND BISON:primary:courant', 'MONSTER9WINGED:argent:courant', 'BASE:1:or'], list(parse(u'Vert, a winged bull courant wings elevated and addorsed argent and a base Or').describe())) self.assertEquals( ['STAR:primary:1', 'STAR:primary:argent', 'STAR:primary:of 5', 'STAR:primary:estoile'], list(parse(u'(Fieldless) An estoile of five rays argent\n\n').describe())) self.assertEquals( [u'PB:sable:~and argent', 'BEND:primary:1', 'BEND:primary:azure', '?FIELD DIV.-BENDY:1:azure:primary', 'CRAMPET:3:argent', 'MONSTER-PHOENIX:2:multicolor'], list(parse(u'Per bend sable and argent, on a bend azure between two phoenixes counterchanged, three crampets argent').describe())) self.assertEquals( ['AR', 'BEAST-MOUSE AND RAT:primary:1', 'BEAST-MOUSE AND RAT:primary:passant', 'BORDURE:1:purpure'], list(parse(u'Argent, a brown mouse passant proper and a bordure purpure\n').describe())) self.assertEquals( [u'QLY:sable:~and multicolor', 'FIELD TREATMENT-VAIRY', 'CASTLE:primary:3', 'CASTLE:primary:or', 'CASTLE:primary:palewise', 'ARRANGEMENT-IN BEND:or'], list(parse(u'Quarterly sable and vairy sable, argent, gules and Or, in bend three towers palewise Or\n\n').describe())) self.assertEquals( ['OR', 'BEND:primary:1', 'BEND:primary:azure', 'BEND:primary:cotised', '?FIELD DIV.-BENDY:1:azure:primary:cotised', 
'FDL:1:or:palewise', 'ROSE:2:or', 'CHIEF:1:gules', 'CAT:1:or:passant'], list(parse(u'Or, on a bend cotised azure a fleur-de-lys palewise between two cinquefoils Or and on a chief gules a lion passant guardant Or\n\n').describe())) self.assertEquals( ['AZ', 'PLANT-WHEAT:seme:or:seme on field', 'FIELD TREATMENT-SEME (9OTHER):or', 'PALE:primary:1', 'PALE:primary:or', '?FIELD DIV.-PALY:1:or:primary', 'SWORD:1:azure'], list(parse(u'Azure semy of ears of wheat, on a pale Or a sword azure').describe())) self.assertEquals( [u'PBS:sable:~and argent', 'STAR:primary:1', 'STAR:primary:or', 'STAR:primary:mullet', '?CALTRAP:1:or:primary', 'HAMMER:primary:2', '?HAMMER:2:primary', 'INSA:2:primary'], list(parse(u"Per bend sinister sable and argent, a spur rowel Or and two smith's hammers in saltire proper.").describe())) self.assertEquals( ['AR', 'MONSTER-PHOENIX:primary:1', 'MONSTER-PHOENIX:primary:gules', 'FIRE AND FLAME:1', 'BORDURE:1:gules', 'ROUNDEL:seme:or'], list(parse(u'Argent, a phoenix gules rising from flames proper and a bordure gules semy of bezants').describe())) self.assertEquals( ['VT', 'CAULDRON AND COOKING POT:primary:1', 'CAULDRON AND COOKING POT:primary:or', 'FIRE AND FLAME:1:or', 'CHIEF:1:or', 'FORK AND SPOON:2:vert', 'INSA:2:vert'], list(parse(u'Vert, a cauldron issuant from a flame and on a chief Or two ladles in saltire vert\n\n').describe())) self.assertEquals( [u'QLY:sable:~and or', 'BEND:primary:1', 'BEND:primary:argent', '?FIELD DIV.-BENDY:1:argent:primary', 'DOG:2:sable:rampant to sinister', 'PAW PRINT:5:sable'], list(parse(u'Quarterly sable and Or, on a bend argent between two wolves rampant contourney five paw prints sable.\n\n').describe())) self.assertEquals( [u'PFESS:azure:~and or', 'MONSTER-SEA LION:primary:1', 'MACE AND MORNINGSTAR:1:maintained'], list(parse(u'Per fess azure and Or, a sea lion maintaining a spiked mace counterchanged\n\n').describe())) self.assertEquals( [u'PC:vert:~and argent', 'FLOWER-TRUMPET SHAPE:primary:2', 'REPTILE-TURTLE:primary:1', 'REPTILE-TURTLE:primary:statant', 'LOZENGE:1:or', '?MASCLE AND RUSTRE:1:or', '?FIELD DIV.-VETU:1:or', '?FIELD DIV.-LOZENGY OR FUSILY:1:or', 'HARP:1:sable', 'LEG AND FOOT-MONSTER:1:vert:fesswise', '?CLAW:1:vert:fesswise'], list(parse(u"Per chevron vert and argent, two lilies and a turtle statant counterchanged, and for augmentation in fess point on a lozenge Or, a harp sable sustained by a dragon's jamb fesswise vert.\n\n").describe())) self.assertEquals( [u'GYRONNY:sable:~and vert', 'DRAGON:primary:1', 'DRAGON:primary:argent', 'DRAGON:primary:rampant', 'SWORD:1:or:maintained'], list(parse(u'Gyronny of eight, sable and vert a wyvern rampant argent langued gules, maintaining in dexter claw a sword erect Or.').describe())) self.assertEquals( [u'PPALE:azure:~and or', 'EYE:seme:argent:seme on field', 'FIELD TREATMENT-SEME (9OTHER):argent', 'HAND AND GAUNTLET:primary:1', 'HAND AND GAUNTLET:primary:multicolor'], list(parse(u'Per pale azure semy of eyes argent orbed sable and Or, a sinister hand counter-changed.\n').describe())) self.assertEquals( ['OR', 'STAR:primary:1', 'STAR:primary:sable', 'STAR:primary:of 9 or more', 'STAR:primary:mullet', '?CALTRAP:1:sable:primary:of 9 or more', 'BORDURE:1:sable'], list(parse(u'Or, a mullet of five greater and five lesser points within a bordure sable.\n\n').describe())) self.assertEquals( ['AR', 'HAMMER:primary:1', 'HAMMER:primary:sable', 'CRESCENT:2:sable', 'ARRANGEMENT-IN BEND:sable'], list(parse(u"Argent, a Thor's hammer between in bend two increscents sable.").describe())) 
self.assertEquals( [u'PBS:argent:~and gules', 'HUMAN FIGURE:primary:1', '?HUMAN FIGURE:1:primary', 'ROSE:primary:3', 'ROSE:primary:argent'], list(parse(u'Per bend sinister argent and gules, a brunette Caucasian maiden proper vested azure and three roses argent.').describe())) self.assertEquals( ['OR', 'TOOL9OTHER:primary:1', 'TOOL9OTHER:primary:sable', 'TOOL9OTHER:primary:bendwise sinister', '?ANVIL:1:sable:primary:bendwise sinister', 'FOODSTUFF:3:or', 'ARM:1:embowed', 'CHIEF:1:gules:rayonny'], list(parse(u"Or, a baker's peel bendwise sinister sable charged with three loaves of bread Or sustained by an arm embowed issuant from sinister proper vested sable, a chief rayonny gules.").describe())) self.assertEquals( ['TOOL9OTHER:primary:1', 'TOOL9OTHER:primary:sable', '?ANVIL:1:sable:primary', 'INPALE:sable', 'FOODSTUFF:3:argent', 'GARB:1:sable'], list(parse(u"(Fieldless) In pale a baker's peel sable charged with three loaves of bread argent issuant palewise from a garb sable.").describe())) self.assertEquals( ['GU', 'HEAD-BEAST,RAM AND GOAT:primary:2', 'HEAD-BEAST,RAM AND GOAT:primary:argent', 'HEAD-BEAST,RAM AND GOAT:primary:fesswise', 'ARRANGEMENT-IN FESS:argent', 'ARRANGEMENT9HEAD,RESPECTANT', 'STAR:2:or:mullet', '?CALTRAP:2:or', '?STAR:2:or', 'INPALE:or'], list(parse(u"Gules, in fess two lamb's heads fesswise respectant erased conjoined at the forehead argent between in pale two mullets Or.").describe())) self.assertEquals( [u'FIELD DIV.-VETU:ploye:argent:~and azure', 'REPTILE-SNAKE:primary:2', 'REPTILE-SNAKE:primary:sable', 'ARRANGEMENT9BEAST&MONSTER,RESPECTANT'], list(parse(u'Argent v\xeatu ploy\xe9 azure, two serpents erect respectant entwined sable.').describe())) self.assertEquals( [u'PPALE:gules:~and or', 'BELL:primary:2', 'BELL:primary:argent', 'INPALE:argent'], list(parse(u"Per pale gules and Or, in dexter in pale two hawk's bells argent.").describe())) self.assertEquals( [u'PC:azure:~and gules', 'CHEVRON:primary:1', 'CHEVRON:primary:or', 'HEAD-MONSTER,DRAGON:1:argent'], list(parse(u"Per chevron azure and gules, a chevron Or and overall a dragon's head cabossed argent.").describe())) self.assertEquals( [u'PBS:nebuly:or:~and azure', 'HARP:primary:2', 'HARP:primary:multicolor'], list(parse(u'Per bend sinister nebuly Or and azure, two harps counterchanged.').describe())) self.assertEquals( [u'PPALE:purpure:~and argent', 'CHEVRON:primary:1', 'PAW PRINT:4', 'CRAC:1:doubled'], list(parse(u'Per pale purpure and argent, on a chevron four pawprints and in base a Russian Orthodox cross, all counterchanged').describe())) self.assertEquals( [u'PPALE:argent:~and azure', 'DOG:primary:2', 'DOG:primary:multicolor', 'COMBAT'], list(parse(u'Per pale argent and azure, two wolves combattant counterchanged sable and argent.').describe())) self.assertEquals( [u'PC:or:~and vert', 'ROUNDEL:primary:2', 'ROUNDEL:primary:vert', 'TRISKELION:2:or', '?LEG AND FOOT-HUMAN:6:or', 'SEAWOLF:primary:1', 'SEAWOLF:primary:argent', 'SEAWOLF:primary:naiant to dexter', 'MONSTER9WINGED:argent:naiant to dexter'], list(parse(u'Per chevron Or and vert, two pommes each charged with a triskelion of armored legs Or and a winged sea-fox naiant argent.').describe())) self.assertEquals( [u'PPALE:argent:~and sable', 'ROUNDEL:primary:4', 'ROUNDEL:primary:multicolor', 'CHIEF:1:vert:engrailed', 'BELL:3:argent'], list(parse(u"Per pale argent and sable, four\n roundels counterchanged two and two and on a chief engrailed vert\n three hawk's bells argent.\n").describe())) self.assertEquals( [u'PFESS:azure:~and multicolor', 
'FESS:primary:1', 'FESS:primary:argent', '?FIELD DIV.-BARRY:1:argent:primary', 'DICE:2:azure'], list(parse(u'Per fess azure and checky argent and\n azure, on a fess argent two dice azure marked argent.').describe())) self.assertEquals( [u'PB:multicolor:~and argent', 'BEND:primary:1', 'BEND:primary:vert', '?FIELD DIV.-BENDY:1:vert:primary', 'FEATHER AND QUILL:1:sable:bendwise', '?PEN:1:sable:bendwise', '?FIELD TREATMENT-PLUMMETTY:1:sable:bendwise'], list(parse(u'Per bend lozengy argent and sable and\n argent, a bend vert and in base a feather bendwise sable.').describe())) self.assertEquals( [u'FIELD DIV.-LOZENGY OR FUSILY:argent:~and gules', 'CRAC:primary:1', 'CRAC:primary:sable', 'CRAC:primary:other cross', 'CHIEF:1:sable', 'FIRE AND FLAME:2:argent'], list(parse(u'Lozengy argent and gules, a cross of Saint Brigid and on a chief sable two flames argent.').describe())) self.assertEquals( ['CRAC:primary:1', 'CRAC:primary:argent', 'CRAC:primary:other cross', 'ERMINE SPOT:4:sable', '?FIELD TREATMENT-SEME (ERMINED):4:sable'], list(parse(u'(Fieldless) A cross of Canterbury argent each arm charged with an ermine spot head to center sable.\n').describe())) self.assertEquals( ['SA', 'ROUNDEL:seme:argent:seme on field', 'ARRANGEMENT-IN ORLE:argent', 'FIELD TREATMENT-SEME (9OTHER):argent', 'HEADDOG:primary:1', 'HEADDOG:primary:argent'], list(parse(u"Sable, a wolf's head erased contourny within an orle of roundels argent.").describe())) self.assertEquals( ['FISH8OTHER:primary:1', 'FISH8OTHER:primary:argent', 'FISH8OTHER:primary:naiant to dexter', 'FISH8OTHER:primary:embowed', 'MONSTER9WINGED:argent:naiant to dexter', 'ARRANGEMENT-IN ANNULO:1:argent:primary:naiant to dexter:embowed', 'HORN AND ATTIRES:1:argent:maintained'], list(parse(u"(Fieldless) A bat-winged fish attired of a stag's antlers naiant embowed in annulo argent.").describe())) self.assertEquals( ['PPALE:fur:~and fur', 'FIELD TREATMENT-SEME (ERMINED):multicolor', 'BIRD:primary:2', 'BIRD:primary:multicolor', 'BIRD:primary:owl', '?BIRD9DEMI:2:multicolor:owl:primary', 'ARRANGEMENT9BEAST&MONSTER,RESPECTANT'], list(parse(u'Per pale azure ermined argent and argent ermined azure, two owls respectant counterchanged argent and azure.').describe())) self.assertEquals( ['AR', u'CROSS:primary:1', u'CROSS:primary:azure', u'CROSS:primary:complex line', 'KNOT AND ROPE:1:or'], list(parse(u'Argent, on a cross nowy azure a trefoil knot Or.').describe())) self.assertEquals( ['CRAC:primary:1', 'CRAC:primary:purpure', 'ROUNDEL:primary:5', 'ROUNDEL:primary:purpure'], list(parse(u'(Fieldless) A cross of five golpes.').describe())) self.assertEquals( ['GU', 'ARROW:primary:1', 'ARROW:primary:argent', 'HEAD-BIRD:2:argent', 'ARRANGEMENT9HEAD,RESPECTANT'], list(parse(u"Gules, an arrow inverted between two bird's heads couped respectant argent.").describe())) self.assertEquals( ['QLY:azure:~and gules', 'BIRD:primary:1', 'BIRD:primary:argent', 'BIRD:primary:owl', '?BIRD9DEMI:1:argent:owl:primary', 'WREATH,OTHER:1:argent', '?FLOWER-MULTI-PETALED:seme:argent'], list(parse(u'Quarterly azure and gules, an owl within a wreath of daisies argent.').describe())) self.assertEquals( ['SA', 'ROUNDEL:primary:1', 'ROUNDEL:primary:argent', 'CRAC:1:azure:formy', 'BORDURE:1:multicolor'], list(parse(u'Sable, on a plate a Latin cross formy azure, a bordure parted bordurewise indented azure and argent.').describe())) self.assertEquals( ['AR', 'AMPHIBIAN-FROG:primary:1', 'AMPHIBIAN-FROG:primary:vert', 'AMPHIBIAN-FROG:primary:affronte', 'HEART:1:gules', '?LEAF:1:gules', 'ARRANGEMENT-IN 
ANNULO:1:sable', 'LETTERS,RUNES AND SYMBOLS:sable'], list(parse(u'Argent, a toad sejant affronty vert, spotted and crowned Or, charged with a heart gules fimbriated Or within in annulo the inscription "Before you meet the handsome prince you have to kiss a lot of toads" sable.').describe())) self.assertEquals( ['BEAST-BOAR:primary:1', 'BEAST-BOAR:primary:or', 'BEAST-BOAR:primary:passant', 'JEWELS AND JEWELRY:1:gules'], list(parse(u'(Fieldless) A boar passant Or charged on the shoulder with a hexagonal gemstone gules.').describe())) self.assertEquals( ['PC:argent:~and sable', 'TREE9BRANCH:primary:4', 'TREE9BRANCH:primary:vert', 'INSA:4:vert:primary', 'SWORD:primary:1', 'SWORD:primary:argent'], list(parse(u'Per chevron throughout argent and sable, between two pairs of branches in saltire vert, a sword inverted argent.').describe())) self.assertEquals( ['PC:azure:~and argent', 'FDL:primary:2', 'INSECT-BUTTERFLY AND MOTH:primary:1', 'BORDURE:1:multicolor', 'GOUTE:seme:multicolor'], list(parse(u'Per chevron azure and argent, two fleurs-de-lys and a butterfly counterchanged, a bordure goutty all counterchanged argent and gules.').describe())) self.assertEquals( ['PB:or:~and sable', 'GOUTE:primary:1', 'GOUTE:primary:multicolor'], list(parse(u'Per bend Or and sable, a goutte counterchanged.').describe())) self.assertEquals( ['GU', 'CALIPER AND COMPASS:primary:1', 'CALIPER AND COMPASS:primary:argent', '?TOOL9OTHER:1:argent:primary', 'CRESCENT:3:argent', 'STAR:3:sable:of 6:mullet', '?CALTRAP:3:sable:of 6', '?STAR:3:sable:of 6'], list(parse(u'Gules, a pair of calipers and in chief three crescents argent each crescent charged with a mullet of six points sable.').describe())) self.assertEquals( ['FIELD DIV.-PER PALL', 'TRISKELION:primary:3', 'TRISKELION:primary:multicolor'], list(parse(u'Per pall vert, Or, and argent, three triskeles argent, purpure, and azure.\n').describe())) self.assertEquals( ['OR', 'BIRD:primary:1', 'BIRD:primary:sable', '?BIRD9DEMI:1:sable:primary', 'BASE:1:vert:enarched', 'BORDURE:1:sable:nebuly'], list(parse(u'Or, a hen sable and a mount vert, a bordure nebuly sable.').describe())) self.assertEquals( ['GATE AND DOOR:primary:1', 'GATE AND DOOR:primary:multicolor', 'HEAD-BEAST,CAT AND LION:1:multicolor'], list(parse(u"(Fieldless) On a chainless portcullis per pale argent and sable a lion's head cabossed counterchanged.").describe())) self.assertEquals( ['SA', 'EYE:primary:1', 'EYE:primary:or', 'ARROW:1:or:bendwise sinister', 'CHIEF:1:or', 'ANNULET:6:sable', '?TORSE:6:sable', 'ROUNDEL:3:sable'], list(parse(u'Sable, an eye Or, irised sable, transfixed by an arrow bendwise sinister and on a chief Or within each of three sets of two concentric annulets, a roundel sable.').describe())) self.assertEquals( ['PPALE:gules:~and sable', 'BIRD:primary:1', 'BIRD:primary:argent', 'BIRD:primary:raven', 'BIRD:primary:displayed', '?BIRD9DEMI:1:argent:raven:primary:displayed', 'ROUNDEL:2:argent', 'INPALE:argent', 'TRISKELION:2'], list(parse(u'Per pale gules and sable, a raven displayed argent between in pale two plates charged with triskelions of spirals.').describe())) self.assertEquals( ['AZ', 'REPTILE-SNAKE:primary:3', 'REPTILE-SNAKE:primary:or', 'KNOT AND ROPE:primary:1', 'KNOT AND ROPE:primary:or'], list(parse(u'Azure, three snakes nowed in a trefoil knot Or.').describe())) self.assertEquals( ['PBS:purpure:~and vert', 'BS:primary:1', 'BS:primary:or', '?FIELD DIV.-BENDY*3:1:or:primary', 'HEAD-MONSTER,UNICORN:1:argent', 'STAR:3:or:mullet', '?CALTRAP:3:or', '?STAR:3:or'], list(parse(u"Per bend 
sinister purpure and vert, a bend sinister Or between a unicorn's head erased argent and 3 mullets Or").describe())) self.assertEquals( ['VT', 'FESS:primary:1', 'FESS:primary:multicolor', '?FIELD DIV.-BARRY:1:multicolor:primary', 'FLEAM:1', 'CHESS PIECE:1'], list(parse(u'Vert, on a fess per pale azure and Or, a fleam and a chessrook counterchanged.').describe())) self.assertEquals( ['VT', 'BEAST-DEER AND STAG:primary:1', 'BEAST-DEER AND STAG:primary:or', 'BEAST-DEER AND STAG:primary:passant to sinister', 'BEAST9DEMI:1:or:passant to sinister'], list(parse(u'Vert, a demi-stag passant to sinister reguardant Or.\n').describe())) self.assertEquals( ['AZ', 'FLOWER-CUP SHAPE:primary:1', 'FLOWER-CUP SHAPE:primary:argent', 'STAR:5:argent', 'ARRANGEMENT-IN CHEVRON:5:argent', 'ORLE AND TRESSURE:1:argent'], list(parse(u'Azure, a lotus blossom in profile beneath five compass stars in chevron, an orle argent.').describe())) self.assertEquals( ['AR', 'FESS:primary:1', 'FESS:primary:sable', '?FIELD DIV.-BARRY:1:sable:primary', 'CRESCENT:3:gules', 'MONSTER-ENFIELD:1:gules'], list(parse(u'Argent, surmounting a fess sable between three crescents an enfield gules.').describe())) self.assertEquals( ['VT', 'TRISKELION:primary:1', 'TRISKELION:primary:or', 'HEADDOG:primary:3', 'HEADDOG:primary:or'], list(parse(u"Vert, a triskelion of wolves' heads Or.\n").describe())) self.assertEquals( ['AZ', 'LETTERS,RUNES AND SYMBOLS:seme:argent:seme on field', 'FIELD TREATMENT-SEME (9OTHER):argent', 'BEAST-RABBIT:primary:1', 'BEAST-RABBIT:primary:argent', 'BEAST-RABBIT:primary:rampant', 'HOURGLASS:1:argent:maintained'], list(parse(u'Azure semy of Greek letters pi, a rabbit rampant maintaining an hourglass argent.\n').describe())) self.assertEquals( ['FIELD DIV.-PER PALL', 'BEAST-HORSE:seme:or:seme on field:passant', 'FIELD TREATMENT-SEME (9OTHER):or'], list(parse(u'Per pall gules, azure and vert, semy of horses passant Or').describe())) self.assertEquals( ['SA', 'FIRE AND FLAME:primary:1', '?FIRE AND FLAME:1:primary', 'REPTILE-SNAKE:1:argent', 'ARRANGEMENT-IN ANNULO:1:argent', 'STAR:1:argent:of 8:mullet', '?CALTRAP:1:argent:of 8', '?STAR:1:argent:of 8', 'BORDURE:1:argent'], list(parse(u'Sable, on a flame proper a serpent in annulo with head to base argent surrounding a mullet of eight points pierced argent within a bordure argent').describe())) self.assertEquals( ['GU', 'HEAD-BEAST,RABBIT:primary:1', 'HEAD-BEAST,RABBIT:primary:argent', 'HEAD-JESSANT-DE-LYS:primary:or', '?HEAD-JESSANT-DE-LYS:primary'], list(parse(u"Gules, a rabbit's head argent jessant-de-lys Or.\n\n").describe())) self.assertEquals( ['OR', 'LW:primary:1', 'LW:primary:vert'], list(parse(u'Or, a Laurel wreath vert.\n').describe())) self.assertEquals( ['AR', 'FESS:primary:1', 'FESS:primary:gules', '?FIELD DIV.-BARRY:1:gules:primary', 'BEAST-HORSE:3:gules:rampant'], list(parse(u'Argent, a fess and three stallions rampant gules.').describe())) self.assertEquals( ['AR', 'CAT:seme:vert:seme on field', 'FIELD TREATMENT-SEME (9OTHER):vert'], list(parse(u'Argent, seme of lions vert.\n').describe())) self.assertEquals( ['FIELD DIV.-CHECKY:or:~and sable', 'FIELD DIV.-BENDY:or:~and sable'], list(parse(u'Checky and bendy Or and sable.').describe())) self.assertEquals( ['FIELD DIV.-CHECKY:or:~and sable', 'FIELD DIV.-BENDY:or:~and sable'], list(parse(u'Checky of nine and bendy Or and sable.').describe())) self.assertEquals( ['FIELD DIV.-CHECKY:multicolor:~and vert'], list(parse(u'Checky bendy Or and sable and vert.').describe())) self.assertEquals( ['OR', 'WELL:primary:1', 
'WELL:primary:vert', '?ARCHITECTURE:1:vert:primary'], list(parse(u'Or, a well vert.').describe())) self.assertEquals( ['OR', 'STAR:primary:1', 'STAR:primary:vert', 'STAR:primary:sun'], list(parse(u'Or, a sun vert.').describe())) self.assertEquals( ['OR', 'STAR:primary:1', 'STAR:primary:vert', 'STAR:primary:estoile'], list(parse(u'Or, an estoile vert.').describe())) self.assertEquals( ['OR', 'STAR:primary:1', 'STAR:primary:vert', 'STAR:primary:mullet', '?CALTRAP:1:vert:primary'], list(parse(u'Or, a mullet vert.').describe())) self.assertEquals( ['FIELD TREATMENT-POTENTY:argent:~and sable', 'PALL:primary:1', 'PALL:primary:gules'], list(parse(u'Potenty argent and sable, a pall gules').describe())) self.assertEquals( ['VT', 'CHEVRON*7:primary:1', 'CHEVRON*7:primary:azure', 'SPINDLE:2:argent', '?TOOL-SEWING AND WEAVING:2:argent', 'ROUNDEL-DEMI:3:argent'], list(parse(u'Vert, on a chevron inverted azure fimbriated Or two drop spindles and in base three demi-roundel two and one argent').describe())) if __name__ == "__main__": unittest.main()
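# ---------------------------------------------------------------------------
# Reading the expected values above: parse(...).describe() yields a flat list
# of colon-separated descriptor strings.  The first field looks like a charge
# or field-division category code, a leading '?' appears to mark a tentative
# alternate indexing of the same charge, and the remaining fields carry role,
# count, tincture and posture details.  The helper below is only a debugging
# aid sketched from that observation (it is not part of parse()); it groups a
# description by category so that failing assertEquals diffs are easier to
# scan.


def group_descriptors_by_category(descriptors):
    """Map category code -> list of trailing field lists.

    e.g. 'DOG:primary:1' contributes {'DOG': [['primary', '1']]}.
    """
    grouped = {}
    for desc in descriptors:
        tentative = desc.startswith('?')
        fields = desc.lstrip('?').split(':')
        key = ('?' if tentative else '') + fields[0]
        grouped.setdefault(key, []).append(fields[1:])
    return grouped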
# -*- coding: utf-8 -*- """ sphinx.ext.autosummary ~~~~~~~~~~~~~~~~~~~~~~ Sphinx extension that adds an autosummary:: directive, which can be used to generate function/method/attribute/etc. summary lists, similar to those output eg. by Epydoc and other API doc generation tools. An :autolink: role is also provided. autosummary directive --------------------- The autosummary directive has the form:: .. autosummary:: :nosignatures: :toctree: generated/ module.function_1 module.function_2 ... and it generates an output table (containing signatures, optionally) ======================== ============================================= module.function_1(args) Summary line from the docstring of function_1 module.function_2(args) Summary line from the docstring ... ======================== ============================================= If the :toctree: option is specified, files matching the function names are inserted to the toctree with the given prefix: generated/module.function_1 generated/module.function_2 ... Note: The file names contain the module:: or currentmodule:: prefixes. .. seealso:: autosummary_generate.py autolink role ------------- The autolink role functions as ``:obj:`` when the name referred can be resolved to a Python object, and otherwise it becomes simple emphasis. This can be used as the default role to make links 'smart'. :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import os import re import sys import inspect import posixpath from docutils.parsers.rst import directives from docutils.statemachine import ViewList from docutils import nodes from sphinx import addnodes from sphinx.util.compat import Directive # -- autosummary_toc node ------------------------------------------------------ class autosummary_toc(nodes.comment): pass def process_autosummary_toc(app, doctree): """Insert items described in autosummary:: to the TOC tree, but do not generate the toctree:: list. """ env = app.builder.env crawled = {} def crawl_toc(node, depth=1): crawled[node] = True for j, subnode in enumerate(node): try: if (isinstance(subnode, autosummary_toc) and isinstance(subnode[0], addnodes.toctree)): env.note_toctree(env.docname, subnode[0]) continue except IndexError: continue if not isinstance(subnode, nodes.section): continue if subnode not in crawled: crawl_toc(subnode, depth+1) crawl_toc(doctree) def autosummary_toc_visit_html(self, node): """Hide autosummary toctree list in HTML output.""" raise nodes.SkipNode def autosummary_noop(self, node): pass # -- autosummary_table node ---------------------------------------------------- class autosummary_table(nodes.comment): pass def autosummary_table_visit_html(self, node): """Make the first column of the table non-breaking.""" try: tbody = node[0][0][-1] for row in tbody: col1_entry = row[0] par = col1_entry[0] for j, subnode in enumerate(list(par)): if isinstance(subnode, nodes.Text): new_text = unicode(subnode.astext()) new_text = new_text.replace(u" ", u"\u00a0") par[j] = nodes.Text(new_text) except IndexError: pass # -- autodoc integration ------------------------------------------------------- class FakeDirective: env = {} genopt = {} def get_documenter(obj, parent): """Get an autodoc.Documenter class suitable for documenting the given object. *obj* is the Python object to be documented, and *parent* is an another Python object (e.g. a module or a class) to which *obj* belongs to. 
""" from sphinx.ext.autodoc import AutoDirective, DataDocumenter, \ ModuleDocumenter if inspect.ismodule(obj): # ModuleDocumenter.can_document_member always returns False return ModuleDocumenter # Construct a fake documenter for *parent* if parent is not None: parent_doc_cls = get_documenter(parent, None) else: parent_doc_cls = ModuleDocumenter if hasattr(parent, '__name__'): parent_doc = parent_doc_cls(FakeDirective(), parent.__name__) else: parent_doc = parent_doc_cls(FakeDirective(), "") # Get the corrent documenter class for *obj* classes = [cls for cls in AutoDirective._registry.values() if cls.can_document_member(obj, '', False, parent_doc)] if classes: classes.sort(key=lambda cls: cls.priority) return classes[-1] else: return DataDocumenter # -- .. autosummary:: ---------------------------------------------------------- class Autosummary(Directive): """ Pretty table containing short signatures and summaries of functions etc. autosummary can also optionally generate a hidden toctree:: node. """ required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False has_content = True option_spec = { 'toctree': directives.unchanged, 'nosignatures': directives.flag, 'template': directives.unchanged, } def warn(self, msg): self.warnings.append(self.state.document.reporter.warning( msg, line=self.lineno)) def run(self): self.env = env = self.state.document.settings.env self.genopt = {} self.warnings = [] names = [x.strip().split()[0] for x in self.content if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])] items = self.get_items(names) nodes = self.get_table(items) if 'toctree' in self.options: suffix = env.config.source_suffix dirname = posixpath.dirname(env.docname) tree_prefix = self.options['toctree'].strip() docnames = [] for name, sig, summary, real_name in items: docname = posixpath.join(tree_prefix, real_name) if docname.endswith(suffix): docname = docname[:-len(suffix)] docname = posixpath.normpath(posixpath.join(dirname, docname)) if docname not in env.found_docs: self.warn('toctree references unknown document %r' % docname) docnames.append(docname) tocnode = addnodes.toctree() tocnode['includefiles'] = docnames tocnode['entries'] = [(None, docname) for docname in docnames] tocnode['maxdepth'] = -1 tocnode['glob'] = None tocnode = autosummary_toc('', '', tocnode) nodes.append(tocnode) return self.warnings + nodes def get_items(self, names): """Try to import the given names, and return a list of ``[(name, signature, summary_string, real_name), ...]``. """ env = self.state.document.settings.env prefixes = get_import_prefixes_from_env(env) items = [] max_item_chars = 50 for name in names: display_name = name if name.startswith('~'): name = name[1:] display_name = name.split('.')[-1] try: real_name, obj, parent = import_by_name(name, prefixes=prefixes) except ImportError: self.warn('failed to import %s' % name) items.append((name, '', '', name)) continue # NB. 
using real_name here is important, since Documenters # handle module prefixes slightly differently documenter = get_documenter(obj, parent)(self, real_name) if not documenter.parse_name(): self.warn('failed to parse name %s' % real_name) items.append((display_name, '', '', real_name)) continue if not documenter.import_object(): self.warn('failed to import object %s' % real_name) items.append((display_name, '', '', real_name)) continue # -- Grab the signature sig = documenter.format_signature() if not sig: sig = '' else: max_chars = max(10, max_item_chars - len(display_name)) sig = mangle_signature(sig, max_chars=max_chars) sig = sig.replace('*', r'\*') # -- Grab the summary doc = list(documenter.process_doc(documenter.get_doc())) while doc and not doc[0].strip(): doc.pop(0) m = re.search(r"^([A-Z][^A-Z]*?\.\s)", " ".join(doc).strip()) if m: summary = m.group(1).strip() elif doc: summary = doc[0].strip() else: summary = '' items.append((display_name, sig, summary, real_name)) return items def get_table(self, items): """Generate a proper list of table nodes for autosummary:: directive. *items* is a list produced by :meth:`get_items`. """ table_spec = addnodes.tabular_col_spec() table_spec['spec'] = 'll' table = autosummary_table('') real_table = nodes.table('', classes=['longtable']) table.append(real_table) group = nodes.tgroup('', cols=2) real_table.append(group) group.append(nodes.colspec('', colwidth=10)) group.append(nodes.colspec('', colwidth=90)) body = nodes.tbody('') group.append(body) def append_row(*column_texts): row = nodes.row('') for text in column_texts: node = nodes.paragraph('') vl = ViewList() vl.append(text, '<autosummary>') self.state.nested_parse(vl, 0, node) try: if isinstance(node[0], nodes.paragraph): node = node[0] except IndexError: pass row.append(nodes.entry('', node)) body.append(row) for name, sig, summary, real_name in items: qualifier = 'obj' if 'nosignatures' not in self.options: col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, sig) else: col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name) col2 = summary append_row(col1, col2) return [table_spec, table] def mangle_signature(sig, max_chars=30): """Reformat a function signature to a more compact form.""" s = re.sub(r"^\((.*)\)$", r"\1", sig).strip() # Strip strings (which can contain things that confuse the code below) s = re.sub(r"\\\\", "", s) s = re.sub(r"\\'", "", s) s = re.sub(r"'[^']*'", "", s) # Parse the signature to arguments + options args = [] opts = [] opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=") while s: m = opt_re.search(s) if not m: # The rest are arguments args = s.split(', ') break opts.insert(0, m.group(2)) s = m.group(1)[:-2] # Produce a more compact signature sig = limited_join(", ", args, max_chars=max_chars-2) if opts: if not sig: sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars-4) elif len(sig) < max_chars - 4 - 2 - 3: sig += "[, %s]" % limited_join(", ", opts, max_chars=max_chars-len(sig)-4-2) return u"(%s)" % sig def limited_join(sep, items, max_chars=30, overflow_marker="..."): """Join a number of strings to one, limiting the length to *max_chars*. If the string overflows this limit, replace the last fitting item by *overflow_marker*. 
Returns: joined_string """ full_str = sep.join(items) if len(full_str) < max_chars: return full_str n_chars = 0 n_items = 0 for j, item in enumerate(items): n_chars += len(item) + len(sep) if n_chars < max_chars - len(overflow_marker): n_items += 1 else: break return sep.join(list(items[:n_items]) + [overflow_marker]) # -- Importing items ----------------------------------------------------------- def get_import_prefixes_from_env(env): """ Obtain current Python import prefixes (for `import_by_name`) from ``document.env`` """ prefixes = [None] currmodule = env.temp_data.get('py:module') if currmodule: prefixes.insert(0, currmodule) currclass = env.temp_data.get('py:class') if currclass: if currmodule: prefixes.insert(0, currmodule + "." + currclass) else: prefixes.insert(0, currclass) return prefixes def import_by_name(name, prefixes=[None]): """Import a Python object that has the given *name*, under one of the *prefixes*. The first name that succeeds is used. """ tried = [] for prefix in prefixes: try: if prefix: prefixed_name = '.'.join([prefix, name]) else: prefixed_name = name obj, parent = _import_by_name(prefixed_name) return prefixed_name, obj, parent except ImportError: tried.append(prefixed_name) raise ImportError('no module named %s' % ' or '.join(tried)) def _import_by_name(name): """Import a Python object given its full name.""" try: name_parts = name.split('.') # try first interpret `name` as MODNAME.OBJ modname = '.'.join(name_parts[:-1]) if modname: try: __import__(modname) mod = sys.modules[modname] return getattr(mod, name_parts[-1]), mod except (ImportError, IndexError, AttributeError): pass # ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ... last_j = 0 modname = None for j in reversed(range(1, len(name_parts)+1)): last_j = j modname = '.'.join(name_parts[:j]) try: __import__(modname) except:# ImportError: continue if modname in sys.modules: break if last_j < len(name_parts): parent = None obj = sys.modules[modname] for obj_name in name_parts[last_j:]: parent = obj obj = getattr(obj, obj_name) return obj, parent else: return sys.modules[modname], None except (ValueError, ImportError, AttributeError, KeyError), e: raise ImportError(*e.args) # -- :autolink: (smart default role) ------------------------------------------- def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]): """Smart linking role. Expands to ':obj:`text`' if `text` is an object that can be imported; otherwise expands to '*text*'. 
""" env = inliner.document.settings.env r = env.get_domain('py').role('obj')( 'obj', rawtext, etext, lineno, inliner, options, content) pnode = r[0][0] prefixes = get_import_prefixes_from_env(env) try: name, obj, parent = import_by_name(pnode['reftarget'], prefixes) except ImportError: content = pnode[0] r[0][0] = nodes.emphasis(rawtext, content[0].astext(), classes=content['classes']) return r def process_generate_options(app): genfiles = app.config.autosummary_generate ext = app.config.source_suffix if genfiles and not hasattr(genfiles, '__len__'): env = app.builder.env genfiles = [x + ext for x in env.found_docs if os.path.isfile(env.doc2path(x))] if not genfiles: return from generate import generate_autosummary_docs genfiles = [genfile + (not genfile.endswith(ext) and ext or '') for genfile in genfiles] generate_autosummary_docs(genfiles, builder=app.builder, warn=app.warn, info=app.info, suffix=ext, base_path=app.srcdir) def setup(app): # I need autodoc app.setup_extension('sphinx.ext.autodoc') app.add_node(autosummary_toc, html=(autosummary_toc_visit_html, autosummary_noop), latex=(autosummary_noop, autosummary_noop), text=(autosummary_noop, autosummary_noop), man=(autosummary_noop, autosummary_noop), texinfo=(autosummary_noop, autosummary_noop)) app.add_node(autosummary_table, html=(autosummary_table_visit_html, autosummary_noop), latex=(autosummary_noop, autosummary_noop), text=(autosummary_noop, autosummary_noop), man=(autosummary_noop, autosummary_noop), texinfo=(autosummary_noop, autosummary_noop)) app.add_directive('autosummary', Autosummary) app.add_role('autolink', autolink_role) app.connect('doctree-read', process_autosummary_toc) app.connect('builder-inited', process_generate_options) app.add_config_value('autosummary_generate', [], True)
# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from six.moves.urllib import parse as urlparse from keystoneclient import exceptions from keystoneclient import httpclient from keystoneclient.i18n import _, _LE _logger = logging.getLogger(__name__) class Client(httpclient.HTTPClient): """Client for the OpenStack Keystone pre-version calls API. :param string endpoint: A user-supplied endpoint URL for the keystone service. :param integer timeout: Allows customization of the timeout for client http requests. (optional) Example:: >>> from keystoneclient.generic import client >>> root = client.Client(auth_url=KEYSTONE_URL) >>> versions = root.discover() ... >>> from keystoneclient.v2_0 import client as v2client >>> keystone = v2client.Client(auth_url=versions['v2.0']['url']) ... >>> user = keystone.users.get(USER_ID) >>> user.delete() """ def __init__(self, endpoint=None, **kwargs): """Initialize a new client for the Keystone v2.0 API.""" super(Client, self).__init__(endpoint=endpoint, **kwargs) self.endpoint = endpoint def discover(self, url=None): """Discover Keystone servers and return API versions supported. :param url: optional url to test (without version) Returns:: { 'message': 'Keystone found at http://127.0.0.1:5000/', 'v2.0': { 'status': 'beta', 'url': 'http://127.0.0.1:5000/v2.0/', 'id': 'v2.0' }, } """ if url: return self._check_keystone_versions(url) else: return self._local_keystone_exists() def _local_keystone_exists(self): """Checks if Keystone is available on default local port 35357.""" results = self._check_keystone_versions("http://localhost:35357") if results is None: results = self._check_keystone_versions("https://localhost:35357") return results def _check_keystone_versions(self, url): """Calls Keystone URL and detects the available API versions.""" try: resp, body = self._request(url, "GET", headers={'Accept': 'application/json'}) # Multiple Choices status code is returned by the root # identity endpoint, with references to one or more # Identity API versions -- v3 spec # some cases we get No Content if resp.status_code in (200, 204, 300): try: results = {} if 'version' in body: results['message'] = _("Keystone found at %s") % url version = body['version'] # Stable/diablo incorrect format id, status, version_url = ( self._get_version_info(version, url)) results[str(id)] = {"id": id, "status": status, "url": version_url} return results elif 'versions' in body: # Correct format results['message'] = _("Keystone found at %s") % url for version in body['versions']['values']: id, status, version_url = ( self._get_version_info(version, url)) results[str(id)] = {"id": id, "status": status, "url": version_url} return results else: results['message'] = ( _("Unrecognized response from %s") % url) return results except KeyError: raise exceptions.AuthorizationFailure() elif resp.status_code == 305: return self._check_keystone_versions(resp['location']) else: raise exceptions.from_response(resp, "GET", url) except Exception: 
_logger.exception(_LE('Failed to detect available versions.')) def discover_extensions(self, url=None): """Discover Keystone extensions supported. :param url: optional url to test (should have a version in it) Returns:: { 'message': 'Keystone extensions at http://127.0.0.1:35357/v2', 'OS-KSEC2': 'OpenStack EC2 Credentials Extension', } """ if url: return self._check_keystone_extensions(url) def _check_keystone_extensions(self, url): """Calls Keystone URL and detects the available extensions.""" try: if not url.endswith("/"): url += '/' resp, body = self._request("%sextensions" % url, "GET", headers={'Accept': 'application/json'}) if resp.status_code in (200, 204): # some cases we get No Content if 'extensions' in body and 'values' in body['extensions']: # Parse correct format (per contract) extensions = body['extensions']['values'] elif 'extensions' in body: # Support incorrect, but prevalent format extensions = body['extensions'] else: return dict(message=( _('Unrecognized extensions response from %s') % url)) return dict(self._get_extension_info(e) for e in extensions) elif resp.status_code == 305: return self._check_keystone_extensions(resp['location']) else: raise exceptions.from_response( resp, "GET", "%sextensions" % url) except Exception: _logger.exception(_LE('Failed to check keystone extensions.')) @staticmethod def _get_version_info(version, root_url): """Parses version information. :param version: a dict of a Keystone version response :param root_url: string url used to construct the version if no URL is provided. :returns: tuple - (verionId, versionStatus, versionUrl) """ id = version['id'] status = version['status'] ref = urlparse.urljoin(root_url, id) if 'links' in version: for link in version['links']: if link['rel'] == 'self': ref = link['href'] break return (id, status, ref) @staticmethod def _get_extension_info(extension): """Parses extension information. :param extension: a dict of a Keystone extension response :returns: tuple - (alias, name) """ alias = extension['alias'] name = extension['name'] return (alias, name)
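# Usage sketch (illustration only): the two static parsing helpers above can
# be exercised without a live Keystone service. The dicts below are made-up
# samples shaped like the documented responses, not real service data.
if __name__ == '__main__':
    version_entry = {
        'id': 'v2.0',
        'status': 'stable',
        'links': [{'rel': 'self', 'href': 'http://127.0.0.1:5000/v2.0/'}],
    }
    # ('v2.0', 'stable', 'http://127.0.0.1:5000/v2.0/') -- the 'self' link
    # overrides the URL that would otherwise be joined from root_url and id.
    print(Client._get_version_info(version_entry, 'http://127.0.0.1:5000/'))

    extension_entry = {'alias': 'OS-KSEC2',
                       'name': 'OpenStack EC2 Credentials Extension'}
    # ('OS-KSEC2', 'OpenStack EC2 Credentials Extension')
    print(Client._get_extension_info(extension_entry))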
from __future__ import absolute_import from __future__ import unicode_literals # load python 3, fallback to python 2 if it fails try: from urllib.parse import unquote, unquote_plus, quote_plus except ImportError: from urllib import unquote, unquote_plus, quote_plus # type: ignore from datetime import datetime, timedelta from itertools import tee from distutils.util import strtobool import sys from flask import ( render_template, abort, url_for, Response, stream_with_context, request, session, jsonify ) import logging from pypuppetdb.QueryBuilder import (ExtractOperator, AndOperator, EqualsOperator, FunctionOperator, NullOperator, OrOperator, LessEqualOperator, RegexOperator) from puppetboard.forms import ENABLED_QUERY_ENDPOINTS, QueryForm from puppetboard.utils import (get_or_abort, yield_or_stop, get_db_version, is_bool) from puppetboard.dailychart import get_daily_reports_chart try: import CommonMark as commonmark except ImportError: import commonmark from puppetboard.core import get_app, get_puppetdb, environments from . import __version__ REPORTS_COLUMNS = [ {'attr': 'end', 'filter': 'end_time', 'name': 'End time', 'type': 'datetime'}, {'attr': 'status', 'name': 'Status', 'type': 'status'}, {'attr': 'certname', 'name': 'Certname', 'type': 'node'}, {'attr': 'version', 'filter': 'configuration_version', 'name': 'Configuration version'}, {'attr': 'agent_version', 'filter': 'puppet_version', 'name': 'Agent version'}, ] CATALOGS_COLUMNS = [ {'attr': 'certname', 'name': 'Certname', 'type': 'node'}, {'attr': 'catalog_timestamp', 'name': 'Compile Time'}, {'attr': 'form', 'name': 'Compare'}, ] app = get_app() graph_facts = app.config['GRAPH_FACTS'] numeric_level = getattr(logging, app.config['LOGLEVEL'].upper(), None) logging.basicConfig(level=numeric_level) log = logging.getLogger(__name__) puppetdb = get_puppetdb() @app.template_global() def version(): return __version__ def stream_template(template_name, **context): app.update_template_context(context) t = app.jinja_env.get_template(template_name) rv = t.stream(context) rv.enable_buffering(5) return rv def check_env(env, envs): if env != '*' and env not in envs: abort(404) def metric_params(db_version): query_type = '' # Puppet Server is enforcing new metrics API (v2) # starting with versions 6.9.1, 5.3.12, and 5.2.13 if (db_version > (6, 9, 0) or (db_version > (5, 3, 11) and db_version < (6, 0, 0)) or (db_version > (5, 2, 12) and db_version < (5, 3, 10))): metric_version = 'v2' else: metric_version = 'v1' # Puppet DB version changed the query format from 3.2.0 # to 4.0 when querying mbeans if db_version < (4, 0, 0): query_type = 'type=default,' return query_type, metric_version @app.context_processor def utility_processor(): def now(format='%m/%d/%Y %H:%M:%S'): """returns the formated datetime""" return datetime.now().strftime(format) return dict(now=now) @app.route('/', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/') def index(env): """This view generates the index page and displays a set of metrics and latest reports on nodes fetched from PuppetDB. 
:param env: Search for nodes in this (Catalog and Fact) environment :type env: :obj:`string` """ envs = environments() metrics = { 'num_nodes': 0, 'num_resources': 0, 'avg_resources_node': 0} check_env(env, envs) if env == '*': query = app.config['OVERVIEW_FILTER'] prefix = 'puppetlabs.puppetdb.population' db_version = get_db_version(puppetdb) query_type, metric_version = metric_params(db_version) num_nodes = get_or_abort( puppetdb.metric, "{0}{1}".format(prefix, ':%sname=num-nodes' % query_type), version=metric_version) num_resources = get_or_abort( puppetdb.metric, "{0}{1}".format(prefix, ':%sname=num-resources' % query_type), version=metric_version) avg_resources_node = get_or_abort( puppetdb.metric, "{0}{1}".format(prefix, ':%sname=avg-resources-per-node' % query_type), version=metric_version) metrics['num_nodes'] = num_nodes['Value'] metrics['num_resources'] = num_resources['Value'] try: # Compute our own average because avg_resources_node['Value'] # returns a string of the format "num_resources/num_nodes" # example: "1234/9" instead of doing the division itself. metrics['avg_resources_node'] = "{0:10.0f}".format( (num_resources['Value'] / num_nodes['Value'])) except ZeroDivisionError: metrics['avg_resources_node'] = 0 else: query = AndOperator() query.add(EqualsOperator('catalog_environment', env)) num_nodes_query = ExtractOperator() num_nodes_query.add_field(FunctionOperator('count')) num_nodes_query.add_query(query) if app.config['OVERVIEW_FILTER'] is not None: query.add(app.config['OVERVIEW_FILTER']) num_resources_query = ExtractOperator() num_resources_query.add_field(FunctionOperator('count')) num_resources_query.add_query(EqualsOperator("environment", env)) num_nodes = get_or_abort( puppetdb._query, 'nodes', query=num_nodes_query) num_resources = get_or_abort( puppetdb._query, 'resources', query=num_resources_query) metrics['num_nodes'] = num_nodes[0]['count'] metrics['num_resources'] = num_resources[0]['count'] try: metrics['avg_resources_node'] = "{0:10.0f}".format( (num_resources[0]['count'] / num_nodes[0]['count'])) except ZeroDivisionError: metrics['avg_resources_node'] = 0 nodes = get_or_abort(puppetdb.nodes, query=query, unreported=app.config['UNRESPONSIVE_HOURS'], with_status=True, with_event_numbers=app.config['WITH_EVENT_NUMBERS']) nodes_overview = [] stats = { 'changed': 0, 'unchanged': 0, 'failed': 0, 'unreported': 0, 'noop': 0 } for node in nodes: if node.status == 'unreported': stats['unreported'] += 1 elif node.status == 'changed': stats['changed'] += 1 elif node.status == 'failed': stats['failed'] += 1 elif node.status == 'noop': stats['noop'] += 1 else: stats['unchanged'] += 1 if node.status != 'unchanged': nodes_overview.append(node) return render_template( 'index.html', metrics=metrics, nodes=nodes_overview, stats=stats, envs=envs, current_env=env ) @app.route('/nodes', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/nodes') def nodes(env): """Fetch all (active) nodes from PuppetDB and stream a table displaying those nodes. Downside of the streaming aproach is that since we've already sent our headers we can't abort the request if we detect an error. Because of this we'll end up with an empty table instead because of how yield_or_stop works. Once pagination is in place we can change this but we'll need to provide a search feature instead. 
:param env: Search for nodes in this (Catalog and Fact) environment :type env: :obj:`string` """ envs = environments() status_arg = request.args.get('status', '') check_env(env, envs) query = AndOperator() if env != '*': query.add(EqualsOperator("catalog_environment", env)) if status_arg in ['failed', 'changed', 'unchanged']: query.add(EqualsOperator('latest_report_status', status_arg)) elif status_arg == 'unreported': unreported = datetime.utcnow() unreported = (unreported - timedelta(hours=app.config['UNRESPONSIVE_HOURS'])) unreported = unreported.replace(microsecond=0).isoformat() unrep_query = OrOperator() unrep_query.add(NullOperator('report_timestamp', True)) unrep_query.add(LessEqualOperator('report_timestamp', unreported)) query.add(unrep_query) if len(query.operations) == 0: query = None nodelist = puppetdb.nodes( query=query, unreported=app.config['UNRESPONSIVE_HOURS'], with_status=True, with_event_numbers=app.config['WITH_EVENT_NUMBERS']) nodes = [] for node in yield_or_stop(nodelist): if status_arg: if node.status == status_arg: nodes.append(node) else: nodes.append(node) return Response(stream_with_context( stream_template('nodes.html', nodes=nodes, envs=envs, current_env=env))) def inventory_facts(): # a list of facts descriptions to go in table header headers = [] # a list of inventory fact names fact_names = [] # load the list of items/facts we want in our inventory try: inv_facts = app.config['INVENTORY_FACTS'] except KeyError: inv_facts = [('Hostname', 'fqdn'), ('IP Address', 'ipaddress'), ('OS', 'lsbdistdescription'), ('Architecture', 'hardwaremodel'), ('Kernel Version', 'kernelrelease')] # generate a list of descriptions and a list of fact names # from the list of tuples inv_facts. for desc, name in inv_facts: headers.append(desc) fact_names.append(name) return headers, fact_names @app.route('/inventory', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/inventory') def inventory(env): """Fetch all (active) nodes from PuppetDB and stream a table displaying those nodes along with a set of facts about them. :param env: Search for facts in this environment :type env: :obj:`string` """ envs = environments() check_env(env, envs) headers, fact_names = inventory_facts() return render_template( 'inventory.html', envs=envs, current_env=env, fact_headers=headers) @app.route('/inventory/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/inventory/json') def inventory_ajax(env): """Backend endpoint for inventory table""" draw = int(request.args.get('draw', 0)) envs = environments() check_env(env, envs) headers, fact_names = inventory_facts() query = AndOperator() fact_query = OrOperator() fact_query.add([EqualsOperator("name", name) for name in fact_names]) query.add(fact_query) if env != '*': query.add(EqualsOperator("environment", env)) facts = puppetdb.facts(query=query) fact_data = {} for fact in facts: if fact.node not in fact_data: fact_data[fact.node] = {} fact_data[fact.node][fact.name] = fact.value total = len(fact_data) return render_template( 'inventory.json.tpl', draw=draw, total=total, total_filtered=total, fact_data=fact_data, columns=fact_names) @app.route('/node/<node_name>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/node/<node_name>') def node(env, node_name): """Display a dashboard for a node showing as much data as we have on that node. This includes facts and reports but not Resources as that is too heavy to do within a single request. 
:param env: Ensure that the node, facts and reports are in this environment :type env: :obj:`string` """ envs = environments() check_env(env, envs) query = AndOperator() if env != '*': query.add(EqualsOperator("environment", env)) query.add(EqualsOperator("certname", node_name)) node = get_or_abort(puppetdb.node, node_name) return render_template( 'node.html', node=node, envs=envs, current_env=env, columns=REPORTS_COLUMNS[:2]) @app.route('/reports', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'node_name': None}) @app.route('/<env>/reports', defaults={'node_name': None}) @app.route('/reports/<node_name>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/reports/<node_name>') def reports(env, node_name): """Query and Return JSON data to reports Jquery datatable :param env: Search for all reports in this environment :type env: :obj:`string` """ envs = environments() check_env(env, envs) return render_template( 'reports.html', envs=envs, current_env=env, node_name=node_name, columns=REPORTS_COLUMNS) @app.route('/reports/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'node_name': None}) @app.route('/<env>/reports/json', defaults={'node_name': None}) @app.route('/reports/<node_name>/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/reports/<node_name>/json') def reports_ajax(env, node_name): """Query and Return JSON data to reports Jquery datatable :param env: Search for all reports in this environment :type env: :obj:`string` """ draw = int(request.args.get('draw', 0)) start = int(request.args.get('start', 0)) length = int(request.args.get('length', app.config['NORMAL_TABLE_COUNT'])) paging_args = {'limit': length, 'offset': start} search_arg = request.args.get('search[value]') order_column = int(request.args.get('order[0][column]', 0)) order_filter = REPORTS_COLUMNS[order_column].get( 'filter', REPORTS_COLUMNS[order_column]['attr']) order_dir = request.args.get('order[0][dir]', 'desc') order_args = '[{"field": "%s", "order": "%s"}]' % (order_filter, order_dir) status_args = request.args.get('columns[1][search][value]', '').split('|') max_col = len(REPORTS_COLUMNS) for i in range(len(REPORTS_COLUMNS)): if request.args.get("columns[%s][data]" % i, None): max_col = i + 1 envs = environments() check_env(env, envs) reports_query = AndOperator() if env != '*': reports_query.add(EqualsOperator("environment", env)) if node_name: reports_query.add(EqualsOperator("certname", node_name)) if search_arg: search_query = OrOperator() search_query.add(RegexOperator("certname", r"%s" % search_arg)) search_query.add(RegexOperator("puppet_version", r"%s" % search_arg)) search_query.add(RegexOperator( "configuration_version", r"%s" % search_arg)) reports_query.add(search_query) status_query = OrOperator() for status_arg in status_args: if status_arg in ['failed', 'changed', 'unchanged']: arg_query = AndOperator() arg_query.add(EqualsOperator('status', status_arg)) arg_query.add(EqualsOperator('noop', False)) status_query.add(arg_query) if status_arg == 'unchanged': arg_query = AndOperator() arg_query.add(EqualsOperator('noop', True)) arg_query.add(EqualsOperator('noop_pending', False)) status_query.add(arg_query) elif status_arg == 'noop': arg_query = AndOperator() arg_query.add(EqualsOperator('noop', True)) arg_query.add(EqualsOperator('noop_pending', True)) status_query.add(arg_query) if len(status_query.operations) == 0: if len(reports_query.operations) == 0: reports_query = None else: reports_query.add(status_query) if status_args[0] != 
'none': reports = get_or_abort( puppetdb.reports, query=reports_query, order_by=order_args, include_total=True, **paging_args) reports, reports_events = tee(reports) total = None else: reports = [] reports_events = [] total = 0 # Convert metrics to relational dict metrics = {} for report in reports_events: if total is None: total = puppetdb.total metrics[report.hash_] = {} for m in report.metrics: if m['category'] not in metrics[report.hash_]: metrics[report.hash_][m['category']] = {} metrics[report.hash_][m['category']][m['name']] = m['value'] if total is None: total = 0 return render_template( 'reports.json.tpl', draw=draw, total=total, total_filtered=total, reports=reports, metrics=metrics, envs=envs, current_env=env, columns=REPORTS_COLUMNS[:max_col]) @app.route('/report/<node_name>/<report_id>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/report/<node_name>/<report_id>') def report(env, node_name, report_id): """Displays a single report including all the events associated with that report and their status. The report_id may be the puppetdb's report hash or the configuration_version. This allows for better integration into puppet-hipchat. :param env: Search for reports in this environment :type env: :obj:`string` :param node_name: Find the reports whose certname match this value :type node_name: :obj:`string` :param report_id: The hash or the configuration_version of the desired report :type report_id: :obj:`string` """ envs = environments() check_env(env, envs) query = AndOperator() report_id_query = OrOperator() report_id_query.add(EqualsOperator("hash", report_id)) report_id_query.add(EqualsOperator("configuration_version", report_id)) if env != '*': query.add(EqualsOperator("environment", env)) query.add(EqualsOperator("certname", node_name)) query.add(report_id_query) reports = puppetdb.reports(query=query) try: report = next(reports) except StopIteration: abort(404) report.version = commonmark.commonmark(report.version) return render_template( 'report.html', report=report, events=yield_or_stop(report.events()), logs=report.logs, metrics=report.metrics, envs=envs, current_env=env) @app.route('/facts', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/facts') def facts(env): """Displays an alphabetical list of all facts currently known to PuppetDB. :param env: Serves no purpose for this function, only for consistency's sake :type env: :obj:`string` """ envs = environments() check_env(env, envs) facts = [] order_by = '[{"field": "name", "order": "asc"}]' facts = get_or_abort(puppetdb.fact_names) # we consider a column label to count for ~5 lines column_label_height = 5 # 1 label per different letter and up to 3 more labels for letters spanning # multiple columns. 
column_label_count = 3 + len(set(map(lambda fact: fact[0].upper(), facts))) break_size = (len(facts) + column_label_count * column_label_height) / 4.0 next_break = break_size facts_columns = [] facts_current_column = [] facts_current_letter = [] letter = None count = 0 for fact in facts: count += 1 if count > next_break: next_break += break_size if facts_current_letter: facts_current_column.append(facts_current_letter) if facts_current_column: facts_columns.append(facts_current_column) facts_current_column = [] facts_current_letter = [] letter = None if letter != fact[0].upper(): if facts_current_letter: facts_current_column.append(facts_current_letter) facts_current_letter = [] letter = fact[0].upper() count += column_label_height facts_current_letter.append(fact) if facts_current_letter: facts_current_column.append(facts_current_letter) if facts_current_column: facts_columns.append(facts_current_column) return render_template('facts.html', facts_columns=facts_columns, envs=envs, current_env=env) @app.route('/fact/<fact>', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'value': None}) @app.route('/<env>/fact/<fact>', defaults={'value': None}) @app.route('/fact/<fact>/<value>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/fact/<fact>/<value>') def fact(env, fact, value): """Fetches the specific fact(/value) from PuppetDB and displays per node for which this fact is known. :param env: Searches for facts in this environment :type env: :obj:`string` :param fact: Find all facts with this name :type fact: :obj:`string` :param value: Find all facts with this value :type value: :obj:`string` """ envs = environments() check_env(env, envs) render_graph = False if fact in graph_facts and not value: render_graph = True value_safe = value if value is not None: value_safe = unquote_plus(value) return render_template( 'fact.html', fact=fact, value=value, value_safe=value_safe, render_graph=render_graph, envs=envs, current_env=env) @app.route('/fact/<fact>/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'node': None, 'value': None}) @app.route('/<env>/fact/<fact>/json', defaults={'node': None, 'value': None}) @app.route('/fact/<fact>/<value>/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'node': None}) @app.route('/fact/<fact>/<path:value>/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'node': None}) @app.route('/<env>/fact/<fact>/<value>/json', defaults={'node': None}) @app.route('/node/<node>/facts/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'fact': None, 'value': None}) @app.route('/<env>/node/<node>/facts/json', defaults={'fact': None, 'value': None}) def fact_ajax(env, node, fact, value): """Fetches the specific facts matching (node/fact/value) from PuppetDB and return a JSON table :param env: Searches for facts in this environment :type env: :obj:`string` :param node: Find all facts for this node :type node: :obj:`string` :param fact: Find all facts with this name :type fact: :obj:`string` :param value: Filter facts whose value is equal to this :type value: :obj:`string` """ draw = int(request.args.get('draw', 0)) envs = environments() check_env(env, envs) render_graph = False if fact in graph_facts and not value and not node: render_graph = True query = AndOperator() if node: query.add(EqualsOperator("certname", node)) if env != '*': query.add(EqualsOperator("environment", env)) if len(query.operations) == 0: query = None # Generator needs to be converted (graph / total) try: value = int(value) except ValueError: if 
value is not None and query is not None: if is_bool(value): query.add(EqualsOperator('value', bool(strtobool(value)))) else: query.add(EqualsOperator('value', unquote_plus(value))) except TypeError: pass facts = [f for f in get_or_abort( puppetdb.facts, name=fact, query=query)] total = len(facts) counts = {} json = { 'draw': draw, 'recordsTotal': total, 'recordsFiltered': total, 'data': []} for fact_h in facts: line = [] if not fact: line.append(fact_h.name) if not node: line.append('<a href="{0}">{1}</a>'.format( url_for('node', env=env, node_name=fact_h.node), fact_h.node)) if not value: fact_value = fact_h.value if isinstance(fact_value, str): fact_value = quote_plus(fact_h.value) line.append('<a href="{0}">{1}</a>'.format( url_for( 'fact', env=env, fact=fact_h.name, value=fact_value), fact_h.value)) json['data'].append(line) if render_graph: if fact_h.value not in counts: counts[fact_h.value] = 0 counts[fact_h.value] += 1 if render_graph: json['chart'] = [ {"label": "{0}".format(k).replace('\n', ' '), "value": counts[k]} for k in sorted(counts, key=lambda k: counts[k], reverse=True)] return jsonify(json) @app.route('/query', methods=('GET', 'POST'), defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/query', methods=('GET', 'POST')) def query(env): """Allows to execute raw, user created querries against PuppetDB. This is currently highly experimental and explodes in interesting ways since none of the possible exceptions are being handled just yet. This will return the JSON of the response or a message telling you what whent wrong / why nothing was returned. :param env: Serves no purpose for the query data but is required for the select field in the environment block :type env: :obj:`string` """ if not app.config['ENABLE_QUERY']: log.warn('Access to query interface disabled by administrator.') abort(403) envs = environments() check_env(env, envs) form = QueryForm(meta={ 'csrf_secret': app.config['SECRET_KEY'], 'csrf_context': session}) if form.validate_on_submit(): if form.endpoints.data not in ENABLED_QUERY_ENDPOINTS: log.warn('Access to query endpoint %s disabled by administrator.', form.endpoints.data) abort(403) if form.endpoints.data == 'pql': query = form.query.data elif form.query.data[0] == '[': query = form.query.data else: query = '[{0}]'.format(form.query.data) result = get_or_abort( puppetdb._query, form.endpoints.data, query=query) return render_template('query.html', form=form, result=result, envs=envs, current_env=env) return render_template('query.html', form=form, envs=envs, current_env=env) @app.route('/metrics', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/metrics') def metrics(env): """Lists all available metrics that PuppetDB is aware of. :param env: While this parameter serves no function purpose it is required for the environments template block :type env: :obj:`string` """ envs = environments() check_env(env, envs) db_version = get_db_version(puppetdb) query_type, metric_version = metric_params(db_version) if metric_version == 'v1': mbeans = get_or_abort(puppetdb._query, 'mbean') metrics = list(mbeans.keys()) elif metric_version == 'v2': # the list response is a dict in the format: # { # "domain1": { # "property1": { # ... # } # }, # "domain2": { # "property2": { # ... 
# } # } # } # The MBean names are the combination of the domain and the properties # with a ":" in between, example: # domain1:property1 # domain2:property2 # reference: https://jolokia.org/reference/html/protocol.html#list metrics_domains = get_or_abort(puppetdb.metric) metrics = [] # get all of the domains for domain in list(metrics_domains.keys()): # iterate over all of the properties in this domain properties = list(metrics_domains[domain].keys()) for prop in properties: # combine the current domain and each property with # a ":" in between metrics.append(domain + ':' + prop) else: raise ValueError("Unknown metric version {} for database version {}" .format(metric_version, database_version)) return render_template('metrics.html', metrics=sorted(metrics), envs=envs, current_env=env) @app.route('/metric/<path:metric>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/metric/<path:metric>') def metric(env, metric): """Lists all information about the metric of the given name. :param env: While this parameter serves no function purpose it is required for the environments template block :type env: :obj:`string` """ envs = environments() check_env(env, envs) db_version = get_db_version(puppetdb) query_type, metric_version = metric_params(db_version) name = unquote(metric) metric = get_or_abort(puppetdb.metric, metric, version=metric_version) return render_template( 'metric.html', name=name, metric=sorted(metric.items()), envs=envs, current_env=env) @app.route('/catalogs', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'compare': None}) @app.route('/<env>/catalogs', defaults={'compare': None}) @app.route('/catalogs/compare/<compare>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/catalogs/compare/<compare>') def catalogs(env, compare): """Lists all nodes with a compiled catalog. 
:param env: Find the nodes with this catalog_environment value :type env: :obj:`string` """ envs = environments() check_env(env, envs) if not app.config['ENABLE_CATALOG']: log.warning('Access to catalog interface disabled by administrator') abort(403) return render_template( 'catalogs.html', compare=compare, columns=CATALOGS_COLUMNS, envs=envs, current_env=env) @app.route('/catalogs/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'compare': None}) @app.route('/<env>/catalogs/json', defaults={'compare': None}) @app.route('/catalogs/compare/<compare>/json', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/catalogs/compare/<compare>/json') def catalogs_ajax(env, compare): """Server data to catalogs as JSON to Jquery datatables """ draw = int(request.args.get('draw', 0)) start = int(request.args.get('start', 0)) length = int(request.args.get('length', app.config['NORMAL_TABLE_COUNT'])) paging_args = {'limit': length, 'offset': start} search_arg = request.args.get('search[value]') order_column = int(request.args.get('order[0][column]', 0)) order_filter = CATALOGS_COLUMNS[order_column].get( 'filter', CATALOGS_COLUMNS[order_column]['attr']) order_dir = request.args.get('order[0][dir]', 'asc') order_args = '[{"field": "%s", "order": "%s"}]' % (order_filter, order_dir) envs = environments() check_env(env, envs) query = AndOperator() if env != '*': query.add(EqualsOperator("catalog_environment", env)) if search_arg: query.add(RegexOperator("certname", r"%s" % search_arg)) query.add(NullOperator("catalog_timestamp", False)) nodes = get_or_abort(puppetdb.nodes, query=query, include_total=True, order_by=order_args, **paging_args) catalog_list = [] total = None for node in nodes: if total is None: total = puppetdb.total catalog_list.append({ 'certname': node.name, 'catalog_timestamp': node.catalog_timestamp, 'form': compare, }) if total is None: total = 0 return render_template( 'catalogs.json.tpl', total=total, total_filtered=total, draw=draw, columns=CATALOGS_COLUMNS, catalogs=catalog_list, envs=envs, current_env=env) @app.route('/catalog/<node_name>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/catalog/<node_name>') def catalog_node(env, node_name): """Fetches from PuppetDB the compiled catalog of a given node. :param env: Find the catalog with this environment value :type env: :obj:`string` """ envs = environments() check_env(env, envs) if app.config['ENABLE_CATALOG']: catalog = get_or_abort(puppetdb.catalog, node=node_name) return render_template('catalog.html', catalog=catalog, envs=envs, current_env=env) else: log.warn('Access to catalog interface disabled by administrator') abort(403) @app.route('/catalogs/compare/<compare>...<against>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/catalogs/compare/<compare>...<against>') def catalog_compare(env, compare, against): """Compares the catalog of one node, parameter compare, with that of with that of another node, parameter against. 
:param env: Ensure that the 2 catalogs are in the same environment :type env: :obj:`string` """ envs = environments() check_env(env, envs) if app.config['ENABLE_CATALOG']: compare_cat = get_or_abort(puppetdb.catalog, node=compare) against_cat = get_or_abort(puppetdb.catalog, node=against) return render_template('catalog_compare.html', compare=compare_cat, against=against_cat, envs=envs, current_env=env) else: log.warn('Access to catalog interface disabled by administrator') abort(403) @app.route('/radiator', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/radiator') def radiator(env): """This view generates a simplified monitoring page akin to the radiator view in puppet dashboard """ envs = environments() check_env(env, envs) if env == '*': db_version = get_db_version(puppetdb) query_type, metric_version = metric_params(db_version) query = None metrics = get_or_abort( puppetdb.metric, 'puppetlabs.puppetdb.population:%sname=num-nodes' % query_type, version=metric_version) num_nodes = metrics['Value'] else: query = AndOperator() metric_query = ExtractOperator() query.add(EqualsOperator("catalog_environment", env)) metric_query.add_field(FunctionOperator('count')) metric_query.add_query(query) metrics = get_or_abort( puppetdb._query, 'nodes', query=metric_query) num_nodes = metrics[0]['count'] nodes = puppetdb.nodes( query=query, unreported=app.config['UNRESPONSIVE_HOURS'], with_status=True ) stats = { 'changed_percent': 0, 'changed': 0, 'failed_percent': 0, 'failed': 0, 'noop_percent': 0, 'noop': 0, 'skipped_percent': 0, 'skipped': 0, 'unchanged_percent': 0, 'unchanged': 0, 'unreported_percent': 0, 'unreported': 0, } for node in nodes: if node.status == 'unreported': stats['unreported'] += 1 elif node.status == 'changed': stats['changed'] += 1 elif node.status == 'failed': stats['failed'] += 1 elif node.status == 'noop': stats['noop'] += 1 elif node.status == 'skipped': stats['skipped'] += 1 else: stats['unchanged'] += 1 try: stats['changed_percent'] = int(100 * (stats['changed'] / float(num_nodes))) stats['failed_percent'] = int(100 * stats['failed'] / float(num_nodes)) stats['noop_percent'] = int(100 * stats['noop'] / float(num_nodes)) stats['skipped_percent'] = int(100 * (stats['skipped'] / float(num_nodes))) stats['unchanged_percent'] = int(100 * (stats['unchanged'] / float(num_nodes))) stats['unreported_percent'] = int(100 * (stats['unreported'] / float(num_nodes))) except ZeroDivisionError: stats['changed_percent'] = 0 stats['failed_percent'] = 0 stats['noop_percent'] = 0 stats['skipped_percent'] = 0 stats['unchanged_percent'] = 0 stats['unreported_percent'] = 0 if ('Accept' in request.headers and request.headers["Accept"] == 'application/json'): return jsonify(**stats) return render_template( 'radiator.html', stats=stats, total=num_nodes ) @app.route('/daily_reports_chart.json', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/daily_reports_chart.json') def daily_reports_chart(env): """Return JSON data to generate a bar chart of daily runs. If certname is passed as GET argument, the data will target that node only. 
""" certname = request.args.get('certname') result = get_or_abort( get_daily_reports_chart, db=puppetdb, env=env, days_number=app.config['DAILY_REPORTS_CHART_DAYS'], certname=certname, ) return jsonify(result=result) @app.route('/offline/<path:filename>') def offline_static(filename): mimetype = 'text/html' if filename.endswith('.css'): mimetype = 'text/css' elif filename.endswith('.js'): mimetype = 'text/javascript' return Response(response=render_template('static/%s' % filename), status=200, mimetype=mimetype) @app.route('/status') def health_status(): return 'OK'
# -*- coding: utf-8 -*- ######################################################################## # # License: BSD # Created: March 4, 2003 # Author: Francesc Alted - faltet@pytables.com # # $Id$ # ######################################################################## """Utility functions.""" from __future__ import print_function import os import sys import warnings import subprocess from time import time import numpy from tables.flavor import array_of_flavor # The map between byteorders in NumPy and PyTables byteorders = { '>': 'big', '<': 'little', '=': sys.byteorder, '|': 'irrelevant', } # The type used for size values: indexes, coordinates, dimension # lengths, row numbers, shapes, chunk shapes, byte counts... SizeType = numpy.int64 def correct_byteorder(ptype, byteorder): """Fix the byteorder depending on the PyTables types.""" if ptype in ['string', 'bool', 'int8', 'uint8']: return "irrelevant" else: return byteorder def is_idx(index): """Checks if an object can work as an index or not.""" if type(index) in (int, long): return True elif hasattr(index, "__index__"): # Only works on Python 2.5 (PEP 357) # Exclude the array([idx]) as working as an index. Fixes #303. if (hasattr(index, "shape") and index.shape != ()): return False try: index.__index__() if isinstance(index, bool): warnings.warn( 'using a boolean instead of an integer will result in an ' 'error in the future', DeprecationWarning, stacklevel=2) return True except TypeError: return False elif isinstance(index, numpy.integer): return True # For Python 2.4 one should test 0-dim and 1-dim, 1-elem arrays as well elif (isinstance(index, numpy.ndarray) and (index.shape == ()) and index.dtype.str[1] == 'i'): return True return False def idx2long(index): """Convert a possible index into a long int.""" try: return long(index) except: raise TypeError("not an integer type.") # This is used in VLArray and EArray to produce NumPy object compliant # with atom from a generic python type. If copy is stated as True, it # is assured that it will return a copy of the object and never the same # object or a new one sharing the same memory. def convert_to_np_atom(arr, atom, copy=False): """Convert a generic object into a NumPy object compliant with atom.""" # First, convert the object into a NumPy array nparr = array_of_flavor(arr, 'numpy') # Copy of data if necessary for getting a contiguous buffer, or if # dtype is not the correct one. if atom.shape == (): # Scalar atom case nparr = numpy.array(nparr, dtype=atom.dtype, copy=copy) else: # Multidimensional atom case. Addresses #133. # We need to use this strange way to obtain a dtype compliant # array because NumPy doesn't honor the shape of the dtype when # it is multidimensional. See: # http://scipy.org/scipy/numpy/ticket/926 # for details. # All of this is done just to taking advantage of the NumPy # broadcasting rules. newshape = nparr.shape[:-len(atom.dtype.shape)] nparr2 = numpy.empty(newshape, dtype=[('', atom.dtype)]) nparr2['f0'][:] = nparr # Return a view (i.e. get rid of the record type) nparr = nparr2.view(atom.dtype) return nparr # The next is used in Array, EArray and VLArray, and it is a bit more # high level than convert_to_np_atom def convert_to_np_atom2(object, atom): """Convert a generic object into a NumPy object compliant with atom.""" # Check whether the object needs to be copied to make the operation # safe to in-place conversion. 
copy = atom.type in ['time64'] nparr = convert_to_np_atom(object, atom, copy) # Finally, check the byteorder and change it if needed byteorder = byteorders[nparr.dtype.byteorder] if (byteorder in ['little', 'big'] and byteorder != sys.byteorder): # The byteorder needs to be fixed (a copy is made # so that the original array is not modified) nparr = nparr.byteswap() return nparr def check_file_access(filename, mode='r'): """Check for file access in the specified `mode`. `mode` is one of the modes supported by `File` objects. If the file indicated by `filename` can be accessed using that `mode`, the function ends successfully. Else, an ``IOError`` is raised explaining the reason of the failure. All this paraphernalia is used to avoid the lengthy and scaring HDF5 messages produced when there are problems opening a file. No changes are ever made to the file system. """ if mode == 'r': # The file should be readable. if not os.access(filename, os.F_OK): raise IOError("``%s`` does not exist" % (filename,)) if not os.path.isfile(filename): raise IOError("``%s`` is not a regular file" % (filename,)) if not os.access(filename, os.R_OK): raise IOError("file ``%s`` exists but it can not be read" % (filename,)) elif mode == 'w': if os.access(filename, os.F_OK): # Since the file is not removed but replaced, # it must already be accessible to read and write operations. check_file_access(filename, 'r+') else: # A new file is going to be created, # so the directory should be writable. parentname = os.path.dirname(filename) if not parentname: parentname = '.' if not os.access(parentname, os.F_OK): raise IOError("``%s`` does not exist" % (parentname,)) if not os.path.isdir(parentname): raise IOError("``%s`` is not a directory" % (parentname,)) if not os.access(parentname, os.W_OK): raise IOError("directory ``%s`` exists but it can not be " "written" % (parentname,)) elif mode == 'a': if os.access(filename, os.F_OK): check_file_access(filename, 'r+') else: check_file_access(filename, 'w') elif mode == 'r+': check_file_access(filename, 'r') if not os.access(filename, os.W_OK): raise IOError("file ``%s`` exists but it can not be written" % (filename,)) else: raise ValueError("invalid mode: %r" % (mode,)) def lazyattr(fget): """Create a *lazy attribute* from the result of `fget`. This function is intended to be used as a *method decorator*. It returns a *property* which caches the result of calling the `fget` instance method. The docstring of `fget` is used for the property itself. For instance: >>> class MyClass(object): ... @lazyattr ... def attribute(self): ... 'Attribute description.' ... print('creating value') ... return 10 ... >>> type(MyClass.attribute) <type 'property'> >>> MyClass.attribute.__doc__ 'Attribute description.' >>> obj = MyClass() >>> obj.__dict__ {} >>> obj.attribute creating value 10 >>> obj.__dict__ {'attribute': 10} >>> obj.attribute 10 >>> del obj.attribute Traceback (most recent call last): ... AttributeError: can't delete attribute .. warning:: Please note that this decorator *changes the type of the decorated object* from an instance method into a property. 
""" name = fget.__name__ def newfget(self): mydict = self.__dict__ if name in mydict: return mydict[name] mydict[name] = value = fget(self) return value return property(newfget, None, None, fget.__doc__) def show_stats(explain, tref, encoding=None): """Show the used memory (only works for Linux 2.6.x).""" if encoding is None: encoding = sys.getdefaultencoding() # Build the command to obtain memory info cmd = "cat /proc/%s/status" % os.getpid() sout = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout for line in sout: line = line.decode(encoding) if line.startswith("VmSize:"): vmsize = int(line.split()[1]) elif line.startswith("VmRSS:"): vmrss = int(line.split()[1]) elif line.startswith("VmData:"): vmdata = int(line.split()[1]) elif line.startswith("VmStk:"): vmstk = int(line.split()[1]) elif line.startswith("VmExe:"): vmexe = int(line.split()[1]) elif line.startswith("VmLib:"): vmlib = int(line.split()[1]) sout.close() print("Memory usage: ******* %s *******" % explain) print("VmSize: %7s kB\tVmRSS: %7s kB" % (vmsize, vmrss)) print("VmData: %7s kB\tVmStk: %7s kB" % (vmdata, vmstk)) print("VmExe: %7s kB\tVmLib: %7s kB" % (vmexe, vmlib)) tnow = time() print("WallClock time:", round(tnow - tref, 3)) return tnow # truncate data before calling __setitem__, to improve compression ratio # this function is taken verbatim from netcdf4-python def quantize(data, least_significant_digit): """quantize data to improve compression. Data is quantized using around(scale*data)/scale, where scale is 2**bits, and bits is determined from the least_significant_digit. For example, if least_significant_digit=1, bits will be 4. """ precision = pow(10., -least_significant_digit) exp = numpy.log10(precision) if exp < 0: exp = int(numpy.floor(exp)) else: exp = int(numpy.ceil(exp)) bits = numpy.ceil(numpy.log2(pow(10., -exp))) scale = pow(2., bits) datout = numpy.around(scale * data) / scale return datout # Utilities to detect leaked instances. See recipe 14.10 of the Python # Cookbook by Martelli & Ascher. 
tracked_classes = {} import weakref def log_instance_creation(instance, name=None): if name is None: name = instance.__class__.__name__ if name not in tracked_classes: tracked_classes[name] = [] tracked_classes[name].append(weakref.ref(instance)) def string_to_classes(s): if s == '*': c = sorted(tracked_classes.iterkeys()) return c else: return s.split() def fetch_logged_instances(classes="*"): classnames = string_to_classes(classes) return [(cn, len(tracked_classes[cn])) for cn in classnames] def count_logged_instances(classes, file=sys.stdout): for classname in string_to_classes(classes): file.write("%s: %d\n" % (classname, len(tracked_classes[classname]))) def list_logged_instances(classes, file=sys.stdout): for classname in string_to_classes(classes): file.write('\n%s:\n' % classname) for ref in tracked_classes[classname]: obj = ref() if obj is not None: file.write(' %s\n' % repr(obj)) def dump_logged_instances(classes, file=sys.stdout): for classname in string_to_classes(classes): file.write('\n%s:\n' % classname) for ref in tracked_classes[classname]: obj = ref() if obj is not None: file.write(' %s:\n' % obj) for key, value in obj.__dict__.iteritems(): file.write(' %20s : %s\n' % (key, value)) # # A class useful for cache usage # class CacheDict(dict): """A dictionary that prevents itself from growing too much.""" def __init__(self, maxentries): self.maxentries = maxentries super(CacheDict, self).__init__(self) def __setitem__(self, key, value): # Protection against growing the cache too much if len(self) > self.maxentries: # Remove a 10% of (arbitrary) elements from the cache entries_to_remove = self.maxentries / 10 for k in self.keys()[:entries_to_remove]: super(CacheDict, self).__delitem__(k) super(CacheDict, self).__setitem__(key, value) class NailedDict(object): """A dictionary which ignores its items when it has nails on it.""" def __init__(self, maxentries): self.maxentries = maxentries self._cache = {} self._nailcount = 0 # Only a restricted set of dictionary methods are supported. That # is why we buy instead of inherit. # The following are intended to be used by ``Table`` code changing # the set of usable indexes. def clear(self): self._cache.clear() def nail(self): self._nailcount += 1 def unnail(self): self._nailcount -= 1 # The following are intended to be used by ``Table`` code handling # conditions. def __contains__(self, key): if self._nailcount > 0: return False return key in self._cache def __getitem__(self, key): if self._nailcount > 0: raise KeyError(key) return self._cache[key] def get(self, key, default=None): if self._nailcount > 0: return default return self._cache.get(key, default) def __setitem__(self, key, value): if self._nailcount > 0: return cache = self._cache # Protection against growing the cache too much if len(cache) > self.maxentries: # Remove a 10% of (arbitrary) elements from the cache entries_to_remove = self.maxentries // 10 for k in cache.keys()[:entries_to_remove]: del cache[k] cache[key] = value def detect_number_of_cores(): """Detects the number of cores on a system. Cribbed from pp. 
""" # Linux, Unix and MacOS: if hasattr(os, "sysconf"): if "SC_NPROCESSORS_ONLN" in os.sysconf_names: # Linux & Unix: ncpus = os.sysconf("SC_NPROCESSORS_ONLN") if isinstance(ncpus, int) and ncpus > 0: return ncpus else: # OSX: return int(os.popen2("sysctl -n hw.ncpu")[1].read()) # Windows: if "NUMBER_OF_PROCESSORS" in os.environ: ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]) if ncpus > 0: return ncpus return 1 # Default # Main part # ========= def _test(): """Run ``doctest`` on this module.""" import doctest doctest.testmod() if __name__ == '__main__': _test() ## Local Variables: ## mode: python ## py-indent-offset: 4 ## tab-width: 4 ## fill-column: 72 ## End:
""" Copyright (c) 2013 Rodrigo Baravalle All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from imfractal import * import Image import time import csv import sys import os from subprocess import * import numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn import cross_validation from sklearn import svm import matplotlib.pyplot as plt from pylab import * import csv def opencsv(filename): array = [] with open(filename, 'rb') as csvfile: spamreader = csv.reader(csvfile) i = 0 for row in spamreader: array[i] = row i = i+1 return np.array(array) # CSV WRITE def writecsv(filename,array): with open(filename, 'wb') as f: writer = csv.writer(f) writer.writerows(array) def do_test(minn,vals): #cant = 10 dfs = 10 dDFs = 2*dfs+1 cantClasses = 2 computeMFS = False pathbtr = 'images/train2/bread/' dirListbtr=os.listdir(pathbtr) cantTrainB = 10#len(dirListbtr) #pathnbtr = 'images/train2/nonbread/' #dirListnbtr=os.listdir(pathnbtr) #cantTrainNB = 1#len(dirListnbtr) pathbte = 'images/test4/bread/' dirListbte=os.listdir(pathbte) cantTestB = 2#len(dirListbte) #pathnbte = 'images/test2/nonbread/' #dirListnbte=os.listdir(pathnbte) #cantTestNB = 1#len(dirListnbte) breadtrain = np.zeros((cantTrainB, dDFs)).astype(np.float32) print breadtrain[0].shape breadtest = np.zeros((cantTestB, dDFs)).astype(np.float32) #nonbreadtrain = np.zeros((cantTrainNB, dDFs)).astype(np.float32) #nonbreadtest = np.zeros((cantTestNB, dDFs)).astype(np.float32) ins = Sandbox(dfs) if(computeMFS): print 'Training: computing sandbox MFS for the bread database...' ins.setDef(40,1.02,True) #print "Computing " + str(cantTrainB) +" bread train..." for i in range(cantTrainB): filename = pathbtr+dirListbtr[i] breadtrain[i] = ins.getFDs(filename) else: print "Loading CSV" with open('breadtrainS.csv', 'rb') as csvfile: spamreader = csv.reader(csvfile) i = 0 for row in spamreader: breadtrain[i] = row[1:] i = i+1 #print "Computing "+ str(cantTestB) +" bread test..." ins.setDef(40,1.15,False) for i in range(cantTestB): filename = pathbte+dirListbte[i] breadtest[i] = ins.getFDs(filename) #ins.setDef(40,1.02,True) #print "Computing "+str(cantTrainNB)+" non bread train..." 
#for i in range(cantTrainNB): # filename = pathnbtr+dirListnbtr[i] # nonbreadtrain[i] = ins.getFDs(filename) #ins.setDef(40,1.02,True) #print "Computing "+ str(cantTestNB) +" non bread test..." #for i in range(cantTestNB): # filename = pathnbte+dirListnbte[i] # nonbreadtest[i] = ins.getFDs(filename) fsize = 14 if(computeMFS): #data = np.vstack((breadtrain,breadtest))#,nonbreadtrain)) labelsbtr = np.zeros((len(breadtrain),1)) + 1 #labelsnbtr = np.zeros((len(nonbreadtrain),1)) + 2 #labelsbte = np.zeros((len(breadtest),1)) + 1 #labelsnbte = np.zeros((len(nonbreadtest),1)) + 2 #labels = np.hstack((labelsbtr[:,0],labelsbte[:,0],labelsnbtr[:,0],labelsnbte[:,0])) print "Saving CSVs for SOM" print "Shapes: labelsbtr: ", labelsbtr.shape, "breadtrain: ",breadtrain.shape writecsv('breadtrainS.csv',np.hstack((labelsbtr,breadtrain)) ) #writecsv('breadtestS.csv',np.hstack((labelsbte,breadtest)) ) #writecsv('nonbreadtrainS.csv',np.hstack((labelsnbtr,nonbreadtrain)) ) #writecsv('nonbreadtestS.csv',np.hstack((labelsnbte,nonbreadtest)) ) #exit() if(False): #print "Testing..." #print "1 = Bread" #print "2 = Nonbread" train = np.vstack((breadtrain,nonbreadtrain)) labels = np.hstack((labelsbtr[:,0],labelsnbtr[:,0])) test = np.vstack((breadtest,nonbreadtest)) lin_svc = svm.LinearSVC(C=1.0).fit(train, labels) predictionsSVM = lin_svc.predict(test) cfr = RandomForestClassifier(n_estimators=120) cfr.fit(train,labels) # train gtruth = np.hstack((labelsbte[:,0],labelsnbte[:,0])) predictionsRF = cfr.predict(test) # test print dirListbte print dirListbtr print "Random Forest Prediction:" print predictionsRF[:cantTestB] print predictionsRF[cantTestB:] print "SVM Prediction:" print predictionsSVM[:cantTestB] print predictionsSVM[cantTestB:] print "REAL: " print gtruth x = np.arange(dDFs) #plt.ylabel(r'$f(alpha)$',fontsize=fsize) #plt.xlabel('alpha',fontsize=fsize) #plt.plot(x, breadtrain.T, 'k+--', label='bread train',linewidth=2.0) #plt.plot(x, breadtest.T, 'g+--', label='bread test',linewidth=2.0) #plt.plot(x, nonbreadtrain.T, 'r*--', label='non bread train',linewidth=2.0) #plt.plot(x, nonbreadtest.T, 'b*--', label='non bread test',linewidth=2.0) #plt.legend(loc = 3) #plt.show() y0 = 1.2 y1 = 3 plt.figure() #plt.subplot(122) plt.ylim((y0,y1)) #plt.xlabel('Real Breads',fontsize=fsize) b = plt.boxplot(np.vstack((breadtrain)),sym="") mediansReal = map(lambda i: i.get_data()[1][0],b['medians']) x = np.arange(len(mediansReal)) #plt.subplot(121) #plt.ylim((y0, y1)) plt.xlabel('$q$',fontsize=fsize) b = plt.boxplot(np.vstack((breadtest)),sym="") mediansSynth = map(lambda i: i.get_data()[1][0],b['medians']) #plt.show() err = sum(abs(np.array(mediansReal)-np.array(mediansSynth))) err1 = sum(abs(np.array(mediansReal[:dfs])-np.array(mediansSynth[:dfs]))) err2 = sum(abs(np.array(mediansReal[dfs:])-np.array(mediansSynth[dfs:]))) qs = range(-dfs,dfs+1) xticks(x+1,qs) # translate print "ERROR: ", err if(err < minn): plt.plot(x+1, mediansReal, 'k+--', label='real',linewidth=2.0) plt.plot(x+1, mediansSynth, 'b+--', label='synthetic',linewidth=2.0) plt.legend(loc=4) plt.ylabel(r'$D_{q}$',fontsize=fsize) plt.xlabel(r'$q$',fontsize=fsize) plt.title(vals) savefig('best_boxplot'+str(err)+'_'+'_.png') return err, err1, err2 if(False): scoreRF = (len(gtruth)-sum(abs(gtruth-predictionsRF)))/float(len(gtruth)) scoreSVM = (len(gtruth)-sum(abs(gtruth-predictionsSVM)))/float(len(gtruth)) #scores = cross_validation.cross_val_score(cfr, data, labels, cv=4) print "Classification performance (Random Forest classifier): " + str( scoreRF*100 ) + "%" 
print "Classification performance (Support Vector Machine classifier): " + str( scoreSVM*100 ) + "%"
# Copyright (c) 2012 Citrix Systems, Inc. # Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for Pool-related functions (join, eject, etc). """ from oslo.config import cfg import six.moves.urllib.parse as urlparse from nova.compute import rpcapi as compute_rpcapi from nova import exception from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.virt.xenapi import pool_states from nova.virt.xenapi import vm_utils LOG = logging.getLogger(__name__) xenapi_pool_opts = [ cfg.BoolOpt('use_join_force', default=True, help='To use for hosts with different CPUs'), ] CONF = cfg.CONF CONF.register_opts(xenapi_pool_opts, 'xenserver') CONF.import_opt('host', 'nova.netconf') class ResourcePool(object): """Implements resource pool operations.""" def __init__(self, session, virtapi): host_rec = session.host.get_record(session.host_ref) self._host_name = host_rec['hostname'] self._host_addr = host_rec['address'] self._host_uuid = host_rec['uuid'] self._session = session self._virtapi = virtapi self.compute_rpcapi = compute_rpcapi.ComputeAPI() def undo_aggregate_operation(self, context, op, aggregate, host, set_error): """Undo aggregate operation when pool error raised.""" try: if set_error: metadata = {pool_states.KEY: pool_states.ERROR} aggregate.update_metadata(metadata) op(host) except Exception: LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable state ' 'during operation on %(host)s'), {'aggregate_id': aggregate['id'], 'host': host}) def add_to_aggregate(self, context, aggregate, host, slave_info=None): """Add a compute host to an aggregate.""" if not pool_states.is_hv_pool(aggregate['metadata']): return invalid = {pool_states.CHANGING: 'setup in progress', pool_states.DISMISSED: 'aggregate deleted', pool_states.ERROR: 'aggregate in error'} if (aggregate['metadata'][pool_states.KEY] in invalid.keys()): raise exception.InvalidAggregateAction( action='add host', aggregate_id=aggregate['id'], reason=invalid[aggregate['metadata'][pool_states.KEY]]) if (aggregate['metadata'][pool_states.KEY] == pool_states.CREATED): aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING}) if len(aggregate['hosts']) == 1: # this is the first host of the pool -> make it master self._init_pool(aggregate['id'], aggregate['name']) # save metadata so that we can find the master again metadata = {'master_compute': host, host: self._host_uuid, pool_states.KEY: pool_states.ACTIVE} aggregate.update_metadata(metadata) else: # the pool is already up and running, we need to figure out # whether we can serve the request from this host or not. master_compute = aggregate['metadata']['master_compute'] if master_compute == CONF.host and master_compute != host: # this is the master -> do a pool-join # To this aim, nova compute on the slave has to go down. 
# NOTE: it is assumed that ONLY nova compute is running now self._join_slave(aggregate['id'], host, slave_info.get('compute_uuid'), slave_info.get('url'), slave_info.get('user'), slave_info.get('passwd')) metadata = {host: slave_info.get('xenhost_uuid'), } aggregate.update_metadata(metadata) elif master_compute and master_compute != host: # send rpc cast to master, asking to add the following # host with specified credentials. slave_info = self._create_slave_info() self.compute_rpcapi.add_aggregate_host( context, aggregate, host, master_compute, slave_info) def remove_from_aggregate(self, context, aggregate, host, slave_info=None): """Remove a compute host from an aggregate.""" slave_info = slave_info or dict() if not pool_states.is_hv_pool(aggregate['metadata']): return invalid = {pool_states.CREATED: 'no hosts to remove', pool_states.CHANGING: 'setup in progress', pool_states.DISMISSED: 'aggregate deleted', } if aggregate['metadata'][pool_states.KEY] in invalid.keys(): raise exception.InvalidAggregateAction( action='remove host', aggregate_id=aggregate['id'], reason=invalid[aggregate['metadata'][pool_states.KEY]]) master_compute = aggregate['metadata']['master_compute'] if master_compute == CONF.host and master_compute != host: # this is the master -> instruct it to eject a host from the pool host_uuid = aggregate['metadata'][host] self._eject_slave(aggregate['id'], slave_info.get('compute_uuid'), host_uuid) aggregate.update_metadata({host: None}) elif master_compute == host: # Remove master from its own pool -> destroy pool only if the # master is on its own, otherwise raise fault. Destroying a # pool made only by master is fictional if len(aggregate['hosts']) > 1: # NOTE: this could be avoided by doing a master # re-election, but this is simpler for now. raise exception.InvalidAggregateAction( aggregate_id=aggregate['id'], action='remove_from_aggregate', reason=_('Unable to eject %s ' 'from the pool; pool not empty') % host) self._clear_pool(aggregate['id']) aggregate.update_metadata({'master_compute': None, host: None}) elif master_compute and master_compute != host: # A master exists -> forward pool-eject request to master slave_info = self._create_slave_info() self.compute_rpcapi.remove_aggregate_host( context, aggregate['id'], host, master_compute, slave_info) else: # this shouldn't have happened raise exception.AggregateError(aggregate_id=aggregate['id'], action='remove_from_aggregate', reason=_('Unable to eject %s ' 'from the pool; No master found') % host) def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd): """Joins a slave into a XenServer resource pool.""" try: args = {'compute_uuid': compute_uuid, 'url': url, 'user': user, 'password': passwd, 'force': jsonutils.dumps(CONF.xenserver.use_join_force), 'master_addr': self._host_addr, 'master_user': CONF.xenserver.connection_username, 'master_pass': CONF.xenserver.connection_password, } self._session.call_plugin('xenhost', 'host_join', args) except self._session.XenAPI.Failure as e: LOG.error(_("Pool-Join failed: %s"), e) raise exception.AggregateError(aggregate_id=aggregate_id, action='add_to_aggregate', reason=_('Unable to join %s ' 'in the pool') % host) def _eject_slave(self, aggregate_id, compute_uuid, host_uuid): """Eject a slave from a XenServer resource pool.""" try: # shutdown nova-compute; if there are other VMs running, e.g. # guest instances, the eject will fail. That's a precaution # to deal with the fact that the admin should evacuate the host # first. The eject wipes out the host completely. 
            vm_ref = self._session.VM.get_by_uuid(compute_uuid)
            self._session.VM.clean_shutdown(vm_ref)
            host_ref = self._session.host.get_by_uuid(host_uuid)
            self._session.pool.eject(host_ref)
        except self._session.XenAPI.Failure as e:
            LOG.error(_("Pool-eject failed: %s"), e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=str(e.details))

    def _init_pool(self, aggregate_id, aggregate_name):
        """Set the name label of a XenServer pool."""
        try:
            pool_ref = self._session.pool.get_all()[0]
            self._session.pool.set_name_label(pool_ref, aggregate_name)
        except self._session.XenAPI.Failure as e:
            LOG.error(_("Unable to set up pool: %s."), e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='add_to_aggregate',
                                           reason=str(e.details))

    def _clear_pool(self, aggregate_id):
        """Clear the name label of a XenServer pool."""
        try:
            pool_ref = self._session.pool.get_all()[0]
            self._session.pool.set_name_label(pool_ref, '')
        except self._session.XenAPI.Failure as e:
            LOG.error(_("Pool-set_name_label failed: %s"), e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=str(e.details))

    def _create_slave_info(self):
        """XenServer specific info needed to join the hypervisor pool."""
        # replace the address from the xenapi connection url
        # because this might be 169.254.0.1, i.e. xenapi
        # NOTE: password in clear is not great, but it'll do for now
        sender_url = swap_xapi_host(
            CONF.xenserver.connection_url, self._host_addr)

        return {
            "url": sender_url,
            "user": CONF.xenserver.connection_username,
            "passwd": CONF.xenserver.connection_password,
            "compute_uuid": vm_utils.get_this_vm_uuid(None),
            "xenhost_uuid": self._host_uuid,
        }


def swap_xapi_host(url, host_addr):
    """Replace the XenServer address present in 'url' with 'host_addr'."""
    temp_url = urlparse.urlparse(url)
    _netloc, sep, port = temp_url.netloc.partition(':')
    return url.replace(temp_url.netloc,
                       '%s%s%s' % (host_addr, sep, port))
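# Illustration (hypothetical values): swap_xapi_host() only rewrites the
# network location of the connection URL, keeping scheme, port and path:
#
#   swap_xapi_host('http://169.254.0.1:80/session', '10.0.0.5')
#   # -> 'http://10.0.0.5:80/session'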
from unittest2 import TestCase from cosmic.models import BaseModel from cosmic.globals import cosmos from cosmic.http import * from cosmic.api import API from cosmic.client import WsgiAPIClient from cosmic.types import * class TestGuideWhatIsAPI(TestCase): maxDiff = None def setUp(self): self.cosmos = {} with cosmos.swap(self.cosmos): self.mathy = API("trivia", homepage="http://example.com") def test_to_json(self): with cosmos.swap(self.cosmos): self.assertEqual(APISpec.to_json(self.mathy.spec), { u'name': 'trivia', u'homepage': 'http://example.com', u'actions': {u'map': {}, u'order': []}, u'models': {u'map': {}, u'order': []} }) class TestGuideModels(TestCase): maxDiff = None def setUp(self): self.cosmos = {} with cosmos.swap(self.cosmos): self.places = places = API('places') @places.model class Address(BaseModel): properties = [ required(u"number", Integer), required(u"street", String), optional(u"city", String) ] def test_to_json(self): places = self.places with cosmos.swap(self.cosmos): self.assertEqual(APISpec.to_json(places.spec), { u'name': u'places', u'actions': {u'map': {}, u'order': []}, u"models": { u"map": { u"Address": { u"properties": { u"map": { u"number": { u"required": True, u"schema": {u"type": u"Integer"} }, u"street": { u"required": True, u"schema": {u"type": u"String"} }, u"city": { u"required": False, u"schema": {u"type": u"String"} } }, u"order": [u"number", u"street", u"city"] }, u"links": {u"map": {}, u"order": []}, u"query_fields": {u"map": {}, u"order": []}, u"list_metadata": {u"map": {}, u"order": []}, u'methods': { u'get_by_id': False, u'get_list': False, u'create': False, u'update': False, u'delete': False, }, } }, u"order": [u"Address"] } }) def test_serialize_model(self): places = self.places with cosmos.swap(self.cosmos): rep = { "number": 31, "street": "Sesame" } self.assertEqual(Representation(Model('places.Address')).to_json((None, rep)), rep) class TestGuideModelLinks(TestCase): maxDiff = None def setUp(self): self.cosmos1 = {} with cosmos.swap(self.cosmos1): self.places = places = API('places') @places.model class City(BaseModel): properties = [ optional(u"name", String) ] @places.model class Address(BaseModel): properties = [ required(u"number", Integer), required(u"street", String), ] links = [ required_link(u"city", Model('places.City')) ] self.cosmos2 = {} with cosmos.swap(self.cosmos2): class PlacesClient(WsgiAPIClient): wsgi_app = Server(places).wsgi_app self.remote_places = PlacesClient() def remote_create_models(self): with cosmos.swap(self.cosmos2): elm13 = self.remote_places.models.Address(number=13, street="Elm") self.assertEqual(elm13.number, 13) class TestGuideGetById(TestCase): maxDiff = None def setUp(self): self.cosmos = {} with cosmos.swap(self.cosmos): self.places = places = API('places') @places.model class City(BaseModel): properties = [ optional(u"name", String) ] @classmethod def get_by_id(cls, id): if id in cities: return cities[id] else: return None cities = { "0": {"name": "Toronto"}, "1": {"name": "San Francisco"}, } def test_access_links(self): places = self.places with cosmos.swap(self.cosmos): city = places.models.City.get_by_id("0") self.assertEqual(city['name'], "Toronto") self.assertEqual(places.models.City.get_by_id("5") is None, True) class TestGuideSave(TestCase): maxDiff = None def setUp(self): self.cosmos = {} with cosmos.swap(self.cosmos): self.places = places = API('places') @places.model class City(BaseModel): methods = ["create", "update"] properties = [ optional(u"name", String) ] @classmethod def 
validate_patch(cls, datum): if datum[u"name"][0].islower(): raise ValidationError("Name must be capitalized", datum["name"]) @classmethod def create(cls, **patch): id = str(len(cities)) cities[id] = patch return id, patch @classmethod def update(cls, id, **patch): cities[id] = patch return patch self.cosmos2 = {} with cosmos.swap(self.cosmos2): class PlacesClient(WsgiAPIClient): wsgi_app = Server(places).wsgi_app server_cosmos = self.cosmos self.remote_places = PlacesClient() cities = { "0": {"name": "Toronto"}, "1": {"name": "San Francisco"}, } def test_save_good(self): places = self.places with cosmos.swap(self.cosmos): (id, rep) = places.models.City.create(name="Moscow") self.assertEqual(id, "2") def test_local_save_validation_error(self): with cosmos.swap(self.cosmos): with self.assertRaisesRegexp(ValidationError, "must be capitalized"): self.places.models.City.validate_patch({"name": "moscow"}) self.places.models.City.create(name="moscow") def test_remote_save_validation_error(self): with cosmos.swap(self.cosmos2): with self.assertRaisesRegexp(RemoteHTTPError, "must be capitalized"): self.remote_places.models.City.create(name="moscow") class TestGuideDelete(TestCase): maxDiff = None def setUp(self): self.cosmos = {} with cosmos.swap(self.cosmos): self.places = places = API('places') @places.model class City(BaseModel): methods = ["get_by_id", "delete"] properties = [ optional(u"name", String) ] @classmethod def get_by_id(cls, id): if id in cities: return cities[id] raise NotFound @classmethod def delete(cls, id): del cities[id] cities = { "0": {"name": "Toronto"}, "1": {"name": "San Francisco"}, } def test_save(self): places = self.places with cosmos.swap(self.cosmos): city = places.models.City.get_by_id("0") places.models.City.delete("0") with self.assertRaises(NotFound): places.models.City.get_by_id("0") class TestGuideGetList(TestCase): maxDiff = None def setUp(self): self.cosmos = {} with cosmos.swap(self.cosmos): self.places = places = API('places') @places.model class City(BaseModel): properties = [ optional(u"name", String) ] query_fiels = [ optional(u"country", String) ] @classmethod def get_list(cls, country=None): if country is None: return cities.items() elif country == "Canada": return [("0", cities["0"])] elif country == "USA": return [("1", cities["1"])] else: return [] cities = { "0": {"name": "Toronto"}, "1": {"name": "San Francisco"}, } def test_get_list(self): places = self.places with cosmos.swap(self.cosmos): l1 = places.models.City.get_list() self.assertEqual(len(l1), 2) l2 = places.models.City.get_list(country="Canada") self.assertEqual(len(l2), 1) self.assertEqual(l2[0][1]['name'], "Toronto") l3 = places.models.City.get_list(country="Russia") self.assertEqual(l3, []) class TestGuideAction(TestCase): maxDiff = None def setUp(self): self.cosmos1 = {} with cosmos.swap(self.cosmos1): self.mathy = mathy = API("mathy") @mathy.action(accepts=Array(Integer), returns=Integer) def add(numbers): return sum(numbers) @mathy.action(accepts=Struct([ required(u'numerator', Integer), required(u'denominator', Integer), ]), returns=Integer) def divide(numerator, denominator): return numerator / denominator self.add = add self.cosmos2 = {} with cosmos.swap(self.cosmos2): class MathyClient(WsgiAPIClient): wsgi_app = Server(mathy).wsgi_app self.remote_mathy = MathyClient() def test_call_as_function(self): self.assertEqual(self.add([1, 2, 3]), 6) def test_call_as_action(self): self.assertEqual(self.mathy.actions.add([1, 2, 3]), 6) def test_call_as_action_remote(self): 
        self.assertEqual(self.remote_mathy.actions.add([1, 2, 3]), 6)

    def test_call_as_action_remote_kwargs(self):
        self.assertEqual(
            self.remote_mathy.actions.divide(numerator=10, denominator=5), 2)

    def test_remote_action_validation_error(self):
        with self.assertRaisesRegexp(HTTPError, "Invalid Integer"):
            self.remote_mathy.actions.add([1, 2, True])
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import os import nibabel as nib import numpy as np import pandas as pd import tensorflow as tf from niftynet.engine.sampler_resize_v2 import ResizeSampler from niftynet.engine.windows_aggregator_resize import ResizeSamplesAggregator from niftynet.io.image_reader import ImageReader from niftynet.io.image_sets_partitioner import ImageSetsPartitioner from niftynet.layer.discrete_label_normalisation import \ DiscreteLabelNormalisationLayer from niftynet.layer.pad import PadLayer from niftynet.utilities.util_common import ParserNamespace from tests.niftynet_testcase import NiftyNetTestCase MULTI_MOD_DATA = { 'T1': ParserNamespace( csv_file=os.path.join('testing_data', 'T1sampler.csv'), path_to_search='testing_data', filename_contains=('_o_T1_time', '23'), filename_not_contains=('Parcellation', ), interp_order=3, pixdim=(2.4, 5.0, 2.0), axcodes='LAS', spatial_window_size=(23, 32, 15), loader=None), 'FLAIR': ParserNamespace( csv_file=os.path.join('testing_data', 'FLAIRsampler.csv'), path_to_search='testing_data', filename_contains=('FLAIR_', '23'), filename_not_contains=('Parcellation', ), interp_order=3, pixdim=(2.4, 5.0, 2.0), axcodes='LAS', spatial_window_size=(23, 32, 15), loader=None) } MULTI_MOD_TASK = ParserNamespace(image=('T1', 'FLAIR')) MOD_2D_DATA = { 'ultrasound': ParserNamespace( csv_file=os.path.join('testing_data', 'T1sampler2d.csv'), path_to_search='testing_data', filename_contains=('2d_', ), filename_not_contains=('Parcellation', ), interp_order=3, pixdim=None, axcodes=None, spatial_window_size=(72, 83, 1), loader=None), } MOD_2D_TASK = ParserNamespace(image=('ultrasound', )) MOD_LABEL_DATA = { 'parcellation': ParserNamespace( csv_file=os.path.join('testing_data', 'Parcelsampler2d.csv'), path_to_search='testing_data', filename_contains=('23_NeuroMorph_Parcellation', ), filename_not_contains=('FLAIR', ), interp_order=0, pixdim=None, axcodes=None, spatial_window_size=(150, 140, 100), loader=None), } MOD_LABEl_TASK = ParserNamespace(label=('parcellation', )) SINGLE_25D_DATA = { 'T1': ParserNamespace( csv_file=os.path.join('testing_data', 'T1sampler.csv'), path_to_search='testing_data', filename_contains=('_o_T1_time', '106'), filename_not_contains=('Parcellation', ), interp_order=3, pixdim=(3.0, 5.0, 5.0), axcodes='LAS', spatial_window_size=(40, 30, 1), loader=None), } SINGLE_25D_TASK = ParserNamespace(image=('T1', )) data_partitioner = ImageSetsPartitioner() multi_mod_list = data_partitioner.initialise( MULTI_MOD_DATA, data_split_file='testing_data/resize_split.csv').get_file_list() mod_2d_list = data_partitioner.initialise( MOD_2D_DATA, data_split_file='testing_data/resize_split.csv').get_file_list() mod_label_list = data_partitioner.initialise( MOD_LABEL_DATA, data_split_file='testing_data/resize_split.csv').get_file_list() single_25d_list = data_partitioner.initialise( SINGLE_25D_DATA, data_split_file='testing_data/resize_split.csv').get_file_list() def get_3d_reader(): ''' define the 3d reader :return: 3d reader ''' reader = ImageReader(['image']) reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list) return reader def get_2d_reader(): ''' define the 2d reader :return: 2d reader ''' reader = ImageReader(['image']) reader.initialise(MOD_2D_DATA, MOD_2D_TASK, mod_2d_list) return reader def get_label_reader(): ''' define the label reader :return: label reader ''' reader = ImageReader(['label']) reader.initialise(MOD_LABEL_DATA, MOD_LABEl_TASK, mod_label_list) label_normaliser = 
DiscreteLabelNormalisationLayer( image_name='label', modalities=vars(SINGLE_25D_TASK).get('label'), model_filename=os.path.join('testing_data', 'agg_test.txt')) reader.add_preprocessing_layers(label_normaliser) pad_layer = PadLayer(image_name=('label', ), border=(5, 6, 7)) reader.add_preprocessing_layers([pad_layer]) return reader def get_25d_reader(): ''' define the 2.5 d reader :return: ''' reader = ImageReader(['image']) reader.initialise(SINGLE_25D_DATA, SINGLE_25D_TASK, single_25d_list) return reader class ResizeSamplesAggregatorTest(NiftyNetTestCase): def test_3d_init(self): reader = get_3d_reader() sampler = ResizeSampler( reader=reader, window_sizes=MULTI_MOD_DATA, batch_size=1, shuffle=False, queue_length=50) aggregator = ResizeSamplesAggregator( image_reader=reader, name='image', output_path=os.path.join('testing_data', 'aggregated'), interp_order=3) more_batch = True with self.cached_session() as sess: sampler.set_num_threads(2) while more_batch: try: out = sess.run(sampler.pop_batch_op()) except tf.errors.OutOfRangeError: break more_batch = aggregator.decode_batch( {'window_image': out['image']}, out['image_location']) output_filename = 'window_image_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0)) output_file = os.path.join('testing_data', 'aggregated', output_filename) self.assertAllClose(nib.load(output_file).shape, (256, 168, 256, 1, 2)) sampler.close_all() def test_3d_init_mo(self): reader = get_3d_reader() sampler = ResizeSampler( reader=reader, window_sizes=MULTI_MOD_DATA, batch_size=1, shuffle=False, queue_length=50) aggregator = ResizeSamplesAggregator( image_reader=reader, name='image', output_path=os.path.join('testing_data', 'aggregated'), interp_order=3) more_batch = True with self.cached_session() as sess: sampler.set_num_threads(2) while more_batch: try: out = sess.run(sampler.pop_batch_op()) except tf.errors.OutOfRangeError: break sum_val = np.sum(out['image']) more_batch = aggregator.decode_batch( { 'window_image': out['image'], 'csv_sum': sum_val }, out['image_location']) output_filename = 'window_image_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0)) sum_filename = os.path.join( 'testing_data', 'aggregated', 'csv_sum_{}_niftynet_out.csv'.format( sampler.reader.get_subject_id(0))) output_file = os.path.join('testing_data', 'aggregated', output_filename) self.assertAllClose(nib.load(output_file).shape, (256, 168, 256, 1, 2)) sum_pd = pd.read_csv(sum_filename) self.assertAllClose(sum_pd.shape, [1, 2]) sampler.close_all() def test_3d_init_mo_2im(self): reader = get_3d_reader() sampler = ResizeSampler( reader=reader, window_sizes=MULTI_MOD_DATA, batch_size=1, shuffle=False, queue_length=50) aggregator = ResizeSamplesAggregator( image_reader=reader, name='image', output_path=os.path.join('testing_data', 'aggregated'), interp_order=3) more_batch = True with self.cached_session() as sess: sampler.set_num_threads(2) while more_batch: try: out = sess.run(sampler.pop_batch_op()) except tf.errors.OutOfRangeError: break more_batch = aggregator.decode_batch( { 'window_image': out['image'], 'window_im2': out['image'] }, out['image_location']) output_filename = 'window_image_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0)) outim2_filename = os.path.join( 'testing_data', 'aggregated', 'window_im2_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0))) output_file = os.path.join('testing_data', 'aggregated', output_filename) self.assertAllClose(nib.load(output_file).shape, (256, 168, 256, 1, 2)) self.assertAllClose( 
nib.load(outim2_filename).shape, (256, 168, 256, 1, 2)) sampler.close_all() def test_3d_init_mo_3out(self): reader = get_3d_reader() sampler = ResizeSampler( reader=reader, window_sizes=MULTI_MOD_DATA, batch_size=1, shuffle=False, queue_length=50) aggregator = ResizeSamplesAggregator( image_reader=reader, name='image', output_path=os.path.join('testing_data', 'aggregated'), interp_order=3) more_batch = True with self.cached_session() as sess: sampler.set_num_threads(2) while more_batch: try: out = sess.run(sampler.pop_batch_op()) except tf.errors.OutOfRangeError: break sum_val = np.sum(out['image']) stats_val = [ np.sum(out['image']), np.min(out['image']), np.max(out['image']) ] more_batch = aggregator.decode_batch( { 'window_image': out['image'], 'csv_sum': sum_val, 'csv_stats': stats_val }, out['image_location']) output_filename = 'window_image_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0)) sum_filename = os.path.join( 'testing_data', 'aggregated', 'csv_sum_{}_niftynet_out.csv'.format( sampler.reader.get_subject_id(0))) stats_filename = os.path.join( 'testing_data', 'aggregated', 'csv_stats_{}_niftynet_out.csv'.format( sampler.reader.get_subject_id(0))) output_file = os.path.join('testing_data', 'aggregated', output_filename) self.assertAllClose(nib.load(output_file).shape, (256, 168, 256, 1, 2)) sum_pd = pd.read_csv(sum_filename) self.assertAllClose(sum_pd.shape, [1, 2]) stats_pd = pd.read_csv(stats_filename) self.assertAllClose(stats_pd.shape, [1, 4]) sampler.close_all() def test_init_3d_mo_bidimcsv(self): reader = get_3d_reader() sampler = ResizeSampler( reader=reader, window_sizes=MULTI_MOD_DATA, batch_size=1, shuffle=False, queue_length=50) aggregator = ResizeSamplesAggregator( image_reader=reader, name='image', output_path=os.path.join('testing_data', 'aggregated'), interp_order=3) more_batch = True with self.cached_session() as sess: sampler.set_num_threads(2) while more_batch: try: out = sess.run(sampler.pop_batch_op()) except tf.errors.OutOfRangeError: break min_val = np.sum((np.asarray(out['image']).flatten())) stats_val = [ np.min(out['image']), np.max(out['image']), np.sum(out['image']) ] stats_val = np.expand_dims(stats_val, 0) stats_val = np.concatenate([stats_val, stats_val], axis=0) more_batch = aggregator.decode_batch( { 'window_image': out['image'], 'csv_sum': min_val, 'csv_stats_2d': stats_val }, out['image_location']) output_filename = 'window_image_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0)) sum_filename = os.path.join( 'testing_data', 'aggregated', 'csv_sum_{}_niftynet_out.csv'.format( sampler.reader.get_subject_id(0))) stats_filename = os.path.join( 'testing_data', 'aggregated', 'csv_stats_2d_{}_niftynet_out.csv'.format( sampler.reader.get_subject_id(0))) output_file = os.path.join('testing_data', 'aggregated', output_filename) self.assertAllClose(nib.load(output_file).shape, (256, 168, 256, 1, 2)) min_pd = pd.read_csv(sum_filename) self.assertAllClose(min_pd.shape, [1, 2]) stats_pd = pd.read_csv(stats_filename) self.assertAllClose(stats_pd.shape, [1, 7]) sampler.close_all() def test_2d_init(self): reader = get_2d_reader() sampler = ResizeSampler( reader=reader, window_sizes=MOD_2D_DATA, batch_size=1, shuffle=False, queue_length=50) aggregator = ResizeSamplesAggregator( image_reader=reader, name='image', output_path=os.path.join('testing_data', 'aggregated'), interp_order=3) more_batch = True with self.cached_session() as sess: sampler.set_num_threads(2) while more_batch: try: out = sess.run(sampler.pop_batch_op()) except 
tf.errors.OutOfRangeError: break more_batch = aggregator.decode_batch( {'window_image': out['image']}, out['image_location']) output_filename = 'window_image_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0)) output_file = os.path.join('testing_data', 'aggregated', output_filename) self.assertAllClose(nib.load(output_file).shape, [128, 128]) sampler.close_all() def test_init_2d_mo(self): reader = get_2d_reader() sampler = ResizeSampler( reader=reader, window_sizes=MOD_2D_DATA, batch_size=1, shuffle=False, queue_length=50) aggregator = ResizeSamplesAggregator( image_reader=reader, name='image', output_path=os.path.join('testing_data', 'aggregated'), interp_order=3) more_batch = True with self.cached_session() as sess: sampler.set_num_threads(2) while more_batch: try: out = sess.run(sampler.pop_batch_op()) except tf.errors.OutOfRangeError: break min_val = np.sum((np.asarray(out['image']).flatten())) stats_val = [np.min(out), np.max(out), np.sum(out)] more_batch = aggregator.decode_batch( { 'window_image': out['image'], 'csv_sum': min_val }, out['image_location']) output_filename = 'window_image_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0)) sum_filename = os.path.join( 'testing_data', 'aggregated', 'csv_sum_{}_niftynet_out.csv'.format( sampler.reader.get_subject_id(0))) output_file = os.path.join('testing_data', 'aggregated', output_filename) self.assertAllClose(nib.load(output_file).shape, (128, 128)) min_pd = pd.read_csv(sum_filename) self.assertAllClose(min_pd.shape, [1, 2]) sampler.close_all() def test_init_2d_mo_3out(self): reader = get_2d_reader() sampler = ResizeSampler( reader=reader, window_sizes=MOD_2D_DATA, batch_size=1, shuffle=False, queue_length=50) aggregator = ResizeSamplesAggregator( image_reader=reader, name='image', output_path=os.path.join('testing_data', 'aggregated'), interp_order=3) more_batch = True with self.cached_session() as sess: sampler.set_num_threads(2) while more_batch: try: out = sess.run(sampler.pop_batch_op()) except tf.errors.OutOfRangeError: break min_val = np.sum((np.asarray(out['image']).flatten())) stats_val = [ np.min(out['image']), np.max(out['image']), np.sum(out['image']) ] more_batch = aggregator.decode_batch( { 'window_image': out['image'], 'csv_sum': min_val, 'csv_stats': stats_val }, out['image_location']) output_filename = 'window_image_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0)) sum_filename = os.path.join( 'testing_data', 'aggregated', 'csv_sum_{}_niftynet_out.csv'.format( sampler.reader.get_subject_id(0))) stats_filename = os.path.join( 'testing_data', 'aggregated', 'csv_stats_{}_niftynet_out.csv'.format( sampler.reader.get_subject_id(0))) output_file = os.path.join('testing_data', 'aggregated', output_filename) self.assertAllClose(nib.load(output_file).shape, (128, 128)) min_pd = pd.read_csv(sum_filename) self.assertAllClose(min_pd.shape, [1, 2]) stats_pd = pd.read_csv(stats_filename) self.assertAllClose(stats_pd.shape, [1, 4]) sampler.close_all() def test_init_2d_mo_bidimcsv(self): reader = get_2d_reader() sampler = ResizeSampler( reader=reader, window_sizes=MOD_2D_DATA, batch_size=1, shuffle=False, queue_length=50) aggregator = ResizeSamplesAggregator( image_reader=reader, name='image', output_path=os.path.join('testing_data', 'aggregated'), interp_order=3) more_batch = True with self.cached_session() as sess: sampler.set_num_threads(2) while more_batch: try: out = sess.run(sampler.pop_batch_op()) except tf.errors.OutOfRangeError: break min_val = 
np.sum((np.asarray(out['image']).flatten())) stats_val = [ np.min(out['image']), np.max(out['image']), np.sum(out['image']) ] stats_val = np.expand_dims(stats_val, 0) stats_val = np.concatenate([stats_val, stats_val], axis=0) more_batch = aggregator.decode_batch( { 'window_image': out['image'], 'csv_sum': min_val, 'csv_stats_2d': stats_val }, out['image_location']) output_filename = 'window_image_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0)) sum_filename = os.path.join( 'testing_data', 'aggregated', 'csv_sum_{}_niftynet_out.csv'.format( sampler.reader.get_subject_id(0))) stats_filename = os.path.join( 'testing_data', 'aggregated', 'csv_stats_2d_{}_niftynet_out.csv'.format( sampler.reader.get_subject_id(0))) output_file = os.path.join('testing_data', 'aggregated', output_filename) self.assertAllClose(nib.load(output_file).shape, (128, 128)) min_pd = pd.read_csv(sum_filename) self.assertAllClose(min_pd.shape, [1, 2]) stats_pd = pd.read_csv(stats_filename) self.assertAllClose(stats_pd.shape, [1, 7]) sampler.close_all() def test_25d_init(self): reader = get_25d_reader() sampler = ResizeSampler( reader=reader, window_sizes=SINGLE_25D_DATA, batch_size=1, shuffle=False, queue_length=50) aggregator = ResizeSamplesAggregator( image_reader=reader, name='image', output_path=os.path.join('testing_data', 'aggregated'), interp_order=3) more_batch = True with self.cached_session() as sess: sampler.set_num_threads(2) while more_batch: try: out = sess.run(sampler.pop_batch_op()) except tf.errors.OutOfRangeError: break more_batch = aggregator.decode_batch( {'window_image': out['image']}, out['image_location']) output_filename = 'window_image_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0)) output_file = os.path.join('testing_data', 'aggregated', output_filename) self.assertAllClose(nib.load(output_file).shape, [256, 168, 256]) sampler.close_all() def test_inverse_mapping(self): reader = get_label_reader() sampler = ResizeSampler( reader=reader, window_sizes=MOD_LABEL_DATA, batch_size=1, shuffle=False, queue_length=50) aggregator = ResizeSamplesAggregator( image_reader=reader, name='label', output_path=os.path.join('testing_data', 'aggregated'), interp_order=0) more_batch = True with self.cached_session() as sess: sampler.set_num_threads(2) while more_batch: try: out = sess.run(sampler.pop_batch_op()) except tf.errors.OutOfRangeError: break more_batch = aggregator.decode_batch( {'window_label': out['label']}, out['label_location']) output_filename = 'window_label_{}_niftynet_out.nii.gz'.format( sampler.reader.get_subject_id(0)) output_file = os.path.join('testing_data', 'aggregated', output_filename) self.assertAllClose(nib.load(output_file).shape, [256, 168, 256]) sampler.close_all() # output_data = nib.load(output_file).get_data()[..., 0, 0] # expected_data = nib.load( # 'testing_data/T1_1023_NeuroMorph_Parcellation.nii.gz').get_data() # self.assertAllClose(output_data, expected_data) if __name__ == "__main__": tf.test.main()