repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
noba3/KoTos | refs/heads/master | addons/plugin.video.movie25/resources/libs/plugins/animefreak.py | 2 | # -*- coding: cp1252 -*-
import urllib,urllib2,re,cookielib,string, urlparse,sys,os
import xbmc, xbmcgui, xbmcaddon, xbmcplugin,urlresolver
from t0mm0.common.net import Net as net
from resources.libs import main
#Mash Up - by Mash2k3 2012.
from t0mm0.common.addon import Addon
from resources.universal import playbackengine, watchhistory
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
addon = Addon('plugin.video.movie25', sys.argv)
art = main.art
wh = watchhistory.WatchHistory('plugin.video.movie25')
def AtoZ():
    """Build the A-Z index menu: a '#' entry followed by one entry per letter."""
    base_url = 'http://www.animefreak.tv/book'
    main.addDir('#', base_url, 629, art + '/09.png')
    for letter in string.ascii_uppercase:
        icon = '%s/%s.png' % (art, letter.lower())
        main.addDir(letter, base_url, 629, icon)
    main.GA("Tvshows", "A-ZTV")
    main.VIEWSB()
def MAIN():
    """Top-level AnimeFreak menu: report the analytics hit, then emit the
    fixed set of entry points (label, url, mode, icon)."""
    main.GA("Plugin", "AnimeFreak")
    entries = [
        ('Search', 'http://www.animefreak.tv', 638, art + '/search.png'),
        ('A-Z', 'http://www.animefreak.tv', 628, art + '/AZ.png'),
        ('Genre', 'http://www.animefreak.tv/browse', 634, art + '/genre.png'),
        ('Popular Anime', 'http://www.animefreak.tv/watch/popular-animes', 637, art + '/animefreak.png'),
        ('Latest Episodes', 'http://www.animefreak.tv/tracker', 632, art + '/animefreak.png'),
        ('Latest Anime', 'http://www.animefreak.tv/latest', 633, art + '/animefreak.png'),
    ]
    for label, url, mode, icon in entries:
        main.addDir(label, url, mode, icon)
def SEARCH():
    # Prompt the user for a query with the on-screen keyboard, fetch the
    # site's search-results page, and list every hit as a directory item.
    keyb = xbmc.Keyboard('', 'Search Anime')
    keyb.doModal()
    if (keyb.isConfirmed()):
        search = keyb.getText()
        encode=urllib.quote(search)
        surl='http://www.animefreak.tv/search/node/'+encode
        link=main.OPENURL(surl)
        # Strip all whitespace so the single-line regex below can match
        # across what were originally multiple HTML lines.
        link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
        match= re.compile('<dt class="title".+?a href="(.+?)">(.+?)</a.+?dt>').findall(link)
        for url,name in match:
            # Titles containing 'Episode' are treated as direct episode hits
            # (mode 630, presumably the source list); anything else as a
            # series page (mode 626) — TODO confirm mode semantics.
            r = re.findall('Episode',name)
            if not r:
                main.addDirT(name,url,626,'','','','','','')
            else:
                main.addDirT(name,url,630,'','','','','','')
def GENRE(murl):
    """List every genre checkbox found on the browse page as a directory entry."""
    html = main.unescapes(main.OPENURL(murl))
    pattern = '<input type="checkbox" name="taxonomy.+?" id=".+?" value=".+?" class="form-checkbox" /> (.+?)</label>'
    for name in re.compile(pattern).findall(html):
        slug = name.replace(' ', '-')
        # The site uses a non-obvious URL slug for this one genre.
        if slug == 'Slice-of-Life':
            slug = 'slice-life'
        main.addDir(name, 'http://www.animefreak.tv/category/genre/' + slug.lower(), 635, '')
def GENRELIST(murl):
    # Scrape one page of a genre category: each teaser yields
    # (relative url, thumbnail, title).  A pager-next link, when present,
    # adds a 'Next' entry that re-enters this mode (635).
    link=main.OPENURL(murl)
    link=main.unescapes(link)
    match = re.compile('<h2 class="nodeTitle">.+?<a href="(.+?)".+?src="(.+?)" alt="(.+?)" />').findall(link)
    for url, thumb,name in match:
        main.addDirT(name,'http://www.animefreak.tv/'+url,626,thumb,'','','','','')
    paginate = re.compile("""<li class="pager-next"><a href="(.+?)" title=".+?" class="active">.+?</a></li>""").findall(link)
    if len(paginate)>0:
        paginates=paginate[0]
        main.addDir('Next','http://www.animefreak.tv/'+paginates,635,art+'/next2.png')
def AZLIST(mname, murl):
    """List series whose title starts with *mname*.

    The special name '#' lists the page's leading numeric titles instead.
    Both branches of the original fetched and parsed the page identically,
    so the scrape is now done once and only the selection differs.
    """
    link = main.unescapes(main.OPENURL(murl))
    match = re.compile('<li><a href="([^<]+)">(.+?)</a></li>').findall(link)
    if mname == '#':
        # The first ten anchors on the index page are the numeric titles.
        entries = match[0:10]
    else:
        # Case-insensitive match on the first character of the title.
        entries = [(url, name) for url, name in match
                   if name[0] == mname or name[0] == mname.lower()]
    for url, name in entries:
        main.addDirT(name, 'http://www.animefreak.tv/' + url, 626, '', '', '', '', '', '')
def LATESTE(mname,murl):
    # Latest-episodes tracker page: each table-cell link becomes an episode
    # item (mode 630); the pager 'Next' entry re-enters this mode (632).
    link=main.OPENURL(murl)
    link=main.unescapes(link)
    match = re.compile('<td class=".+?<a href="(.+?)">(.+?)</a> </td>').findall(link)
    for url, name in match:
        main.addDirT(name,'http://www.animefreak.tv/'+url,630,'','','','','','')
    paginate = re.compile("""<li class="pager-next last"><a href="(.+?)" class="active">.+?</a></li>""").findall(link)
    if len(paginate)>0:
        paginates=paginate[0]
        main.addDir('Next','http://www.animefreak.tv/'+paginates,632,art+'/next2.png')
def LATESTA(mname,murl):
    # Same scrape as LATESTE, but items open as series listings (mode 626)
    # and the 'Next' entry stays in this mode (633).
    link=main.OPENURL(murl)
    link=main.unescapes(link)
    match = re.compile('<td class=".+?<a href="(.+?)">(.+?)</a> </td>').findall(link)
    for url, name in match:
        main.addDirT(name,'http://www.animefreak.tv/'+url,626,'','','','','','')
    paginate = re.compile("""<li class="pager-next last"><a href="(.+?)" class="active">.+?</a></li>""").findall(link)
    if len(paginate)>0:
        paginates=paginate[0]
        main.addDir('Next','http://www.animefreak.tv/'+paginates,633,art+'/next2.png')
def LISTPOP(murl):
    """List the 'popular animes' page: each teaser row carries an absolute
    series URL, a thumbnail and a title."""
    page = main.unescapes(main.OPENURL(murl))
    pattern = ('<div><span><img alt=".+?" src="(.+?)" /></span></div> </td> '
               '<td .+?> <h2><a href="(.+?)"><strong>(.+?)</strong></a></h2>')
    for thumb, url, title in re.compile(pattern).findall(page):
        main.addDirT(title, url, 626, thumb, '', '', '', '', '')
def LIST(mname,murl):
    # Series page: list every episode link ('leaf' items).  The page's first
    # thumbnail and description, if found, are applied to every episode item.
    link=main.OPENURL(murl)
    link=main.unescapes(link)
    match = re.compile('<li class="leaf.+?<a href="([^<]+)">(.+?)</a></li>').findall(link)
    thumbs = re.compile('</p><p><img align="left".+?src="(.+?)".+?>').findall(link)
    if thumbs:
        thumb=thumbs[0]
    else:
        thumb=''
    descs = re.compile('<h2><span style=".+?"><strong>.+?</strong></span></h2><blockquote><p>(.+?)</p>').findall(link)
    if descs:
        desc=descs[0]
    else:
        desc=''
    for url, name in match:
        main.addDirT(name,'http://www.animefreak.tv/'+url,630,thumb,desc,'','','','')
def LIST2(mname,murl,thumb,desc):
    # Episode page: list playable host links (mode 627).  Two page layouts
    # are handled: multi-part pages using 'loadParts' onClick handlers, and
    # plain pages embedding <iframe> players directly.
    # NOTE(review): indentation was reconstructed from a whitespace-mangled
    # dump; the placement of the final addPlayc call is the most plausible
    # reading — confirm against the original addon source.
    main.GA("AnimeFreak","List")
    link=main.OPENURL(murl)
    link=main.unescapes(link)
    match = re.compile("""onClick="javascript:loadParts.?\'(.+?)', \'\'.?" class="multi">(.+?)</a>""").findall(link)
    if len(match)==0:
        # No multi-part player: fall back to the page's iframes, labelling
        # each by a crudely-normalised host name.
        match = re.compile('<iframe .+?src="(.+?)".+?/iframe>').findall(link)
        for url in match:
            host=re.compile("http://(.+?).?/.+?").findall(url)
            for hname in host:
                name=hname.replace('www.','').replace('embed.','').replace('.co','').replace('.t','').replace('.e','')
                main.addPlayc(mname+' [COLOR red]'+name+'[/COLOR]',url,627,thumb,desc,'','','','')
    else:
        for url, name in match:
            # Each 'part' may itself embed several iframes.
            match2 = re.compile('<iframe(.+?)/iframe>').findall(url)
            if len(match2)>=2:
                for url in match2:
                    match = re.compile('src="(.+?)"').findall(url)
                    if len(match)==0:
                        match = re.compile("src='(.+?)'").findall(url)
                    for url in match:
                        host=re.compile("http://(.+?).?/.+?").findall(url)
                        for hname in host:
                            name=hname.replace('www.','').replace('embed.','').replace('.co','').replace('.t','').replace('.e','')
                            main.addPlayc(mname+' [COLOR red]'+name+'[/COLOR]',url,627,thumb,desc,'','','','')
            # Always add the part itself under its loadParts label.
            main.addPlayc(mname+' [COLOR red]'+name+'[/COLOR]',url,627,thumb,desc,'','','','')
def NovaWeed(murl):
    """Resolve a novamov/videoweed page to a playable stream URL.

    Returns '' (mirroring Upload2's behaviour) instead of crashing with an
    IndexError when the flashvars URL cannot be scraped, e.g. when the page
    layout changed or the file was removed.
    """
    link = main.unescapes(main.OPENURL(murl))
    vw = re.compile('flashvars.advURL="(.+?)";').findall(link)
    if not vw:
        xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Removed,4000)")
        return ''
    vid_url = vw[0]
    xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)")
    return main.resolve_url(vid_url)
def Upload2(murl):
    """Extract the direct video URL from an upload2 embed page; returns ''
    (after notifying the user) when the link has been removed."""
    page = main.unescapes(main.OPENURL(murl))
    found = re.compile("1&video=(.+?)&rating").findall(page)
    if not found:
        xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Removed,4000)")
        return ''
    return found[0]
def Sapo(murl):
    """Resolve a videos.sapo.pt embed URL to a direct stream URL.

    Fix: the original returned an unbound ``stream_url`` (NameError) when
    the showEmbedHTML time/token pair could not be scraped; it now falls
    back to '' like the other resolvers in this module.
    """
    match = re.compile('/play\?file=([^\&]+)').findall(murl)
    newlink = 'http://videos.sapo.pt/playhtml?file=' + match[0]
    link = main.unescapes(main.OPENURL(newlink))
    # Flatten the page and normalise quotes so one regex pass suffices.
    link = ''.join(link.splitlines()).replace('\'', '"')
    match1 = re.compile('showEmbedHTML\("swfplayer", (.+?), "(.+?)"\);').findall(link)
    stream_url = ''
    for time, token in match1:
        stream_url = match[0] + "?player=EXTERNO&time=" + time + "&token=" + token
    return stream_url
def LINK(mname,murl,thumb,desc):
    # Resolve *murl* (either a raw host URL or an HTML fragment containing a
    # src= attribute) to a stream URL, then hand it to the playback engine
    # and record watch history.  The same host checks (novamov, videoweed,
    # mp4upload, videobam, upload2, sapo, animefreak) are repeated in three
    # layers depending on where the playable URL was found.
    # NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    # the attachment of the inner else (after the sapo check) is the most
    # plausible reading — confirm against the original addon source.
    ok=True
    # Playlist is cleared but never otherwise used here — presumably to stop
    # any queued playback before starting a new item; verify.
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()
    link=main.unescapes(murl)
    match = re.compile('src="(.+?)"').findall(link)
    if len(match)==0:
        match = re.compile("src='(.+?)'").findall(link)
    if len(match)==0:
        # murl is a bare URL, not embed HTML: dispatch on the host name.
        r = re.findall('animefreak',murl)
        if r:
            # Self-hosted file: pull the player.swf file= argument and
            # rebuild the direct URL against the site's media server.
            link=main.OPENURL(murl)
            link=main.unescapes(link)
            match = re.compile('src="http://www.animefreak.tv/sites/default/files/af/misc/player.swf.?file=(.+?)"').findall(link)
            print 'HH1 '+match[0]
            s = re.findall('http://.+?/(.+?).mp4.?(.+?)e=([^\&]+)',match[0])
            for p1,p2,p3 in s:
                p1=p1.replace('+','%20')
                stream_url='http://78.152.42.214/'+p1+'.mp4?'+p2+'e='+p3
        nv = re.findall('novamov',murl)
        if nv:
            stream_url = NovaWeed(murl)
        v = re.findall('videoweed',murl)
        if v:
            stream_url = NovaWeed(murl)
        m = re.findall('mp4upload',murl)
        if m:
            link=main.OPENURL(murl)
            link=main.unescapes(link)
            vw = re.compile("'file': '(.+?)',").findall(link)
            stream_url = vw[0]
        vb = re.findall('videobam',murl)
        if vb:
            link=main.OPENURL(murl)
            link=main.unescapes(link)
            # prefer the low-quality URL, fall back to high
            vw = re.compile("low: '(.+?)'").findall(link)
            if len(vw)==0:
                vw = re.compile("high: '(.+?)'").findall(link)
            stream_url = vw[0]
        u2 = re.findall('upload2',murl)
        if u2:
            stream_url = Upload2(murl)
        sp = re.findall('sapo',murl)
        if sp:
            stream_url=Sapo(murl)
        # Fallback: a direct .mp4 URL not matching any known host.
        af = re.findall('.mp4',murl)
        if len(af)>0 and len(r)==0 and len(m)==0 and len(vb)==0 and len(u2)==0 and len(sp)==0:
            murl=murl.replace("'","").replace("+","%20")
            print "hh q "+murl
            stream_url=murl
    else:
        # murl was embed HTML; match[0] is the extracted src= URL.
        nv = re.findall('novamov',match[0])
        if nv:
            stream_url = NovaWeed(match[0])
        v = re.findall('videoweed',match[0])
        if v:
            stream_url = NovaWeed(match[0])
        m = re.findall('mp4upload',match[0])
        if m:
            link=main.OPENURL(match[0])
            link=main.unescapes(link)
            vw = re.compile("'file': '(.+?)',").findall(link)
            stream_url = vw[0]
        vb = re.findall('videobam',match[0])
        if vb:
            link=main.OPENURL(match[0])
            link=main.unescapes(link)
            vw = re.compile("low: '(.+?)'").findall(link)
            if len(vw)==0:
                vw = re.compile("high: '(.+?)'").findall(link)
            stream_url = vw[0]
        u2 = re.findall('upload2',match[0])
        if u2:
            stream_url = Upload2(match[0])
        sp = re.findall('sapo',match[0])
        if sp:
            stream_url=Sapo(match[0])
        else:
            # Not a sapo link: re-run the full host dispatch on match[0],
            # including the self-hosted animefreak player case.
            r = re.findall('animefreak',match[0])
            if r:
                link=main.OPENURL(match[0])
                link=main.unescapes(link)
                match = re.compile('src="http://www.animefreak.tv/sites/default/files/af/misc/player.swf.?file=(.+?)"').findall(link)
                print 'HH2 '+match[0]
                s = re.findall('http://.+?/(.+?).mp4.?(.+?)e=([^\&]+)',match[0])
                for p1,p2,p3 in s:
                    p1=p1.replace('+','%20')
                    stream_url='http://78.152.42.214/'+p1+'.mp4?'+p2+'e='+p3
            nv = re.findall('novamov',match[0])
            if nv:
                stream_url = NovaWeed(match[0])
            v = re.findall('videoweed',match[0])
            if v:
                stream_url = NovaWeed(match[0])
            m = re.findall('mp4upload',match[0])
            if m:
                link=main.OPENURL(match[0])
                link=main.unescapes(link)
                vw = re.compile("'file': '(.+?)',").findall(link)
                stream_url = vw[0]
            vb = re.findall('videobam',match[0])
            if vb:
                link=main.OPENURL(match[0])
                link=main.unescapes(link)
                vw = re.compile("low: '(.+?)'").findall(link)
                if len(vw)==0:
                    vw = re.compile("high: '(.+?)'").findall(link)
                stream_url = vw[0]
            u2 = re.findall('upload2',match[0])
            if u2:
                stream_url = Upload2(match[0])
            sp = re.findall('sapo',match[0])
            if sp:
                stream_url=Sapo(match[0])
    infoL={'Title': mname, 'Plot': desc, 'Genre': 'Anime'}
    # play with bookmark
    player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb,infolabels=infoL, watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='')
    #WatchHistory
    main.GA("AnimeFreak","Watched")
    if selfAddon.getSetting("whistory") == "true":
        wh.add_item(mname+' '+'[COLOR green]AFTv[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
    player.KeepAlive()
    return ok
|
holzman/glideinwms-old | refs/heads/master | lib/symCrypto.py | 2 | #
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This module defines classes to perform symmetric key cryptography
# (shared or hidden key)
#
import M2Crypto
import os
import binascii
######################
#
# Available ciphers:
# too many to list them all
# try 'man enc'
# a few of them are
# 'aes_128_cbc'
# 'aes_128_ofb
# 'aes_256_cbc'
# 'aes_256_cfb'
# 'bf_cbc'
# 'des3'
#
######################
# you probably don't want to use this
# Use the child classes instead
class SymKey:
    """Symmetric-key wrapper around M2Crypto's cipher streams.

    Holds a cipher name plus hex-encoded key and IV strings; child classes
    fix the cipher parameters.  Not meant to be used directly.
    """
    def __init__(self,
                 cypher_name, key_len, iv_len,
                 key_str=None, iv_str=None,
                 key_iv_code=None):
        self.cypher_name = cypher_name
        self.key_len = key_len    # key length in bytes (hex form is 2x chars)
        self.iv_len = iv_len      # IV length in bytes (hex form is 2x chars)
        self.key_str = None
        self.iv_str = None
        self.ket_str = None       # NOTE(review): likely a typo (key_str?) — never read
        self.load(key_str, iv_str, key_iv_code)
        return

    ###########################################
    # load a new key
    def load(self,
             key_str=None, iv_str=None,
             key_iv_code=None):
        """Load a key from hex key/iv strings, or from a combined
        'cypher:...,key:...,iv:...' code; with no arguments, clears the key."""
        if key_str is not None:
            if key_iv_code is not None:
                raise ValueError, "Illegal to define both key_str and key_iv_code"
            key_str = str(key_str) # just in case it was unicode"
            if len(key_str) != (self.key_len*2):
                raise ValueError, "Key must be exactly %i long, got %i" % (self.key_len*2, len(key_str))

            if iv_str is None:
                # if key_str defined, one needs the iv_str, too
                # set to default of 0
                iv_str = '0'*(self.iv_len*2)
            else:
                if len(iv_str) != (self.iv_len*2):
                    raise ValueError, "Initialization vector must be exactly %i long, got %i" % (self.iv_len*2, len(iv_str))
                iv_str = str(iv_str) # just in case it was unicode"
        elif key_iv_code is not None:
            key_iv_code = str(key_iv_code) # just in case it was unicode
            ki_arr = key_iv_code.split(',')
            if len(ki_arr) != 3:
                raise ValueError, "Invalid format, comas not found"
            if ki_arr[0] != ('cypher:%s' % self.cypher_name):
                raise ValueError, "Invalid format, not my cypher(%s)" % self.cypher_name
            if ki_arr[1][:4] != 'key:':
                raise ValueError, "Invalid format, key not found"
            if ki_arr[2][:3] != 'iv:':
                raise ValueError, "Invalid format, iv not found"
            # call itself, but with key and iv decoded
            return self.load(key_str=ki_arr[1][4:], iv_str=ki_arr[2][3:])
        #else keep None

        self.key_str = key_str
        self.iv_str = iv_str

    ###########################################
    # get the stored key
    def is_valid(self):
        # True once a key has been loaded or generated.
        return (self.key_str is not None)

    def get(self):
        # Return the (hex key, hex iv) pair.
        return (self.key_str, self.iv_str)

    def get_code(self):
        # Serialized form accepted back by load(key_iv_code=...).
        return "cypher:%s,key:%s,iv:%s" % (self.cypher_name, self.key_str, self.iv_str)

    ###########################################
    # generate key function
    def new(self, random_iv=True): # if random_iv==False, set iv to 0
        """Generate a fresh random key (and IV, unless random_iv is False)."""
        self.key_str = binascii.b2a_hex(M2Crypto.Rand.rand_bytes(self.key_len))
        if random_iv:
            self.iv_str = binascii.b2a_hex(M2Crypto.Rand.rand_bytes(self.iv_len))
        else:
            self.iv_str = '0'*(self.iv_len*2)
        return

    ###########################################
    # encrypt data inline
    def encrypt(self, data):
        """Encrypt *data*, returning the raw ciphertext.

        Raises KeyError when no key has been loaded."""
        if not self.is_valid():
            raise KeyError, "No key"
        b = M2Crypto.BIO.MemoryBuffer()
        c = M2Crypto.BIO.CipherStream(b)
        c.set_cipher(self.cypher_name, binascii.a2b_hex(self.key_str), binascii.a2b_hex(self.iv_str), 1)
        c.write(data)
        c.flush()
        c.close()
        e = b.read()
        return e

    # like encrypt, but base64 encoded
    def encrypt_base64(self, data):
        return binascii.b2a_base64(self.encrypt(data))

    # like encrypt, but hex encoded
    def encrypt_hex(self, data):
        return binascii.b2a_hex(self.encrypt(data))

    ###########################################
    # decrypt data inline
    def decrypt(self, data):
        """Decrypt raw ciphertext produced by encrypt().

        Raises KeyError when no key has been loaded."""
        if not self.is_valid():
            raise KeyError, "No key"
        b = M2Crypto.BIO.MemoryBuffer()
        c = M2Crypto.BIO.CipherStream(b)
        c.set_cipher(self.cypher_name, binascii.a2b_hex(self.key_str), binascii.a2b_hex(self.iv_str), 0)
        c.write(data)
        c.flush()
        c.close()
        d = b.read()
        return d

    # like decrypt, but base64 encoded
    def decrypt_base64(self, data):
        return self.decrypt(binascii.a2b_base64(data))

    # like decrypt, but hex encoded
    def decrypt_hex(self, data):
        return self.decrypt(binascii.a2b_hex(data))
# allows to change the crypto after instantiation
class MutableSymKey(SymKey):
    """SymKey variant whose cipher type can be (re)defined after creation."""
    def __init__(self,
                 cypher_name=None, key_len=None, iv_len=None,
                 key_str=None, iv_str=None,
                 key_iv_code=None):
        # Deliberately does not call SymKey.__init__; redefine() sets the
        # same attributes (except the unused ket_str).
        self.redefine(cypher_name, key_len, iv_len,
                      key_str, iv_str, key_iv_code)

    ###########################################
    # load a new crypto type and a new key
    def redefine(self,
                 cypher_name=None, key_len=None, iv_len=None,
                 key_str=None, iv_str=None,
                 key_iv_code=None):
        """Replace cipher name and lengths, then (re)load the key material."""
        self.cypher_name = cypher_name
        self.key_len = key_len
        self.iv_len = iv_len
        self.load(key_str, iv_str, key_iv_code)
        return

    ###########################################
    # get the stored key and the crypto name
    # redefine, as null crypto name could be used in this class
    def is_valid(self):
        return (self.key_str is not None) and (self.cypher_name is not None)

    def get_wcrypto(self):
        # Return (cypher name, hex key, hex iv).
        return (self.cypher_name, self.key_str, self.iv_str)
##########################################################################
# Parametrized sym algo classes
# dict of crypt_name -> (key_len, iv_len)
cypher_dict = {'aes_128_cbc':(16, 16),  # lengths in bytes; hex-string forms are 2x
               'aes_256_cbc':(32, 16),
               'bf_cbc':(16, 8),
               'des3':(24, 8),
               'des_cbc':(8, 8)}
class ParametryzedSymKey(SymKey):
def __init__(self, cypher_name,
key_str=None, iv_str=None,
key_iv_code=None):
if not (cypher_name in cypher_dict.keys()):
raise KeyError, "Unsupported cypher %s" % cypher_name
cypher_params = cypher_dict[cypher_name]
SymKey.__init__(self, cypher_name, cypher_params[0], cypher_params[1], key_str, iv_str, key_iv_code)
# get cypher name from key_iv_code
class AutoSymKey(MutableSymKey):
    """MutableSymKey that deduces the cipher name from a key_iv_code."""
    def __init__(self,
                 key_iv_code=None):
        self.auto_load(key_iv_code)

    ###############################################
    # load a new key_iv_key and extract the cypther
    def auto_load(self, key_iv_code=None):
        """Parse 'cypher:NAME,key:HEX,iv:HEX' and configure accordingly."""
        if key_iv_code is None:
            self.cypher_name = None
            self.key_str = None
            # NOTE(review): iv_str/key_len/iv_len are not reset here — confirm intended
        else:
            key_iv_code = str(key_iv_code) # just in case it was unicode
            ki_arr = key_iv_code.split(',')
            if len(ki_arr) != 3:
                raise ValueError, "Invalid format, comas not found"
            if ki_arr[0][:7] != 'cypher:':
                raise ValueError, "Invalid format, cypher not found"
            cypher_name = ki_arr[0][7:]
            if ki_arr[1][:4] != 'key:':
                raise ValueError, "Invalid format, key not found"
            key_str = ki_arr[1][4:]
            if ki_arr[2][:3] != 'iv:':
                raise ValueError, "Invalid format, iv not found"
            iv_str = ki_arr[2][3:]

            # Unsupported cipher names raise KeyError from this lookup.
            cypher_params = cypher_dict[cypher_name]
            self.redefine(cypher_name, cypher_params[0], cypher_params[1], key_str, iv_str)
##########################################################################
# Explicit sym algo classes
class SymAES128Key(ParametryzedSymKey):
    """AES-128-CBC symmetric key."""
    def __init__(self,
                 key_str=None, iv_str=None,
                 key_iv_code=None):
        ParametryzedSymKey.__init__(self, 'aes_128_cbc', key_str, iv_str, key_iv_code)

class SymAES256Key(ParametryzedSymKey):
    """AES-256-CBC symmetric key."""
    def __init__(self,
                 key_str=None, iv_str=None,
                 key_iv_code=None):
        ParametryzedSymKey.__init__(self, 'aes_256_cbc', key_str, iv_str, key_iv_code)

class SymBlowfishKey(ParametryzedSymKey):
    """Blowfish-CBC symmetric key."""
    def __init__(self,
                 key_str=None, iv_str=None,
                 key_iv_code=None):
        ParametryzedSymKey.__init__(self, 'bf_cbc', key_str, iv_str, key_iv_code)

class Sym3DESKey(ParametryzedSymKey):
    """Triple-DES symmetric key."""
    def __init__(self,
                 key_str=None, iv_str=None,
                 key_iv_code=None):
        ParametryzedSymKey.__init__(self, 'des3', key_str, iv_str, key_iv_code)

class SymDESKey(ParametryzedSymKey):
    """DES-CBC symmetric key."""
    def __init__(self,
                 key_str=None, iv_str=None,
                 key_iv_code=None):
        ParametryzedSymKey.__init__(self, 'des_cbc', key_str, iv_str, key_iv_code)
#def debug_print(description, text):
# print "<%s>\n%s\n</%s>\n" % (description,text,description)
#
#def test():
# plaintext = "5105105105105100"
#
# sk=SymAES256Key()
# sk.new()
#
# key_iv_code=sk.get_code()
#
# encrypted = sk.encrypt_hex(plaintext)
#
# sk2=AutoSymKey(key_iv_code=key_iv_code)
# decrypted = sk2.decrypt_hex(encrypted)
#
# assert plaintext == decrypted
#
# debug_print("key_id", key_iv_code)
# debug_print("plain text", plaintext)
# debug_print("cipher text", encrypted)
# debug_print("decrypted text", decrypted)
|
deandunbar/html2bwml | refs/heads/master | venv/lib/python2.7/site-packages/OpenSSL/rand.py | 11 | """
PRNG management routines, thin wrappers.
See the file RATIONALE for a short explanation of why this module was written.
"""
from functools import partial
from six import integer_types as _integer_types
from OpenSSL._util import (
ffi as _ffi,
lib as _lib,
exception_from_error_queue as _exception_from_error_queue)
class Error(Exception):
    """
    An error occurred in an `OpenSSL.rand` API.
    """

# Raises Error populated from OpenSSL's thread-local error queue.
_raise_current_error = partial(_exception_from_error_queue, Error)

_unspecified = object()  # sentinel: distinguishes "argument not passed" from None

# Keep a handle on the builtin before bytes() below shadows the name.
_builtin_bytes = bytes
def bytes(num_bytes):
    """
    Get some random bytes as a string.

    :param num_bytes: The number of bytes to fetch
    :return: A string of random bytes
    """
    if not isinstance(num_bytes, _integer_types):
        raise TypeError("num_bytes must be an integer")
    if num_bytes < 0:
        raise ValueError("num_bytes must not be negative")

    buf = _ffi.new("char[]", num_bytes)
    if _lib.RAND_bytes(buf, num_bytes) == -1:
        # TODO: No tests for this code path. Triggering a RAND_bytes failure
        # might involve supplying a custom ENGINE? That's hard.
        _raise_current_error()
    return _ffi.buffer(buf)[:]
def add(buffer, entropy):
    """
    Add data with a given entropy to the PRNG

    :param buffer: Buffer with random data
    :param entropy: The entropy (in bytes) measurement of the buffer
    :return: None
    """
    if not isinstance(buffer, _builtin_bytes):
        raise TypeError("buffer must be a byte string")

    if not isinstance(entropy, int):
        raise TypeError("entropy must be an integer")

    # TODO Nothing tests this call actually being made, or made properly.
    _lib.RAND_add(buffer, len(buffer), entropy)

def seed(buffer):
    """
    Alias for rand_add, with entropy equal to length

    :param buffer: Buffer with random data
    :return: None
    """
    if not isinstance(buffer, _builtin_bytes):
        raise TypeError("buffer must be a byte string")

    # TODO Nothing tests this call actually being made, or made properly.
    _lib.RAND_seed(buffer, len(buffer))

def status():
    """
    Retrieve the status of the PRNG

    :return: True if the PRNG is seeded enough, false otherwise
    """
    # NOTE(review): returns RAND_status()'s raw int (1/0), which is truthy
    # per the docstring but not an actual bool — confirm callers don't
    # rely on identity with True/False.
    return _lib.RAND_status()
def egd(path, bytes=_unspecified):
    """
    Query an entropy gathering daemon (EGD) for random data and add it to the
    PRNG. I haven't found any problems when the socket is missing, the function
    just returns 0.

    :param path: The path to the EGD socket
    :param bytes: (optional) The number of bytes to read, default is 255
    :returns: The number of bytes read (NB: a value of 0 isn't necessarily an
        error, check rand.status())
    """
    if not isinstance(path, _builtin_bytes):
        raise TypeError("path must be a byte string")

    if bytes is _unspecified:
        bytes = 255
    elif not isinstance(bytes, int):
        raise TypeError("bytes must be an integer")

    return _lib.RAND_egd_bytes(path, bytes)

def cleanup():
    """
    Erase the memory used by the PRNG.

    :return: None
    """
    # TODO Nothing tests this call actually being made, or made properly.
    _lib.RAND_cleanup()

def load_file(filename, maxbytes=_unspecified):
    """
    Seed the PRNG with data from a file

    :param filename: The file to read data from
    :param maxbytes: (optional) The number of bytes to read, default is
        to read the entire file
    :return: The number of bytes read
    """
    if not isinstance(filename, _builtin_bytes):
        # NOTE(review): message says "string" but the check requires bytes
        raise TypeError("filename must be a string")

    if maxbytes is _unspecified:
        maxbytes = -1
    elif not isinstance(maxbytes, int):
        raise TypeError("maxbytes must be an integer")

    return _lib.RAND_load_file(filename, maxbytes)

def write_file(filename):
    """
    Save PRNG state to a file

    :param filename: The file to write data to
    :return: The number of bytes written
    """
    if not isinstance(filename, _builtin_bytes):
        # NOTE(review): message says "string" but the check requires bytes
        raise TypeError("filename must be a string")

    return _lib.RAND_write_file(filename)
# TODO There are no tests for screen at all
def screen():
    """
    Add the current contents of the screen to the PRNG state. Availability:
    Windows.

    :return: None
    """
    _lib.RAND_screen()

# RAND_screen exists only on Windows builds of OpenSSL: when the binding
# did not expose it, delete the wrapper so the module simply lacks the
# attribute on other platforms.
if getattr(_lib, 'RAND_screen', None) is None:
    del screen
# TODO There are no tests for the RAND strings being loaded, whatever that
# means.
_lib.ERR_load_RAND_strings()
|
marcoserafini/h-store | refs/heads/master | third_party/python/boto/route53/__init__.py | 20 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# this is here for backward compatibility
# originally, the Route53Connection class was defined here
from connection import Route53Connection
|
ruibarreira/linuxtrail | refs/heads/master | usr/lib/python3/dist-packages/orca/scripts/apps/pidgin/speech_generator.py | 6 | # Orca
#
# Copyright 2004-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import orca.speech_generator as speech_generator
########################################################################
# #
# Custom SpeechGenerator #
# #
########################################################################
class SpeechGenerator(speech_generator.SpeechGenerator):
    """Overrides _generateExpandableState so that we can provide access
    to the expanded/collapsed state and node count for the buddy list.

    Both overridden generators need the same substitution (speak about the
    hidden expander cell instead of the visible cell), so the lookup is
    factored into _getBuddyListTarget instead of being duplicated.
    """

    # pylint: disable-msg=W0142

    def __init__(self, script):
        speech_generator.SpeechGenerator.__init__(self, script)

    def _getBuddyListTarget(self, obj):
        """Return the object the base generators should describe for *obj*.

        The Pidgin buddy list consists of two columns. The column which is
        set as the expander column and which also contains the node
        relationship is hidden. Hidden columns are not included among a
        table's columns. The hidden object of interest seems to always
        immediately precede the visible object. Outside the buddy list (or
        when no expander cell is found) the object itself is used.
        """
        if self._script.chat.isInBuddyList(obj):
            expanderCell = obj.parent[obj.getIndexInParent() - 1]
            if expanderCell:
                return expanderCell
        return obj

    def _generateExpandableState(self, obj, **args):
        target = self._getBuddyListTarget(obj)
        result = []
        result.extend(
            speech_generator.SpeechGenerator._generateExpandableState(
                self, target, **args))
        return result

    def _generateNumberOfChildren(self, obj, **args):
        target = self._getBuddyListTarget(obj)
        result = []
        result.extend(
            speech_generator.SpeechGenerator._generateNumberOfChildren(
                self, target, **args))
        return result
|
sharhar/USB-Thing | refs/heads/master | UpdaterFiles/Lib/python-3.5.1.amd64/Lib/sqlite3/dump.py | 149 | # Mimic the sqlite3 console shell's .dump command
# Author: Paul Kippes <kippesp@gmail.com>
# Every identifier in sql is quoted based on a comment in sqlite
# documentation "SQLite adds new keywords from time to time when it
# takes on new features. So to prevent your code from being broken by
# future enhancements, you should normally quote any identifier that
# is an English language word, even if you do not have to."
def _iterdump(connection):
"""
Returns an iterator to the dump of the database in an SQL text format.
Used to produce an SQL dump of the database. Useful to save an in-memory
database for later restoration. This function should not be called
directly but instead called from the Connection method, iterdump().
"""
cu = connection.cursor()
yield('BEGIN TRANSACTION;')
# sqlite_master table contains the SQL CREATE statements for the database.
q = """
SELECT "name", "type", "sql"
FROM "sqlite_master"
WHERE "sql" NOT NULL AND
"type" == 'table'
ORDER BY "name"
"""
schema_res = cu.execute(q)
for table_name, type, sql in schema_res.fetchall():
if table_name == 'sqlite_sequence':
yield('DELETE FROM "sqlite_sequence";')
elif table_name == 'sqlite_stat1':
yield('ANALYZE "sqlite_master";')
elif table_name.startswith('sqlite_'):
continue
# NOTE: Virtual table support not implemented
#elif sql.startswith('CREATE VIRTUAL TABLE'):
# qtable = table_name.replace("'", "''")
# yield("INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)"\
# "VALUES('table','{0}','{0}',0,'{1}');".format(
# qtable,
# sql.replace("''")))
else:
yield('{0};'.format(sql))
# Build the insert statement for each row of the current table
table_name_ident = table_name.replace('"', '""')
res = cu.execute('PRAGMA table_info("{0}")'.format(table_name_ident))
column_names = [str(table_info[1]) for table_info in res.fetchall()]
q = """SELECT 'INSERT INTO "{0}" VALUES({1})' FROM "{0}";""".format(
table_name_ident,
",".join("""'||quote("{0}")||'""".format(col.replace('"', '""')) for col in column_names))
query_res = cu.execute(q)
for row in query_res:
yield("{0};".format(row[0]))
# Now when the type is 'index', 'trigger', or 'view'
q = """
SELECT "name", "type", "sql"
FROM "sqlite_master"
WHERE "sql" NOT NULL AND
"type" IN ('index', 'trigger', 'view')
"""
schema_res = cu.execute(q)
for name, type, sql in schema_res.fetchall():
yield('{0};'.format(sql))
yield('COMMIT;')
|
hychrisli/PyAlgorithms | refs/heads/master | src/tests/part2/q210_test_course_schedule_ii.py | 1 | from src.base.test_cases import TestCases
class CourseScheduleIiTestCases(TestCases):
    """Test cases for LeetCode 210 (Course Schedule II).

    Each case maps (num_courses, prerequisite_pairs) to a tuple of the
    acceptable topological orderings; an empty list means the prerequisite
    graph has a cycle and no valid ordering exists (Test 4).
    """
    def __init__(self):
        super(CourseScheduleIiTestCases, self).__init__()
        self.__add_test_case__('Test 1', (2, [[1,0]]), ([0, 1],))
        self.__add_test_case__('Test 2', (4, [[1,0],[2,0],[3,1],[3,2]]), ([0, 1, 2, 3], [0,2,1,3]))
        self.__add_test_case__('Test 3', (4, [[1,0],[2,0],[3,1],[2,3]]), ([0, 1, 3, 2], ))
        self.__add_test_case__('Test 4', (4, [[1, 0], [1, 2], [3, 1], [2, 3]]), ([],))
        self.__add_test_case__('Test 5', (1, []), ([0],))
        self.__add_test_case__('Test 6', (2, []), ([1,0], [0,1]))
        self.__add_test_case__('Test 7', (3, [[1, 0]]), ([0, 1, 2], [0, 2, 1]))
|
imruahmed/microblog | refs/heads/master | flask/lib/python2.7/site-packages/sqlalchemy/dialects/sybase/pysybase.py | 80 | # sybase/pysybase.py
# Copyright (C) 2010-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase+pysybase
:name: Python-Sybase
:dbapi: Sybase
:connectstring: sybase+pysybase://<username>:<password>@<dsn>/\
[database name]
:url: http://python-sybase.sourceforge.net/
Unicode Support
---------------
The python-sybase driver does not appear to support non-ASCII strings of any
kind at this time.
"""
from sqlalchemy import types as sqltypes, processors
from sqlalchemy.dialects.sybase.base import SybaseDialect, \
SybaseExecutionContext, SybaseSQLCompiler
class _SybNumeric(sqltypes.Numeric):
    """Numeric type that returns plain floats when ``asdecimal`` is off."""

    def result_processor(self, dialect, type_):
        # When Decimal results were requested, defer to the stock Numeric
        # processor; otherwise coerce values straight to float.
        if self.asdecimal:
            return sqltypes.Numeric.result_processor(self, dialect, type_)
        return processors.to_float
class SybaseExecutionContext_pysybase(SybaseExecutionContext):
    """Execution context tuned for the python-sybase driver."""

    def set_ddl_autocommit(self, dbapi_connection, value):
        # Commit on the raw DBAPI connection directly, to avoid any side
        # effects of calling a Connection transactional method inside of
        # pre_exec().
        if value:
            dbapi_connection.commit()

    def pre_exec(self):
        SybaseExecutionContext.pre_exec(self)
        # python-sybase expects parameters keyed as "@name"; rewrite each
        # parameter dict in place.
        for param_dict in self.parameters:
            for original_key in list(param_dict):
                param_dict["@" + original_key] = param_dict.pop(original_key)
class SybaseSQLCompiler_pysybase(SybaseSQLCompiler):
    """SQL compiler emitting python-sybase's "@"-prefixed bind names."""

    def bindparam_string(self, name, **kw):
        return "@%s" % (name,)
class SybaseDialect_pysybase(SybaseDialect):
    # Dialect implementation for the python-sybase ("Sybase") DBAPI driver.
    driver = 'pysybase'
    execution_ctx_cls = SybaseExecutionContext_pysybase
    statement_compiler = SybaseSQLCompiler_pysybase

    colspecs = {
        sqltypes.Numeric: _SybNumeric,
        sqltypes.Float: sqltypes.Float
    }

    @classmethod
    def dbapi(cls):
        # Deferred import so the dialect can be registered even when the
        # driver is not installed.
        import Sybase
        return Sybase

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user', password='passwd')
        # python-sybase takes the DSN/host as the single positional argument;
        # the remaining options are passed as keyword arguments.
        return ([opts.pop('host')], opts)

    def do_executemany(self, cursor, statement, parameters, context=None):
        # calling python-sybase executemany yields:
        # TypeError: string too long for buffer
        # so fall back to one execute() per parameter set.
        for param in parameters:
            cursor.execute(statement, param)

    def _get_server_version_info(self, connection):
        vers = connection.scalar("select @@version_number")
        # i.e. 15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0),
        # (12, 5, 0, 0)
        # NOTE(review): this relies on Python 2 integer division; under
        # Python 3 "/" would produce floats — confirm before porting.
        return (vers / 1000, vers % 1000 / 100, vers % 100 / 10, vers % 10)

    def is_disconnect(self, e, connection, cursor):
        # Message fragments emitted by python-sybase on a dead or broken
        # connection.
        if isinstance(e, (self.dbapi.OperationalError,
                          self.dbapi.ProgrammingError)):
            msg = str(e)
            return ('Unable to complete network request to host' in msg or
                    'Invalid connection state' in msg or
                    'Invalid cursor state' in msg)
        else:
            return False

dialect = SybaseDialect_pysybase
|
Som-Energia/somenergia-generationkwh | refs/heads/master | som_generationkwh/somenergia_soci.py | 1 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from osv import osv, fields
from tools.translate import _
from tools import config
from oorq.decorators import job
from datetime import datetime, date
def field_function(ff_func):
    """Decorator for ORM field functions that stringifies result keys for
    XML-RPC callers.

    XML-RPC cannot marshal dictionaries with integer keys, so when the call's
    context carries ``xmlrpc=True`` every key of the returned dict is cast to
    ``str``. In every other case the result is returned untouched.

    Bug fix: the original guard (``if ctx or ctx is None: return res``)
    returned early for *every* non-empty context, and the fallback referenced
    the nonexistent ``res.tuples`` (dicts expose ``items()``), so the key
    stringification never actually ran.
    """
    def string_key(*args, **kwargs):
        res = ff_func(*args, **kwargs)
        ctx = kwargs.get('context', None)
        # No context, or not an XML-RPC call: nothing to adapt.
        if ctx is None or not ctx.get('xmlrpc', False):
            return res
        # XML-RPC marshalling requires string keys.
        return dict([(str(key), value) for key, value in res.items()])
    return string_key
class SomenergiaSoci(osv.osv):
    """Extends somenergia.soci with Generation kWh (GkWh) information."""

    _name = 'somenergia.soci'
    _inherit = 'somenergia.soci'

    @field_function
    def _ff_investments(self, cursor, uid, ids, field_names, args,
                        context=None):
        """Computes the GkWh function fields for the requested members.

        Fills, per member id, any of: 'has_gkwh' (member has at least one
        effective investment), 'active_shares' (shares effective today) and
        'estimated_anual_kwh' (active shares times current kWh per share).
        """
        invest_obj = self.pool.get('generationkwh.investment')
        kwhpershare_obj = self.pool.get('generationkwh.kwh.per.share')
        if context is None:
            context = {}
        if not isinstance(ids, (tuple, list)):
            ids = [ids]
        # Default every requested field to False for every member.
        init_dict = dict([(f, False) for f in field_names])
        res = {}.fromkeys(ids, {})
        # fromkeys shares a single dict between keys; give each member its
        # own copy.
        for k in res.keys():
            res[k] = init_dict.copy()
        for member_id in ids:
            # All investments that ever were effective (no date bounds).
            investments = invest_obj.effective_investments(
                cursor, uid, member_id, None, None, context=context
            )
            member_data = res[member_id]
            if 'has_gkwh' in field_names:
                member_data['has_gkwh'] = len(investments) > 0
            # Current investments
            total_fields = ['active_shares', 'estimated_anual_kwh']
            if set(total_fields).intersection(field_names):
                today = date.today().strftime('%Y-%m-%d')
                # Investments effective today only.
                current_investments = invest_obj.effective_investments(
                    cursor, uid, member_id, today, today, context=context
                )
                total_investments = sum([i.shares for i in current_investments])
                if 'active_shares' in field_names:
                    member_data['active_shares'] = total_investments
                if 'estimated_anual_kwh' in field_names:
                    kwhpershare = kwhpershare_obj.get_kwh_per_date(
                        cursor, uid, context=context
                    )
                    total_kwh = kwhpershare * total_investments
                    member_data['estimated_anual_kwh'] = total_kwh
        return res

    def _search_has_gkwh(self, cursor, uid, obj, field_name, args,
                         context=None):
        """ Search has_gkwh members"""
        # Members with at least one investment in a 'genkwh' emission.
        sql = """SELECT distinct(member_id)
        FROM generationkwh_investment WHERE emission_id IN
        ( SELECT id from generationkwh_emission where type = 'genkwh')"""
        cursor.execute(sql)
        vals = [v[0] for v in cursor.fetchall()]
        return [('id', 'in', vals)]

    def add_gkwh_comment(self, cursor, uid, member_id, text, context=None):
        """ Adds register logs in gkwh_comments"""
        Users = self.pool.get('res.users')

        member_vals = self.read(cursor, uid, member_id, ['gkwh_comments'])
        user_vals = Users.read(cursor, uid, uid, ['name', 'login'])

        # Timestamped header identifying the user writing the comment.
        header_tmpl = (
            u"\n----- {0} - {1} ------------------------------------\n"
        )
        header = header_tmpl.format(
            datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            user_vals['name'],
        )
        comment_txt = u"{0}{1}\n".format(header, text)
        # Newest comments are prepended to the existing text.
        comments = comment_txt + (member_vals['gkwh_comments'] or '')
        self.write(cursor, uid, member_id, {'gkwh_comments': comments})
        return comment_txt

    def poweremail_write_callback(self, cursor, uid, ids, vals, context=None):
        """Hook called by poweremail when an email is modified from a member.

        Marks the members as notified when the template used is the GkWh
        default-assignment notification mail.
        """
        if context is None:
            context = {}
        # super(SomenergiaSoci, self).poweremail_write_callback(
        #     cursor, uid, vals, context=context
        # )
        imd_model = self.pool.get('ir.model.data')
        model, template_id = imd_model.get_object_reference(
            cursor, uid, 'som_generationkwh',
            'generationkwh_assignment_notification_mail'
        )
        if template_id == int(context.get('template_id', '0')):
            for member_id in ids:
                member_vals = {'gkwh_assignment_notified': True}
                self.write(cursor, uid, member_id, member_vals, context=context)
        return True

    @job(queue="mailchimp_tasks")
    def arxiva_socia_mailchimp_async(self, cursor, uid, ids, context=None):
        """
        Archive member async method
        """
        return self.arxiva_socia_mailchimp(cursor, uid, ids, context=context)

    def arxiva_socia_mailchimp(self, cursor, uid, ids, context=None):
        """Archives the members' email addresses in the Mailchimp list."""
        import mailchimp_marketing as MailchimpMarketing

        if not isinstance(ids, (list, tuple)):
            ids = [ids]

        MAILCHIMP_CLIENT = MailchimpMarketing.Client(
            dict(api_key=config.options.get('mailchimp_apikey'),
                 server=config.options.get('mailchimp_server_prefix')
                 ))
        conf_obj = self.pool.get('res.config')
        res_partner_obj = self.pool.get('res.partner')
        res_partner_address_obj = self.pool.get('res.partner.address')
        list_name = conf_obj.get(
            cursor, uid, 'mailchimp_socis_list', None)
        list_id = res_partner_address_obj.get_mailchimp_list_id(list_name, MAILCHIMP_CLIENT)
        # Archive every address of every member's partner.
        for partner_id in self.read(cursor, uid, ids,['partner_id']):
            address_list = res_partner_obj.read(cursor, uid, partner_id['partner_id'][0], ['address'])['address']
            res_partner_address_obj.archieve_mail_in_list(cursor, uid, address_list, list_id, MAILCHIMP_CLIENT)

    def verifica_baixa_soci(self, cursor, uid, ids, context=None):
        # - Check if the member has Generation kWh: the generation model has an
        #   attribute indicating it; alternatively the investments can be searched.
        # - Check if the member has investments in force: search active investments.
        # - Check if the member has active contracts: search contracts in force.
        # - Check if the member has unpaid invoices: for this check there is a task
        #   on the OV that can help, done by Fran in the following PR:
        #   https://github.com/gisce/erp/pull/7997/files
        """Deregisters ("baixa") a member, after validating it is allowed.
        """
        if not context:
            context = {}
        if not ids:
            return
        if isinstance(ids, (int, long)):
            ids = [ids]
        if len(ids) != 1:
            # NOTE(review): the check requires exactly one id while the
            # message says "at least one"; also except_osv usually takes
            # (title, message) — confirm intent.
            raise osv.except_osv(_('Com ha minim es necessita un soci'))
        imd_obj = self.pool.get('ir.model.data')
        invest_obj = self.pool.get('generationkwh.investment')
        pol_obj = self.pool.get('giscedata.polissa')
        fact_obj = self.pool.get('giscedata.facturacio.factura')
        soci_obj = self.pool.get('somenergia.soci')
        today = datetime.today().strftime('%Y-%m-%d')
        member_id = ids[0]
        res_partner_id = soci_obj.read(cursor, uid, member_id, ['partner_id'])['partner_id'][0]
        baixa = soci_obj.read(cursor, uid, [member_id], ['baixa'])[0]['baixa']
        if baixa:
            raise osv.except_osv(_('El soci no pot ser donat de baixa!'),
                                 _('Ja ha estat donat de baixa anteriorment!'))
        # emission_id 1: Generation kWh emission (hard-coded id — see message).
        gen_invest = invest_obj.search(cursor, uid, [('member_id', '=', member_id),
                                                     ('emission_id', '=', 1),
                                                     ('last_effective_date', '>=', today)])
        if gen_invest:
            raise osv.except_osv(_('El soci no pot ser donat de baixa!'),
                                 _('El soci té inversions de generation actives.'))
        # emission_id 2: "aportacions" emission (hard-coded id — see message).
        apo_invest = invest_obj.search(cursor, uid, [('member_id', '=', member_id),
                                                     ('emission_id', '=', 2),
                                                     '|', ('last_effective_date', '=', False),
                                                     ('last_effective_date', '>=', today)])
        if apo_invest:
            raise osv.except_osv(_('El soci no pot ser donat de baixa!'), _('El soci té aportacions actives.'))
        factures_pendents = fact_obj.search(cursor, uid, [('partner_id', '=', res_partner_id),
                                                          ('state', 'not in', ['cancel', 'paid']),
                                                          ('type', '=', 'out_invoice')])
        if factures_pendents:
            raise osv.except_osv(_('El soci no pot ser donat de baixa!'), _('El soci té factures pendents.'))
        polisses = pol_obj.search(cursor, uid,
                                  [('titular', '=', res_partner_id),
                                   ('state', '!=', 'baixa'),
                                   ('state', '!=', 'cancelada')])
        if polisses:
            raise osv.except_osv(_('El soci no pot ser donat de baixa!'), _('El soci té al menys un contracte actiu.'))
        soci_category_id = imd_obj.get_object_reference(
            cursor, uid, 'som_partner_account', 'res_partner_category_soci'
        )[1]

        def delete_rel(cursor, uid, categ_id, res_partner_id):
            # Remove the "soci" category link from the partner.
            cursor.execute('delete from res_partner_category_rel where category_id=%s and partner_id=%s',(categ_id, res_partner_id))

        res_users = self.pool.get('res.users')
        usuari = res_users.read(cursor, uid, uid, ['name'])['name']
        # Append an audit line to the member's comment field.
        old_comment = soci_obj.read(cursor, uid, [member_id], ['comment'])[0]['comment']
        old_comment = old_comment + '\n' if old_comment else ''
        comment = "{}Baixa efectuada a data {} per: {}".format(old_comment, today, usuari)
        soci_obj.write(cursor, uid, [member_id], {'baixa': True,
                                                  'data_baixa_soci': today,
                                                  'comment': comment })
        delete_rel(cursor, uid, soci_category_id, res_partner_id)
        # Mailchimp archiving is queued asynchronously.
        self.arxiva_socia_mailchimp_async(cursor, uid, member_id)
        return True

    _columns = {
        'has_gkwh': fields.function(
            _ff_investments, string='Te drets GkWh', readonly=True,
            fnct_search=_search_has_gkwh, type='boolean', method=True,
            multi='investments'
        ),
        'active_shares': fields.function(
            _ff_investments, string='Accions actives', readonly=True,
            type='integer', method=True,
            multi='investments'
        ),
        'estimated_anual_kwh': fields.function(
            _ff_investments, string='Previsió de kWh anual', readonly=True,
            type='integer', method=True,
            multi='investments'
        ),
        'investment_ids': fields.one2many(
            'generationkwh.investment', 'member_id', string="Inversions",
            readonly=True,
            context={'active_test': False},
        ),
        'assignment_ids': fields.one2many(
            'generationkwh.assignment', 'member_id', string="Assignacions"
        ),
        'gkwh_assignment_notified': fields.boolean(
            'Assignació per defecte notificada',
            help=u"Indica que ja s'ha notificat l'assignació per defecte. "
                 u"S'activa quan s'envia el mail"
        ),
        'gkwh_comments': fields.text('Observacions'),
    }

    _defaults = {
        'gkwh_assignment_notified': lambda *a: False,
    }

SomenergiaSoci()
class GenerationkWhkWhxShare(osv.osv):
    """Versioned value of the yearly kWh that a single GkWh share yields."""

    _name = "generationkwh.kwh.per.share"
    _order = "version_start_date DESC"

    def get_kwh_per_date(self, cursor, uid, date=None, context=None):
        """ Returns kwh on date
        :param date: date of calc. today if None
        :return: kwh per share on date
        """
        if date is None:
            date = datetime.today().strftime("%Y-%m-%d")
        # Most recent version whose start date is not after the requested one.
        version_ids = self.search(
            cursor, uid, [('version_start_date', '<=', date)],
            order='version_start_date desc')
        newest_id = version_ids[0]
        return self.read(cursor, uid, newest_id, ['kwh'])['kwh']

    _columns = {
        'version_start_date': fields.date(u"Data Valor"),
        'kwh': fields.integer(u"kWh per acció"),
    }

GenerationkWhkWhxShare()
|
navotsil/Open-Knesset | refs/heads/master | simple/management/commands/syncvotes.py | 9 | from django.core.management.base import NoArgsCommand
from django.http import HttpRequest
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from laws.views import VoteCsvView
class Command(NoArgsCommand):
    """Regenerates the cached votes.csv file in media storage."""
    help = "Updates votes.csv file in media"

    def handle_noargs(self, **options):
        # Render the CSV view internally instead of going through HTTP.
        response = VoteCsvView().dispatch(HttpRequest())
        csv_file = ContentFile(response.content)
        target_path = VoteCsvView.filename
        # default_storage.save would otherwise pick a fresh unique name,
        # so drop any stale copy first.
        if default_storage.exists(target_path):
            default_storage.delete(target_path)
        default_storage.save(target_path, csv_file)
|
EUDAT-B2SHARE/invenio-old | refs/heads/next | modules/bibmatch/lib/bibmatch_validator.py | 9 | ## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibMatch - tool to match records with database content of
an Invenio instance, either locally or remotely.
bibmatch_validator - module containing functions for match validation step
"""
__revision__ = "$Id$"
import re
import sys
import pprint
import difflib
from invenio.config import CFG_BIBMATCH_MATCH_VALIDATION_RULESETS, \
CFG_BIBMATCH_FUZZY_MATCH_VALIDATION_LIMIT
from invenio.bibmatch_config import CFG_BIBMATCH_VALIDATION_MATCHING_MODES, \
CFG_BIBMATCH_VALIDATION_RESULT_MODES, \
CFG_BIBMATCH_VALIDATION_COMPARISON_MODES, \
CFG_BIBMATCH_LOGGER
from invenio.bibrecord import create_records, record_get_field_values
from invenio.xmlmarc2textmarc import get_sysno_from_record, create_marc_record
from invenio.bibauthorid_name_utils import compare_names
from invenio.bibauthorid_name_utils import string_partition
from invenio.textutils import translate_to_ascii
# A valid MARC tag reference: three digits followed by up to three
# indicator/subfield characters (letters, digits, '_' or the '%' wildcard).
re_valid_tag = re.compile("^[0-9]{3}[a-zA-Z0-9_%]{0,3}$")
def validate_matches(bibmatch_recid, record, server, result_recids, \
                     collections="", verbose=0, ascii_mode=False):
    """
    Perform record validation on a set of matches. This function will
    try to find any search-result that "really" is a correct match, based on
    various methods defined in a given rule-set. See more about rule-sets in
    validate_match() function documentation.

    This function will return a tuple containing a list of all record IDs
    satisfying the count of field matching needed for exact matches and a
    similar list for fuzzy matches that has less fields matching then the
    threshold. Records that are not matching at all are simply left out of
    the lists.

    @param bibmatch_recid: Current record number. Used for logging.
    @type bibmatch_recid: int

    @param record: bibrec structure of original record
    @type record: dict

    @param server: InvenioConnector object to matched record source repository
    @type server: InvenioConnector object

    @param result_recids: the list of record ids from search result.
    @type result_recids: list

    @param collections: list of collections to search, if specified
    @type collections: list

    @param verbose: be loud
    @type verbose: int

    @param ascii_mode: True to transform values to its ascii representation
    @type ascii_mode: bool

    @return: tuple of two lists: (exact-match record IDs, fuzzy-match record IDs)
    @rtype: tuple
    """
    matches_found = []
    fuzzy_matches_found = []

    # Generate final rule-set by analyzing the record
    final_ruleset = get_validation_ruleset(record)
    if not final_ruleset:
        sys.stderr.write("Bad configuration rule-set. \
Please check that CFG_BIBMATCH_MATCH_VALIDATION_RULESETS is formed correctly.\n")
        return [], []

    if verbose > 8:
        sys.stderr.write("\nStart record validation:\n\nFinal validation ruleset used:\n")
        pp = pprint.PrettyPrinter(stream=sys.stderr, indent=2)
        pp.pprint(final_ruleset)

    # Fetch all records in MARCXML and convert to BibRec
    found_record_list = []
    for recid in result_recids:
        query = "001:%d" % (recid,)
        if collections:
            search_params = dict(p=query, of="xm", c=collections)
        else:
            search_params = dict(p=query, of="xm")
        result_marcxml = server.search_with_retry(**search_params)
        result_record_list = create_records(result_marcxml)
        # Check if record was found and BibRecord generation was successful
        if result_record_list == [] or \
           len(result_record_list) != 1 or \
           result_record_list[0] == None:
            # Error fetching a record. Unable to validate and returning with empty list.
            if verbose > 8:
                sys.stderr.write("\nError retrieving MARCXML for matched record %s\n" % (str(recid),))
            return [], []
        # Add a tuple of record ID (for easy look-up later) and BibRecord structure
        found_record_list.append((recid, result_record_list[0][0]))

    # Validate records one-by-one, adding any matches to the list of matching record IDs
    current_index = 1
    for recid, matched_record in found_record_list:
        if verbose > 8:
            sys.stderr.write("\n Validating matched record #%d (%s):\n" % \
                             (current_index, recid))

        CFG_BIBMATCH_LOGGER.info("Matching of record %d: Comparing to matched record %s" % \
                                 (bibmatch_recid, recid))
        match_ratio = validate_match(record, matched_record, final_ruleset, \
                                     verbose, ascii_mode)
        if match_ratio == 1.0:
            # All matches were a success, this is an exact match
            CFG_BIBMATCH_LOGGER.info("Matching of record %d: Exact match found -> %s" % (bibmatch_recid, recid))
            matches_found.append(recid)
        elif match_ratio >= CFG_BIBMATCH_FUZZY_MATCH_VALIDATION_LIMIT:
            # This means that some matches failed, but some succeeded as well. That's fuzzy...
            CFG_BIBMATCH_LOGGER.info("Matching of record %d: Fuzzy match found -> %s" % \
                                     (bibmatch_recid, recid))
            fuzzy_matches_found.append(recid)
        else:
            CFG_BIBMATCH_LOGGER.info("Matching of record %d: Not a match" % (bibmatch_recid,))
        current_index += 1

    # Return the tuple of (exact, fuzzy) matching record ID lists
    return matches_found, fuzzy_matches_found
def validate_match(org_record, matched_record, ruleset, verbose=0, ascii_mode=False):
    """
    This function will try to match the original record with matched record.
    This comparison uses various methods defined in configuration and/or
    determined from the source record.

    These methods can be derived from each rule-set defined, which contains a
    mapping of a certain pattern to a list of rules defining the "match-strategy".

    For example:

    ('260__', [{ 'tags' : '260__c',
                 'threshold' : 0.8,
                 'compare_mode' : 'lazy',
                 'match_mode' : 'date',
                 'result_mode' : 'normal' }])

    Quick run-down of possible values:
      Compare mode:
        'strict'    : all (sub-)fields are compared, and all must match. Order is significant.
        'normal'    : all (sub-)fields are compared, and all must match. Order is ignored.
        'lazy'      : all (sub-)fields are compared with each other and at least one must match
        'ignored'   : the tag is ignored in the match. Used to disable previously defined rules.

      Match mode:
        'title'     : uses a method specialized for comparing titles, e.g. looking for subtitles
        'author'    : uses a special authorname comparison. Will take initials into account.
        'identifier': special matching for identifiers, stripping away punctuation
        'date'      : matches dates by extracting and comparing the year
        'normal'    : normal string comparison.

      Result mode:
        'normal'    : a failed match will cause the validation to continue on other rules (if any)
                      a successful match will cause the validation to continue on other rules (if any)
        'final'     : a failed match will cause the validation to immediately exit as a failure.
                      a successful match will cause validation to immediately exit as a success.
        'joker'     : a failed match will cause the validation to continue on other rules (if any).
                      a successful match will cause validation to immediately exit as a success.

    Fields are considered matching when all its subfields or values match. ALL matching strategy
    must return successfully for a match to be validated (except for 'joker' mode).

    @param org_record: bibrec structure of original record
    @type org_record: dict

    @param matched_record: bibrec structure of matched record
    @type matched_record: dict

    @param ruleset: iterable of (tags, threshold, compare_mode, match_mode,
                    result_mode) tuples used when validating
    @type ruleset: list

    @param verbose: be loud
    @type verbose: int

    @param ascii_mode: True to transform values to its ascii representation
    @type ascii_mode: bool

    @return: Number of matches succeeded divided by number of comparisons done. At least two
             successful matches must be done unless a joker or final match is found
    @rtype: float
    """
    total_number_of_matches = 0
    total_number_of_comparisons = 0
    for field_tags, threshold, compare_mode, match_mode, result_mode in ruleset:
        field_tag_list = field_tags.split(',')
        if verbose > 8:
            sys.stderr.write("\nValidating tags: %s in parsing mode '%s' and comparison\
mode '%s' as '%s' result with threshold %0.2f\n" \
                % (field_tag_list, compare_mode, match_mode, \
                   result_mode, threshold))
        current_matching_status = False

        ## 1. COMPARE MODE
        # Fetch defined fields from both records
        original_record_values = []
        matched_record_values = []
        for field_tag in field_tag_list:
            tag_structure = validate_tag(field_tag)
            if tag_structure != None:
                tag, ind1, ind2, code = tag_structure
                # Fetch all field instances to match
                original_record_values.extend(record_get_field_values(\
                    org_record, tag, ind1, ind2, code))
                matched_record_values.extend(record_get_field_values(\
                    matched_record, tag, ind1, ind2, code))

        if (len(original_record_values) == 0 or len(matched_record_values) == 0):
            # Any or both records do not have values, ignore.
            if verbose > 8:
                sys.stderr.write("\nBoth records do not have this field. Continue.\n")
            continue

        if ascii_mode:
            original_record_values = translate_to_ascii(original_record_values)
            matched_record_values = translate_to_ascii(matched_record_values)

        ignore_order = True
        matches_needed = 0
        # How many field-value matches are needed for successful validation of this record
        if compare_mode == 'lazy':
            # 'lazy' : all fields are matched with each other, if any match = success
            matches_needed = 1
        elif compare_mode == 'normal':
            # 'normal' : all fields are compared, and all must match.
            # Order is ignored. The number of matches needed is equal
            # to the value count of original record
            matches_needed = len(original_record_values)
        elif compare_mode == 'strict':
            # 'strict' : all fields are compared, and all must match. Order matters.
            if len(original_record_values) != len(matched_record_values):
                # Not the same number of fields, not a valid match
                # Unless this is a joker, we return indicating failure
                if result_mode != 'joker':
                    return 0.0
                continue
            matches_needed = len(original_record_values)
            ignore_order = False
        if verbose > 8:
            sys.stderr.write("Total matches needed: %d -> " % (matches_needed,))

        ## 2. MATCH MODE
        total_number_of_comparisons += 1
        comparison_function = None
        if match_mode == 'title':
            # Special title mode
            comparison_function = compare_fieldvalues_title
        elif match_mode == 'author':
            # Special author mode
            comparison_function = compare_fieldvalues_authorname
        elif match_mode == 'identifier':
            # Special identifier mode
            comparison_function = compare_fieldvalues_identifier
        elif match_mode == 'date':
            # Special date mode
            comparison_function = compare_fieldvalues_date
        else:
            # Normal mode
            comparison_function = compare_fieldvalues_normal

        # Get list of comparisons to perform containing extracted values
        field_comparisons = get_paired_comparisons(original_record_values, \
                                                   matched_record_values, \
                                                   ignore_order)

        if verbose > 8:
            sys.stderr.write("Field comparison values:\n%s\n" % (field_comparisons,))

        # Run comparisons according to match_mode
        current_matching_status, matches = comparison_function(field_comparisons, \
                                                               threshold, \
                                                               matches_needed)
        CFG_BIBMATCH_LOGGER.info("-- Comparing fields %s with %s = %d matches of %d" % \
                                 (str(original_record_values), \
                                  str(matched_record_values), \
                                  matches, matches_needed))

        ## 3. RESULT MODE
        if current_matching_status:
            if verbose > 8:
                sys.stderr.write("Fields matched successfully.\n")
            if result_mode in ['final', 'joker']:
                # Matching success. Return 1.0 immediately, indicating an
                # exact match, when the rule is final or joker.
                return 1.0
            total_number_of_matches += 1
        else:
            # Matching failed. Not a valid match
            if result_mode == 'final':
                # Final does not allow failure
                return 0.0
            elif result_mode == 'joker':
                # Jokers count as a match even when the comparison failed
                total_number_of_matches += 1
                if verbose > 8:
                    sys.stderr.write("Fields not matching. (Joker)\n")
            else:
                if verbose > 8:
                    sys.stderr.write("Fields not matching. \n")
    # Require at least two successful rule matches for a non-final result.
    if total_number_of_matches < 2 or total_number_of_comparisons == 0:
        return 0.0
    return total_number_of_matches / float(total_number_of_comparisons)
def transform_record_to_marc(record, options=None):
    """ This function will transform a given bibrec record into marc using
    methods from xmlmarc2textmarc in invenio.textutils. The function returns the
    record as a MARC string.

    @param record: bibrec structure for record to transform
    @type record: dict

    @param options: dictionary describing type of MARC record.
                    Defaults to textmarc: {'text-marc': 1, 'aleph-marc': 0}.
    @type options: dict

    @return resulting MARC record as string """
    # A mutable default argument would be shared across calls (and could be
    # mutated by callees); build the default dict per call instead.
    if options is None:
        options = {'text-marc': 1, 'aleph-marc': 0}
    sysno = get_sysno_from_record(record, options)
    # Note: Record dict is copied as create_marc_record() perform deletions
    return create_marc_record(record.copy(), sysno, options)
def compare_fieldvalues_normal(field_comparisons, threshold, matches_needed):
    """
    Validate fields using a plain normalized string-distance metric.

    Each candidate pair is lower-cased and stripped of surrounding whitespace,
    then scored with difflib.SequenceMatcher; a pair whose ratio reaches the
    threshold counts as a match for its field. As soon as enough fields have
    matched the function returns early.

    @param field_comparisons: list of comparisons, each which contains a list
                              of field-value to field-value comparisons.
    @type field_comparisons: list

    @param threshold: number describing the match threshold a comparison must
                      exceed to become a positive match.
    @type threshold: float

    @param matches_needed: number of positive field matches needed for the entire
                           comparison process to give a positive result.
    @type matches_needed: int

    @return: tuple of matching result, True if enough matches are found, False if not,
             and number of matches.
    @rtype: tuple
    """
    hits = 0
    for candidate_pairs in field_comparisons:
        # First matching pair settles this field; move on to the next one.
        for left, right in candidate_pairs:
            similarity = difflib.SequenceMatcher(None,
                                                 left.lower().strip(),
                                                 right.lower().strip()).ratio()
            if similarity >= threshold:
                hits += 1
                break
        # Enough fields matched already: succeed without looking further.
        if hits >= matches_needed:
            return True, hits
    return hits >= matches_needed, hits
def compare_fieldvalues_authorname(field_comparisons, threshold, matches_needed):
    """
    Performs field validation given an list of field comparisons using a technique
    that is meant for author-names taking into account initials vs. full-name,
    using matching techniques available from BibAuthorId.

    Each comparison is done according to given threshold which the result must
    be equal or above to match.

    During validation the fields are compared and matches are counted per
    field, up to the given amount of matches needed is met, causing the
    function to return True. If validation ends before this threshold is met
    it will return False.

    @param field_comparisons: list of comparisons, each which contains a list
                              of field-value to field-value comparisons.
    @type field_comparisons: list

    @param threshold: number describing the match threshold a comparison must
                      exceed to become a positive match.
    @type threshold: float

    @param matches_needed: number of positive field matches needed for the entire
                           comparison process to give a positive result.
    @type matches_needed: int

    @return: tuple of matching result, True if enough matches are found, False if not,
             and number of matches.
    @rtype: tuple
    """
    matches_found = 0
    # Loop over all possible comparisons field by field, if a match is found,
    # we are done with this field and break out to try and match next field.
    for comparisons in field_comparisons:
        for value, other_value in comparisons:
            # Grab both permutations of a name (before, after and after, before)
            # and compare to each unique commutative combination. Ex:
            # Doe,J vs. Smith,J -> [(('Smith,J', 'Doe,J'), ('Smith,J', 'J,Doe')),
            #                       (('J,Smith', 'Doe,J'), ('J,Smith', 'J,Doe'))]
            author_comparisons = [pair for pair in get_paired_comparisons(\
                                      get_reversed_string_variants(value), \
                                      get_reversed_string_variants(other_value))][0]
            for str1, str2 in author_comparisons:
                # Author-name comparison - using BibAuthorid function
                diff = compare_names(str1, str2)
                if diff >= threshold:
                    matches_found += 1
                    break
            else:
                # for/else: the else branch runs only when no permutation
                # matched, so we continue with the next value pair.
                continue
            # We break out as a match was found
            break
        # If we already have found required number of matches, we return immediately
        if matches_found >= matches_needed:
            return True, matches_found
    # Often authors are not matching fully, so lets allow for the number of matches to
    # be a little lower, using the same threshold
    result = matches_found >= matches_needed or matches_found / float(matches_needed) > threshold
    return result, matches_found
def compare_fieldvalues_identifier(field_comparisons, threshold, matches_needed):
    """
    Validate fields holding identifiers (report numbers, ISBNs, DOIs, ...).

    Values are normalised before comparison: lower-cased, with punctuation
    runs removed and a zero preceded by a non-digit dropped together with
    that character, e.g. 'DESY-F35D-97-04' -> 'desyf35d974'. The normalised
    strings are then scored with difflib.SequenceMatcher against the given
    threshold; the function returns early once enough fields have matched.

    @param field_comparisons: list of comparisons, each which contains a list
                              of field-value to field-value comparisons.
    @type field_comparisons: list

    @param threshold: number describing the match threshold a comparison must
                      exceed to become a positive match.
    @type threshold: float

    @param matches_needed: number of positive field matches needed for the entire
                           comparison process to give a positive result.
    @type matches_needed: int

    @return: tuple of matching result, True if enough matches are found, False if not,
             and number of matches.
    @rtype: tuple
    """
    hits = 0
    # Hoisted, pre-compiled normalisation pattern (same expression as before).
    normalise = re.compile(r'\D[0]|\W+')
    for candidate_pairs in field_comparisons:
        # First matching pair settles this field; move on to the next one.
        for left, right in candidate_pairs:
            left_id = normalise.sub("", left.lower())
            right_id = normalise.sub("", right.lower())
            if difflib.SequenceMatcher(None, left_id, right_id).ratio() >= threshold:
                hits += 1
                break
        # Enough fields matched already: succeed without looking further.
        if hits >= matches_needed:
            return True, hits
    return hits >= matches_needed, hits
def compare_fieldvalues_title(field_comparisons, threshold, matches_needed):
    """
    Performs field validation given a list of field comparisons using a method
    specialized for comparing titles, e.g. by generating possible concatenated
    title/subtitle variants of each value before matching.

    Each comparison is done according to given threshold which the normalized
    result must be equal or above to match. Before the values are compared they
    are lower-cased and leading/trailing spaces are removed.

    During validation the fields are compared and matches are counted per
    field, up to the given amount of matches needed is met, causing the
    function to return True. If validation ends before this threshold is met
    it will return False.

    @param field_comparisons: list of comparisons, each which contains a list
        of field-value to field-value comparisons.
    @type field_comparisons: list

    @param threshold: number describing the match threshold a comparison must
        exceed to become a positive match.
    @type threshold: float

    @param matches_needed: number of positive field matches needed for the entire
        comparison process to give a positive result.
    @type matches_needed: int

    @return: tuple of matching result, True if enough matches are found, False if not,
        and number of matches.
    @rtype: tuple
    """
    matches_found = 0
    # Field by field: the first matching value-pair within a field counts the
    # field as matched and we move on to the next field.
    for comparisons in field_comparisons:
        for value, other_value in comparisons:
            # TODO: KB of alias mappings of common names
            # NOTE(review): only the FIRST group of variant pairs (pairs built
            # from the first variant of `value`) is examined, mirroring the
            # original behaviour - verify this is intended.
            variant_pairs = _get_grouped_pairs(
                get_separated_string_variants(value),
                get_separated_string_variants(other_value))[0]
            pair_matched = any(
                difflib.SequenceMatcher(None,
                                        str1.lower().strip(),
                                        str2.lower().strip()).ratio() >= threshold
                for str1, str2 in variant_pairs)
            if pair_matched:
                matches_found += 1
                break
        # Return immediately once enough field matches have accumulated
        if matches_found >= matches_needed:
            return True, matches_found
    return matches_found >= matches_needed, matches_found
def compare_fieldvalues_date(field_comparisons, threshold, matches_needed):
    """
    Performs field validation given a list of field comparisons specialized
    towards matching dates: every four-digit group found in each value is
    treated as a year and compared numerically.

    Each comparison is done according to given threshold which the final
    result must be equal or above to match.

    During validation the fields are compared and matches are counted per
    field, up to the given amount of matches needed is met, causing the
    function to return True. If validation ends before this threshold is met
    it will return False.

    @param field_comparisons: list of comparisons, each which contains a list
        of field-value to field-value comparisons.
    @type field_comparisons: list

    @param threshold: number describing the match threshold a comparison must
        exceed to become a positive match.
    @type threshold: float

    @param matches_needed: number of positive field matches needed for the entire
        comparison process to give a positive result.
    @type matches_needed: int

    @return: tuple of matching result, True if enough matches are found, False if not,
        and number of matches.
    @rtype: tuple
    """
    matches_found = 0
    # Field by field: the first value-pair containing a close-enough year pair
    # counts the whole field as matched.
    for comparisons in field_comparisons:
        for value, other_value in comparisons:
            # Extract all candidate 4-digit years from both values.
            years = re.findall('[0-9]{4}', value.lower())
            other_years = re.findall('[0-9]{4}', other_value.lower())
            year_matched = any(
                compare_numbers(int(year1), int(year2)) >= threshold
                for year1 in years
                for year2 in other_years)
            if year_matched:
                matches_found += 1
                break
        # Return immediately once enough field matches have accumulated
        if matches_found >= matches_needed:
            return True, matches_found
    return matches_found >= matches_needed, matches_found
def get_validation_ruleset(record):
    """
    Iterate over the rule-sets defined in
    CFG_BIBMATCH_MATCH_VALIDATION_RULESETS and build the validation
    rule-set to use when comparing records.

    Rule-sets are applied in order of appearance, so later rules take
    precedence over earlier ones when MARC tags conflict. The 'default'
    rule-set is always applied; its tag-rules can be overwritten by other
    rule-sets. Each configured rule-set is a tuple of two items,
    for example: ('980__ \$\$aTHESIS', { tag : (rules) })

    * The first item is a regular expression matched against the textmarc
      representation of the record; on a hit the final rule-set is updated
      with the second item.
    * The second item is a dict mapping specific MARC tags to validation
      rules.

    Returns None when a configured rule is malformed (unknown mode or
    missing key).

    @param record: bibrec record dict to analyze
    @type record: dict

    @return: list of ordered rule-sets
    @rtype: list
    """
    # Textmarc form of the record, needed for the regexp matching below.
    marc_text = transform_record_to_marc(record)
    # Parse the rule-set configuration, matching rule-sets against the
    # record and adding to/overwriting the tag rules as we go.
    ruleset_by_tag = {}
    for pattern, rules in CFG_BIBMATCH_MATCH_VALIDATION_RULESETS:
        if pattern != "default" and re.search(pattern, marc_text) is None:
            continue
        for rule in rules:
            # Simple validation of rules syntax
            if rule['compare_mode'] not in CFG_BIBMATCH_VALIDATION_COMPARISON_MODES:
                return
            if rule['match_mode'] not in CFG_BIBMATCH_VALIDATION_MATCHING_MODES:
                return
            if rule['result_mode'] not in CFG_BIBMATCH_VALIDATION_RESULT_MODES:
                return
            try:
                # Update/Add rule in rule-set
                ruleset_by_tag[rule['tags']] = (rule['threshold'],
                                                rule['compare_mode'],
                                                rule['match_mode'],
                                                rule['result_mode'])
            except KeyError:
                # Bad rule-set, return None
                return
    # Order the final list so 'final' and 'joker' result-modes are executed
    # before normal rules. Order of precedence: final, joker, normal
    final_rules = []
    joker_rules = []
    normal_rules = []
    for tag, (threshold, compare_mode, match_mode, result_mode) in ruleset_by_tag.iteritems():
        if compare_mode == 'ignored' or threshold <= 0.0:
            # Rule is explicitly disabled
            continue
        entry = (tag, threshold, compare_mode, match_mode, result_mode)
        if result_mode == 'final':
            final_rules.append(entry)
        elif result_mode == 'joker':
            joker_rules.append(entry)
        else:
            normal_rules.append(entry)
    return final_rules + joker_rules + normal_rules
def validate_tag(field_tag):
    """
    Extract a (tag, ind1, ind2, code) tuple from the given MARC tag string.
    For example: "100__a" returns ('100', '', '', 'a'). Underscore
    indicators are translated to empty strings. If the tag is not deemed
    valid, None is returned.

    @param field_tag: field tag to extract MARC parts from
    @type field_tag: string

    @return: tuple of MARC tag parts, tag, ind1, ind2, code
    @rtype: tuple
    """
    if re_valid_tag.match(field_tag) is None:
        # Not a well-formed MARC tag.
        return None
    tag = field_tag[0:3]
    # '_' is the textual placeholder for an empty indicator.
    ind1 = field_tag[3:4].replace("_", "")
    ind2 = field_tag[4:5].replace("_", "")
    code = field_tag[5:6]
    return tag, ind1, ind2, code
def get_paired_comparisons(first_list, second_list, ignore_order=True):
    """
    Return a list of comparison groups, each containing item-to-item pairs.

    When ignore_order is True, every item of first_list is paired with every
    item of second_list, grouped by first_list item. When False, the two
    lists must be of equal length and each group holds exactly one
    positional one-to-one pair; unequal lengths yield an empty list.

    @param first_list: an iterable to pair with second_list items
    @type first_list: iterable

    @param second_list: an iterable to be paired against first_list
    @type second_list: iterable

    @param ignore_order: all-against-all pairing (True) or index-by-index (False)
    @type ignore_order: bool

    @return: the resulting iterable of pairs grouped by first_list items
    @rtype: iterable
    """
    if ignore_order:
        # Grouped permutations of comparisons between subfields.
        return _get_grouped_pairs(first_list, second_list)
    # Ordered mode requires equal-length inputs.
    if len(first_list) != len(second_list):
        return []
    # One single-pair group per index position.
    return [((first_item, second_item),)
            for first_item, second_item in zip(first_list, second_list)]
def compare_numbers(num1, num2):
    """
    This function will try to compare two numbers to each other,
    returning the normalized distance between them. The value
    returned will be between 0.0 - 1.0, with 1.0 being a full
    match, decreasing 0.1 per unit of difference.

    The score is clamped at 0.0 so differences larger than ten units
    cannot yield a negative value (the unclamped formula would have
    returned negative scores, contradicting the documented range).

    Inspired by similar function in MarcXimil
    (http://marcximil.sourceforge.net/).

    @param num1: the first number to compare
    @type num1: int

    @param num2: the second number to compare
    @type num2: int

    @return: the normalized equality score between 0.0 and 1.0
    @rtype: float
    """
    # max() enforces the documented lower bound of 0.0.
    return max(0.0, 1.0 - (abs(num1 - num2) * 0.1))
def get_separated_string_variants(s, sep=':'):
    """
    Return a list of all the possible combinations of substrings of the
    given string when split on the given separator.

    For example:
    "scalar tensor theory : validity of Cosmic no hair conjecture"
    produces:
    ['scalar tensor theory ',
    ' validity of Cosmic no hair conjecture',
    'scalar tensor theory : validity of Cosmic no hair conjecture']

    It also returns variants containing several separators:
    "scalar tensor theory : validity of Cosmic no hair : conjecture"
    produces:
    ['scalar tensor theory ',
    ' validity of Cosmic no hair : conjecture',
    'scalar tensor theory : validity of Cosmic no hair ',
    ' conjecture',
    'scalar tensor theory : validity of Cosmic no hair : conjecture']

    @param s: string to generate variants from
    @type s: string

    @param sep: separator that splits the string in two. Defaults to colon (:).
    @type sep: string

    @return: list of strings
    @rtype: list
    """
    parts = s.split(sep)
    variants = []
    # For each split point, emit the (non-empty) head and tail around it;
    # the final split point yields the full string as the head.
    for cut in range(1, len(parts) + 1):
        head = sep.join(parts[:cut])
        if head != '':
            variants.append(head)
        tail = sep.join(parts[cut:])
        if tail != '':
            variants.append(tail)
    return variants
def get_reversed_string_variants(s, sep=','):
    """
    Return a pair of the original string and its reversed version, where
    "reversed" swaps the text before/after the first occurrence of the
    separator. For example, "lastname, firstname" -> "firstname, lastname".

    @param s: string to extract pair from
    @type s: string

    @param sep: separator that splits the string in two. Defaults to comma (,).
    @type sep: string

    @return: tuple of strings
    @rtype: tuple
    """
    # Split on the first occurrence of the separator only.
    left, separator, right = string_partition(s, sep)
    forward = left + separator + right
    backward = right + separator + left
    return (forward, backward)
def _get_grouped_pairs(first_list, second_list):
"""
This function will return a list of grouped pairs of items from
the first list with every item in the second list.
e.g. [1,2,3],[4,5] -> [([1, 4], [1, 5]),
([2, 4], [2, 5]),
([3, 4], [3, 5])]
@param first_list: an iterable to pair with second_list items
@type first_list: iterable
@param second_list: an iterable to be paired against first_list
@type second_list: iterable
@return: the resulting iterable of pairs grouped by first_list items
@rtype: iterable
"""
pairs = []
for first_item in first_list:
pair_group = []
for second_item in second_list:
pair_group.append((first_item, second_item))
pairs.append(tuple(pair_group))
return pairs
|
chalasr/Flask-P2P | refs/heads/master | venv/lib/python2.7/site-packages/flask/testsuite/test_apps/lib/python2.5/site-packages/site_package/__init__.py | 1799 | import flask
# Module-level Flask application created at import time; importing this
# package is enough to obtain `app`. NOTE(review): path suggests this is a
# site-packages fixture for Flask's import tests - confirm before reuse.
app = flask.Flask(__name__)
|
KokareIITP/django | refs/heads/master | tests/gis_tests/test_spatialrefsys.py | 319 | import unittest
from django.contrib.gis.gdal import HAS_GDAL
from django.db import connection
from django.test import skipUnlessDBFeature
from django.utils import six
from .utils import SpatialRefSys, oracle, postgis, spatialite
# Reference spatial-reference-system fixtures: one geographic SRS (EPSG:4326)
# and one projected SRS (EPSG:32140), with the expected metadata (srtext
# prefix, proj4 regex, spheroid/ellipsoid parameters and comparison
# precisions) asserted against the database's spatial_ref_sys contents.
test_srs = ({
    'srid': 4326,
    'auth_name': ('EPSG', True),
    'auth_srid': 4326,
    # Only the beginning, because there are differences depending on installed libs
    'srtext': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84"',
    # +ellps=WGS84 has been removed in the 4326 proj string in proj-4.8
    'proj4_re': r'\+proj=longlat (\+ellps=WGS84 )?(\+datum=WGS84 |\+towgs84=0,0,0,0,0,0,0 )\+no_defs ',
    'spheroid': 'WGS 84', 'name': 'WGS 84',
    'geographic': True, 'projected': False, 'spatialite': True,
    # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
    'ellipsoid': (6378137.0, 6356752.3, 298.257223563),
    'eprec': (1, 1, 9),
}, {
    'srid': 32140,
    'auth_name': ('EPSG', False),
    'auth_srid': 32140,
    'srtext': (
        'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",'
        'DATUM["North_American_Datum_1983",SPHEROID["GRS 1980"'
    ),
    'proj4_re': r'\+proj=lcc \+lat_1=30.28333333333333 \+lat_2=28.38333333333333 \+lat_0=27.83333333333333 '
                r'\+lon_0=-99 \+x_0=600000 \+y_0=4000000 (\+ellps=GRS80 )?'
                r'(\+datum=NAD83 |\+towgs84=0,0,0,0,0,0,0 )?\+units=m \+no_defs ',
    'spheroid': 'GRS 1980', 'name': 'NAD83 / Texas South Central',
    'geographic': False, 'projected': True, 'spatialite': False,
    # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
    'ellipsoid': (6378137.0, 6356752.31414, 298.257222101),
    'eprec': (1, 5, 10),
})
@unittest.skipUnless(HAS_GDAL, "SpatialRefSysTest needs gdal support")
@skipUnlessDBFeature("has_spatialrefsys_table")
class SpatialRefSysTest(unittest.TestCase):
    """
    Checks the backend-specific SpatialRefSys model against the fixed
    expectations in ``test_srs`` (EPSG:4326 and EPSG:32140) defined above.
    """

    def test_retrieve(self):
        """
        Test retrieval of SpatialRefSys model objects.
        """
        for sd in test_srs:
            srs = SpatialRefSys.objects.get(srid=sd['srid'])
            self.assertEqual(sd['srid'], srs.srid)
            # Some of the authority names are borked on Oracle, e.g., SRID=32140.
            # Also, Oracle Spatial seems to add extraneous info to fields, hence
            # the testing with the 'startswith' flag.
            auth_name, oracle_flag = sd['auth_name']
            if postgis or (oracle and oracle_flag):
                self.assertTrue(srs.auth_name.startswith(auth_name))
            self.assertEqual(sd['auth_srid'], srs.auth_srid)
            # No proj.4 and different srtext on oracle backends :(
            if postgis:
                self.assertTrue(srs.wkt.startswith(sd['srtext']))
                six.assertRegex(self, srs.proj4text, sd['proj4_re'])

    def test_osr(self):
        """
        Test getting OSR objects from SpatialRefSys model objects.
        """
        for sd in test_srs:
            sr = SpatialRefSys.objects.get(srid=sd['srid'])
            self.assertTrue(sr.spheroid.startswith(sd['spheroid']))
            self.assertEqual(sd['geographic'], sr.geographic)
            self.assertEqual(sd['projected'], sr.projected)
            if not (spatialite and not sd['spatialite']):
                # Can't get 'NAD83 / Texas South Central' from PROJ.4 string
                # on SpatiaLite
                self.assertTrue(sr.name.startswith(sd['name']))
            # Testing the SpatialReference object directly.
            if postgis or spatialite:
                srs = sr.srs
                six.assertRegex(self, srs.proj4, sd['proj4_re'])
                # No `srtext` field in the `spatial_ref_sys` table in SpatiaLite < 4
                if not spatialite or connection.ops.spatial_version[0] >= 4:
                    self.assertTrue(srs.wkt.startswith(sd['srtext']))

    def test_ellipsoid(self):
        """
        Test the ellipsoid property.
        """
        for sd in test_srs:
            # Getting the ellipsoid and precision parameters.
            ellps1 = sd['ellipsoid']
            prec = sd['eprec']
            # Getting our spatial reference and its ellipsoid
            srs = SpatialRefSys.objects.get(srid=sd['srid'])
            ellps2 = srs.ellipsoid
            for i in range(3):
                self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])

    @skipUnlessDBFeature('supports_add_srs_entry')
    def test_add_entry(self):
        """
        Test adding a new entry in the SpatialRefSys model using the
        add_srs_entry utility.
        """
        from django.contrib.gis.utils import add_srs_entry

        add_srs_entry(3857)
        self.assertTrue(
            SpatialRefSys.objects.filter(srid=3857).exists()
        )
        srs = SpatialRefSys.objects.get(srid=3857)
        self.assertTrue(
            SpatialRefSys.get_spheroid(srs.wkt).startswith('SPHEROID[')
        )
|
martinwicke/tensorflow | refs/heads/master | tensorflow/python/debug/session_debug_testlib.py | 8 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug import debug_data
from tensorflow.python.debug import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"
  @classmethod
  def tearDownClass(cls):
    # No class-level resources to release; defined for symmetry with
    # setUpClass.
    pass
  def setUp(self):
    # Fresh temporary dump directory per test; removed in tearDown().
    self._dump_root = tempfile.mkdtemp()
  def tearDown(self):
    # Start each test from a clean default graph.
    tf.reset_default_graph()

    # Tear down temporary dump directory.
    if os.path.isdir(self._dump_root):
      shutil.rmtree(self._dump_root)
  def _debug_urls(self, run_number=None):
    # Abstract hook: concrete subclasses supply the debug URL(s) that
    # Session.run() should stream debug tensors to.
    raise NotImplementedError(
        "_debug_urls() method is not implemented in the base test class.")
  def _debug_dump_dir(self, run_number=None):
    # Abstract hook: concrete subclasses supply the on-disk directory that
    # holds the dump for the given run.
    raise NotImplementedError(
        "_debug_dump_dir() method is not implemented in the base test class.")
  def testDumpToFileOverlappingParentDir(self):
    """Dump two tensors whose node names share a parent directory."""
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      v_init_val = np.array([[2.0], [-1.0]])

      # Use node names with overlapping namespace (i.e., parent directory) to
      # test concurrent, non-racing directory creation.
      u_name = "testDumpToFile/u"
      v_name = "testDumpToFile/v"

      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant(v_init_val, shape=[2, 1])
      v = variables.Variable(v_init, name=v_name)

      w = math_ops.matmul(u, v, name="testDumpToFile/matmul")

      u.initializer.run()
      v.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = "file://%s" % self._dump_root

      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()

      # Invoke Session.run().
      sess.run(w, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertTrue(dump.loaded_partition_graphs())

      # Verify the dumped tensor values for u and v.
      self.assertEqual(2, dump.size)
      self.assertAllClose([u_init_val], dump.get_tensors("%s/read" % u_name, 0,
                                                         "DebugIdentity"))
      self.assertAllClose([v_init_val], dump.get_tensors("%s/read" % v_name, 0,
                                                         "DebugIdentity"))

      # Relative timestamps are measured from the start of the run and must
      # therefore be non-negative.
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % u_name, 0, "DebugIdentity")[0], 0)
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % v_name, 0, "DebugIdentity")[0], 0)
  def testDifferentWatchesOnDifferentRuns(self):
    """Test watching different tensors on different runs of the same graph."""
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      v_init_val = np.array([[2.0], [-1.0]])

      # Use node names with overlapping namespace (i.e., parent directory) to
      # test concurrent, non-racing directory creation.
      u_name = "diff_Watch/u"
      v_name = "diff_Watch/v"

      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant(v_init_val, shape=[2, 1])
      v = variables.Variable(v_init, name=v_name)

      w = math_ops.matmul(u, v, name="diff_Watch/matmul")

      u.initializer.run()
      v.initializer.run()

      # Two runs of the same graph, each watching a different tensor and
      # dumping to a run-specific directory.
      for i in xrange(2):
        run_options = config_pb2.RunOptions(output_partition_graphs=True)

        run_dump_root = self._debug_dump_dir(run_number=i)
        debug_urls = self._debug_urls(run_number=i)

        if i == 0:
          # First debug run: Add debug tensor watch for u.
          debug_utils.add_debug_tensor_watch(
              run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
        else:
          # Second debug run: Add debug tensor watch for v.
          debug_utils.add_debug_tensor_watch(
              run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)

        run_metadata = config_pb2.RunMetadata()

        # Invoke Session.run().
        sess.run(w, options=run_options, run_metadata=run_metadata)

        self.assertEqual(self._expected_partition_graph_count,
                         len(run_metadata.partition_graphs))

        dump = debug_data.DebugDumpDir(
            run_dump_root, partition_graphs=run_metadata.partition_graphs)
        self.assertTrue(dump.loaded_partition_graphs())

        # Each run should have generated only one dumped tensor, not two.
        self.assertEqual(1, dump.size)

        if i == 0:
          self.assertAllClose([u_init_val],
                              dump.get_tensors("%s/read" % u_name, 0,
                                               "DebugIdentity"))
          self.assertGreaterEqual(
              dump.get_rel_timestamps("%s/read" % u_name, 0,
                                      "DebugIdentity")[0], 0)
        else:
          self.assertAllClose([v_init_val],
                              dump.get_tensors("%s/read" % v_name, 0,
                                               "DebugIdentity"))
          self.assertGreaterEqual(
              dump.get_rel_timestamps("%s/read" % v_name, 0,
                                      "DebugIdentity")[0], 0)
  def testDumpStringTensorsToFileSystem(self):
    """Dump string (bytes) tensors and read them back from the dump dir."""
    with session.Session() as sess:
      str1_init_val = np.array(b"abc")
      str2_init_val = np.array(b"def")

      str1_init = constant_op.constant(str1_init_val)
      str2_init = constant_op.constant(str2_init_val)

      str1_name = "str1"
      str2_name = "str2"
      str1 = variables.Variable(str1_init, name=str1_name)
      str2 = variables.Variable(str2_init, name=str2_name)
      # Concatenate str1 and str2
      str_concat = math_ops.add(str1, str2, name="str_concat")

      str1.initializer.run()
      str2.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Add debug tensor watch for str1.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for str2.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()
      sess.run(str_concat, options=run_options, run_metadata=run_metadata)

      # String ops are located on CPU.
      self.assertEqual(1, len(run_metadata.partition_graphs))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      self.assertIn(str1_name, dump.nodes())
      self.assertIn(str2_name, dump.nodes())

      self.assertEqual(2, dump.size)

      self.assertEqual([str1_init_val], dump.get_tensors("%s/read" % str1_name,
                                                         0, "DebugIdentity"))
      self.assertEqual([str2_init_val], dump.get_tensors("%s/read" % str2_name,
                                                         0, "DebugIdentity"))

      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
          0)
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
          0)
  def testDumpUninitializedVariable(self):
    """Watching not-yet-initialized variables dumps None tensor values."""
    op_namespace = "testDumpUninitializedVariable"
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      s_init_val = b"str1"

      u_name = "%s/u" % op_namespace
      s_name = "%s/s" % op_namespace

      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.Variable(u_init, name=u_name)
      s_init = constant_op.constant(s_init_val)
      s = variables.Variable(s_init, name=s_name)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Add debug tensor watches for u and s.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s" % u_name, 0, debug_urls=debug_urls)
      debug_utils.add_debug_tensor_watch(
          run_options, "%s" % s_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()

      # Initialize u and s.
      sess.run(variables.global_variables_initializer(),
               options=run_options,
               run_metadata=run_metadata)

      # Verify the dump file for the uninitialized value of u.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      self.assertEqual(2, dump.size)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      # Verify that the variable is properly initialized by the run() call.
      u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
      s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
      self.assertEqual(1, len(u_vals))
      self.assertIsNone(u_vals[0])
      self.assertEqual(1, len(s_vals))
      self.assertIsNone(s_vals[0])

      # Call run() again, to check that u is initialized properly.
      self.assertAllClose(u_init_val, sess.run(u))
      self.assertEqual(s_init_val, sess.run(s))
  def testDumpToFileWhileLoop(self):
    """Dump tensors from a while_loop and inspect per-iteration values."""
    with session.Session() as sess:
      num_iter = 10

      # "u" is the Variable being updated in the loop.
      u_name = "testDumpToFileWhileLoop/u"
      u_namespace = u_name.split("/")[0]

      u_init_val = np.array(11.0)
      u_init = constant_op.constant(u_init_val)
      u = variables.Variable(u_init, name=u_name)

      # "v" is the increment.
      v_name = "testDumpToFileWhileLoop/v"
      v_namespace = v_name.split("/")[0]

      v_init_val = np.array(2.0)
      v_init = constant_op.constant(v_init_val)
      v = variables.Variable(v_init, name=v_name)

      u.initializer.run()
      v.initializer.run()

      i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")

      def cond(i):
        # Loop while the counter is below num_iter.
        return math_ops.less(i, num_iter)

      def body(i):
        # Each iteration adds v to u, then advances the counter once the
        # assignment has completed (enforced via the dependency below).
        new_u = state_ops.assign_add(u, v)
        new_i = math_ops.add(i, 1)
        op = control_flow_ops.group(new_u)
        new_i = control_flow_ops.with_dependencies([op], new_i)
        return [new_i]

      loop = control_flow_ops.while_loop(cond, body, [i], parallel_iterations=1)

      # Create RunOptions for debug-watching tensors
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Identity.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Identity", 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Add/y.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Add/y", 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()
      r = sess.run(loop, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      self.assertEqual(num_iter, r)

      u_val_final = sess.run(u)
      self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)

      # Verify dump files
      self.assertTrue(os.path.isdir(self._dump_root))

      self.assertTrue(os.path.isdir(os.path.join(self._dump_root, u_namespace)))
      self.assertTrue(
          os.path.isdir(os.path.join(self._dump_root, v_namespace, "v")))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      # Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
      # and 10 iterations of while/Add/y.
      self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)

      # Verify tensor values.
      self.assertAllClose([u_init_val], dump.get_tensors(u_name, 0,
                                                         "DebugIdentity"))
      self.assertAllClose([v_init_val], dump.get_tensors("%s/read" % v_name, 0,
                                                         "DebugIdentity"))

      # The watched loop counter should have produced one dump per iteration,
      # with values 0 through 9.
      while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
      self.assertEqual(10, len(while_id_tensors))
      for k in xrange(len(while_id_tensors)):
        self.assertAllClose(np.array(k), while_id_tensors[k])

      # Verify ascending timestamps from the while loops.
      while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
                                                        "DebugIdentity")
      self.assertEqual(10, len(while_id_rel_timestamps))
      prev_rel_time = 0
      for rel_time in while_id_rel_timestamps:
        self.assertGreaterEqual(rel_time, prev_rel_time)
        prev_rel_time = rel_time

      # Test querying debug watch keys from node name.
      watch_keys = dump.debug_watch_keys("while/Identity")
      self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)

      # Test querying debug datum instances from debug watch key.
      self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
      self.assertEqual([], dump.watch_key_to_data("foo"))
  def testFindNodesWithBadTensorValues(self):
    """DebugDumpDir.find() should locate tensors containing nan/inf."""
    with session.Session() as sess:
      u_name = "testFindNodesWithBadTensorValues/u"
      v_name = "testFindNodesWithBadTensorValues/v"
      w_name = "testFindNodesWithBadTensorValues/w"
      x_name = "testFindNodesWithBadTensorValues/x"
      y_name = "testFindNodesWithBadTensorValues/y"
      z_name = "testFindNodesWithBadTensorValues/z"

      u_init = constant_op.constant([2.0, 4.0])
      u = variables.Variable(u_init, name=u_name)
      v_init = constant_op.constant([2.0, 1.0])
      v = variables.Variable(v_init, name=v_name)

      # Expected output: [0.0, 3.0]
      w = math_ops.sub(u, v, name=w_name)

      # Expected output: [inf, 1.3333]  (division by the zero in w)
      x = math_ops.div(u, w, name=x_name)

      # Expected output: [nan, 4.0]  (0 * inf)
      y = math_ops.mul(w, x, name=y_name)
      z = math_ops.mul(y, y, name=z_name)

      u.initializer.run()
      v.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())

      run_metadata = config_pb2.RunMetadata()
      sess.run(z, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      def has_bad_value(_, tensor):
        # Predicate: tensor contains any nan or any inf element.
        return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))

      # Find all "offending tensors".
      bad_data = dump.find(has_bad_value)

      # Verify that the nodes with bad values are caught through running find
      # on the debug dump.
      self.assertEqual(3, len(bad_data))
      self.assertEqual(x_name, bad_data[0].node_name)
      self.assertEqual(y_name, bad_data[1].node_name)
      self.assertEqual(z_name, bad_data[2].node_name)

      # Test first_n kwarg of find(): Find the first offending tensor.
      first_bad_datum = dump.find(has_bad_value, first_n=1)

      self.assertEqual(1, len(first_bad_datum))
      self.assertEqual(x_name, first_bad_datum[0].node_name)
  def testDumpGraphStructureLookup(self):
    """Exercise the graph-structure lookup API of DebugDumpDir.

    Builds a small graph (u -> u/read -> v -> w), dumps it, then verifies
    node listing, attribute/watch-key queries, input/recipient/transitive
    lookups, device queries, and op-type queries -- first with partition
    graphs loaded, then the RuntimeErrors raised when they are not.
    """
    # TODO(cais): Separate this test into multiple test methods.
    with session.Session() as sess:
      u_name = "testDumpGraphStructureLookup/u"
      v_name = "testDumpGraphStructureLookup/v"
      w_name = "testDumpGraphStructureLookup/w"
      u_init = constant_op.constant([2.0, 4.0])
      u = variables.Variable(u_init, name=u_name)
      v = math_ops.add(u, u, name=v_name)
      w = math_ops.add(v, v, name=w_name)
      u.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      sess.run(w, options=run_options, run_metadata=run_metadata)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Variables are read through an implicit "<name>/read" Identity node.
      u_read_name = u_name + "/read"
      # Test node name list lookup of the DebugDumpDir object.
      node_names = dump.nodes()
      self.assertTrue(u_name in node_names)
      self.assertTrue(u_read_name in node_names)
      # Test querying node attributes.
      u_attr = dump.node_attributes(u_name)
      self.assertEqual(dtypes.float32, u_attr["dtype"].type)
      self.assertEqual(1, len(u_attr["shape"].shape.dim))
      self.assertEqual(2, u_attr["shape"].shape.dim[0].size)
      with self.assertRaisesRegexp(ValueError, "No node named \"foo\" exists"):
        dump.node_attributes("foo")
      # Test querying the debug watch keys with node names.
      self.assertEqual(["%s:0:DebugIdentity" % u_name],
                       dump.debug_watch_keys(u_name))
      self.assertEqual(["%s:0:DebugIdentity" % v_name],
                       dump.debug_watch_keys(v_name))
      self.assertEqual(["%s:0:DebugIdentity" % w_name],
                       dump.debug_watch_keys(w_name))
      self.assertEqual([], dump.debug_watch_keys("foo"))
      # Test querying debug datum instances from debug watch.
      u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
      self.assertEqual(1, len(u_data))
      self.assertEqual(u_name, u_data[0].node_name)
      self.assertEqual(0, u_data[0].output_slot)
      self.assertEqual("DebugIdentity", u_data[0].debug_op)
      self.assertGreaterEqual(u_data[0].timestamp, 0)
      self.assertEqual([], dump.watch_key_to_data("foo"))
      # Test the inputs lookup of the DebugDumpDir object.
      self.assertEqual([], dump.node_inputs(u_name))
      self.assertEqual([u_name], dump.node_inputs(u_read_name))
      # v = u/read + u/read and w = v + v, hence the duplicated inputs.
      self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
      self.assertEqual([v_name] * 2, dump.node_inputs(w_name))
      self.assertEqual([], dump.node_inputs(u_name, is_control=True))
      self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
      self.assertEqual([], dump.node_inputs(v_name, is_control=True))
      self.assertEqual([], dump.node_inputs(w_name, is_control=True))
      # Test the outputs recipient lookup of the DebugDumpDir object.
      self.assertTrue(u_read_name in dump.node_recipients(u_name))
      self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
      self.assertEqual(2, dump.node_recipients(v_name).count(w_name))
      self.assertEqual([], dump.node_recipients(u_name, is_control=True))
      self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
      self.assertEqual([], dump.node_recipients(v_name, is_control=True))
      self.assertEqual([], dump.node_recipients(w_name, is_control=True))
      # Test errors raised on invalid node names.
      with self.assertRaisesRegexp(ValueError,
                                   "does not exist in partition graphs"):
        dump.node_inputs(u_name + "foo")
      with self.assertRaisesRegexp(ValueError,
                                   "does not exist in partition graphs"):
        dump.node_recipients(u_name + "foo")
      # Test transitive_inputs().
      self.assertEqual([], dump.transitive_inputs(u_name))
      self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
      self.assertEqual(
          set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
      self.assertEqual(
          set([u_name, u_read_name, v_name]),
          set(dump.transitive_inputs(w_name)))
      with self.assertRaisesRegexp(ValueError,
                                   "does not exist in partition graphs"):
        dump.transitive_inputs(u_name + "foo")
      # Test num_devices().
      self.assertEqual(self._expected_num_devices, len(dump.devices()))
      # Test node_device().
      self.assertEqual(self._main_device, dump.node_device(u_name))
      with self.assertRaisesRegexp(ValueError,
                                   "does not exist in partition graphs"):
        dump.node_device(u_name + "foo")
      # Test node_exists().
      self.assertTrue(dump.node_exists(u_name))
      self.assertTrue(dump.node_exists(u_name + "/read"))
      self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
      # Test node_op_type().
      self.assertEqual("Variable", dump.node_op_type(u_name))
      self.assertEqual("Identity", dump.node_op_type(u_name + "/read"))
      self.assertEqual("Add", dump.node_op_type(v_name))
      self.assertEqual("Add", dump.node_op_type(w_name))
      with self.assertRaisesRegexp(ValueError,
                                   "does not exist in partition graphs"):
        dump.node_op_type(u_name + "foo")
      # Now load the dump again, without the parition graphs, so we can check
      # the errors raised for no partition graphs loaded.
      dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
      with self.assertRaisesRegexp(RuntimeError,
                                   "No partition graphs have been loaded"):
        dump.partition_graphs()
      self.assertFalse(dump.loaded_partition_graphs())
      with self.assertRaisesRegexp(
          RuntimeError, "Node inputs are not loaded from partition graphs yet"):
        dump.node_inputs(u_name)
      with self.assertRaisesRegexp(RuntimeError,
                                   "No partition graphs have been loaded"):
        dump.nodes()
      with self.assertRaisesRegexp(
          RuntimeError,
          "Node recipients are not loaded from partition graphs yet"):
        dump.node_recipients(u_name)
      with self.assertRaisesRegexp(
          RuntimeError, "Node inputs are not loaded from partition graphs yet"):
        dump.transitive_inputs(u_name)
      with self.assertRaisesRegexp(
          RuntimeError, "Devices are not loaded from partition graphs yet"):
        dump.devices()
      with self.assertRaisesRegexp(
          RuntimeError,
          "Node devices are not loaded from partition graphs yet"):
        dump.node_device(u_name)
      with self.assertRaisesRegexp(
          RuntimeError,
          "Node op types are not loaded from partition graphs yet"):
        dump.node_op_type(u_name)
  def testDumpCausalityCheck(self):
    """Timestamp-causality validation of a debug dump directory.

    Dumps a graph u -> v -> w, then swaps the timestamps embedded in the
    dump file names of u and v. Reloading with partition graphs must then
    raise a "Causality violated" ValueError, which validate=False
    suppresses.
    """
    with session.Session() as sess:
      u_name = "testDumpCausalityCheck/u"
      v_name = "testDumpCausalityCheck/v"
      w_name = "testDumpCausalityCheck/w"
      u_init = constant_op.constant([2.0, 4.0])
      u = variables.Variable(u_init, name=u_name)
      v = math_ops.add(u, u, name=v_name)
      w = math_ops.add(v, v, name=w_name)
      u.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      sess.run(w, options=run_options, run_metadata=run_metadata)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
      # First, loading the original dump without supplying the
      # partition_graphs should not cause a RuntimeError, validation occurs
      # only with partition_graphs loaded.
      debug_data.DebugDumpDir(self._dump_root)
      # Now, loading the original dump with partition graphs supplied should
      # succeed. The validation should pass quietly.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Get the dump file names and compute their timestamps.
      self.assertEqual(
          1, len(dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")))
      u_file_path = dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")[0]
      self.assertEqual(
          1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
      v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]
      # The timestamp is the suffix after the last "_" in the file name.
      u_timestamp = int(u_file_path[u_file_path.rindex("_") + 1:])
      v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
      # Swap the time stamps
      new_u_file_path = u_file_path[:u_file_path.rindex(
          "_")] + "_%d" % v_timestamp
      new_v_file_path = v_file_path[:v_file_path.rindex(
          "_")] + "_%d" % u_timestamp
      os.rename(u_file_path, new_u_file_path)
      os.rename(v_file_path, new_v_file_path)
      # Load the dump directory again. Now a ValueError is expected to be
      # raised due to the timestamp swap.
      with self.assertRaisesRegexp(ValueError, "Causality violated"):
        dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Loading the dump directory with kwarg "validate" set explicitly to
      # False should get rid of the error.
      dump = debug_data.DebugDumpDir(
          self._dump_root,
          partition_graphs=run_metadata.partition_graphs,
          validate=False)
def testWatchingOutputSlotWithoutOutgoingEdge(self):
"""Test watching output slots not attached to any outgoing edges."""
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
# Create a control edge from a node with an output: From u to z.
# Node u will get executed only because of the control edge. The output
# tensor u:0 is not attached to any outgoing edge in the graph. This test
# checks that the debugger can watch such a tensor.
with ops.control_dependencies([u]):
z = control_flow_ops.no_op(name="z")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(z, options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Assert that the DebugIdentity watch on u works properly.
self.assertEqual(1, len(dump.dumped_tensor_data))
datum = dump.dumped_tensor_data[0]
self.assertEqual("u", datum.node_name)
self.assertEqual(0, datum.output_slot)
self.assertEqual("DebugIdentity", datum.debug_op)
self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
  def testWatchingVariableUpdateOps(self):
    """Watch output slots on Variable-updating ops, with no emitted edges.

    The ApplyGradientDescent update ops' outputs feed no edges; their
    dumped values are checked against hand-computed post-update values.
    """
    with session.Session() as sess:
      u_init = constant_op.constant(10.0)
      u = variables.Variable(u_init, name="gdo/u")
      v_init = constant_op.constant(20.0)
      v = variables.Variable(v_init, name="gdo/v")
      w = math_ops.mul(u, v, name="gdo/w")
      # gdo stands for GradientDescentOptimizer.
      train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(
          w, name="gdo/train")
      u.initializer.run()
      v.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      sess.run(train_op, options=run_options, run_metadata=run_metadata)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      update_u_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_u_data))
      # Gradient descent on u: w = u * v, so dw / du = v.
      # Updated value of u should be:
      #   10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
      self.assertAllClose(8.0, update_u_data[0].get_tensor())
      update_v_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_v_data))
      # Gradient descent on v: w = u * v, so dw / dv = u.
      # Updated value of v should be:
      #   20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
      self.assertAllClose(19.0, update_v_data[0].get_tensor())
      # Verify that the Variables u and v are updated properly.
      self.assertAllClose(8.0, sess.run(u))
      self.assertAllClose(19.0, sess.run(v))
  def testWatchingUnconnectedOutputTensor(self):
    """Watch an output slot not emitting any edges.

    (Not even control edges from the node.)

    tf.unique has two output slots; only slot 0 is consumed by the graph,
    so slot 1 (the index tensor) is completely unconnected. The debugger
    must still dump both slots.
    """
    with session.Session() as sess:
      x_init = constant_op.constant([2, 2, 3, 5, 5])
      x = variables.Variable(x_init, name="unconnected/x")
      # The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
      # graph. Let the debugger watch the unused slot 1.
      unique_x, _ = tf.unique(x, name="unconnected/unique_x")
      y = tf.add(unique_x, [0, 1, 2], name="unconnected/y")
      x.initializer.run()
      # Verify that only slot 0 of unique_x has recipients, while slot 1 of the
      # same node does not have recipients.
      unique_x_slot_0_recipients = []
      unique_x_slot_1_recipients = []
      for op in sess.graph.get_operations():
        for inp in op.inputs:
          if inp.name == "unconnected/unique_x:0":
            unique_x_slot_0_recipients.append(op.name)
          elif inp.name == "unconnected/unique_x:1":
            unique_x_slot_1_recipients.append(op.name)
      self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
      self.assertEqual([], unique_x_slot_1_recipients)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      result = sess.run(y, options=run_options, run_metadata=run_metadata)
      self.assertAllClose([2, 4, 7], result)
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Assert that the connected slot (slot 0) is dumped properly.
      unique_x_slot_0_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:0:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_0_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_0_dumps[0].node_name)
      self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
      self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
      # Assert that the unconnected slot (slot 1) is dumped properly.
      unique_x_slot_1_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:1:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_1_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_1_dumps[0].node_name)
      self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
      self.assertAllClose([0, 0, 1, 2, 2],
                          unique_x_slot_1_dumps[0].get_tensor())
def testRunWithError(self):
"""Test the debug tensor dumping when error occurs in graph runtime."""
with session.Session() as sess:
ph = tf.placeholder(tf.float32, name="mismatch/ph")
x = tf.transpose(ph, name="mismatch/x")
m = constant_op.constant(
np.array(
[[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
y = math_ops.matmul(m, x, name="mismatch/y")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.OpError):
sess.run(y,
options=run_options,
feed_dict={ph: np.array([[-3.0], [0.0]])})
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertFalse(dump.loaded_partition_graphs())
m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
self.assertEqual(1, len(m_dumps))
self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
self.assertEqual(1, len(x_dumps))
self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  googletest.main()
|
rmanoni/mi-instrument | refs/heads/master | mi/instrument/star_asimet/bulkmet/metbk_a/test/test_driver.py | 2 | """
@package mi.instrument.star_asimet.bulkmet.metbk_a.test.test_driver
@file marine-integrations/mi/instrument/star_asimet/bulkmet/metbk_a/test/test_driver.py
@author Bill Bollenbacher
@brief Test cases for metbk_a driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Bill Bollenbacher'
__license__ = 'Apache 2.0'
import unittest
from mock import Mock
from nose.plugins.attrib import attr
import time
from mi.core.log import get_logger ;
log = get_logger()
# MI imports.
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.idk.unit_test import InstrumentDriverQualificationTestCase
from mi.idk.unit_test import InstrumentDriverPublicationTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.idk.unit_test import DriverStartupConfigKey
# from interface.objects import AgentCommand
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverEvent
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import InstrumentDriver
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import DataParticleType
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import Command
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import ProtocolState
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import ProtocolEvent
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import Capability
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import Parameter
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import Protocol
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import Prompt
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import NEWLINE
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import METBK_SampleDataParticleKey
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import METBK_SampleDataParticle
from mi.instrument.star_asimet.bulkmet.metbk_a.driver import METBK_StatusDataParticleKey
from mi.core.exceptions import SampleException
from mi.core.exceptions import InstrumentCommandException
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.idk.exceptions import IDKException
# Globals
raw_stream_received = False
parsed_stream_received = False
###
# Driver parameters for the tests
###
# Register this driver's module/class and startup configuration with the IDK
# test framework; the integration/qualification test cases read this config.
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.star_asimet.bulkmet.metbk_a.driver',
    driver_class="InstrumentDriver",
    instrument_agent_resource_id = 'DQPJJX',
    instrument_agent_name = 'star_aismet_ooicore',
    instrument_agent_packet_config = DataParticleType(),
    driver_startup_config = {
        DriverStartupConfigKey.PARAMETERS: {
            # Autosample interval, in seconds.
            Parameter.SAMPLE_INTERVAL: 20,
        },
    }
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DATA PARTICLE TEST MIXIN #
# Defines a set of assert methods used for data particle verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles.
###############################################################################
class UtilMixin(DriverTestMixin):
    '''
    Mixin class used for storing data particle constants and common data
    assertion methods for the METBK-A driver tests.
    '''
    # Create some short names for the parameter test config
    TYPE = ParameterTestConfigKey.TYPE
    READONLY = ParameterTestConfigKey.READONLY
    STARTUP = ParameterTestConfigKey.STARTUP
    DA = ParameterTestConfigKey.DIRECT_ACCESS
    VALUE = ParameterTestConfigKey.VALUE
    REQUIRED = ParameterTestConfigKey.REQUIRED
    DEFAULT = ParameterTestConfigKey.DEFAULT
    STATES = ParameterTestConfigKey.STATES
    # Canned instrument responses used by the chunker and particle tests.
    METBK_STATUS_DATA = "Model: NEWLGR53" + NEWLINE +\
                        "SerNum: 17DEC12" + NEWLINE +\
                        "CfgDat: 17DEC12" + NEWLINE +\
                        "Firmware: LOGR53 v4.11cf" + NEWLINE +\
                        "RTClock: 2013/05/21 13:55:51" + NEWLINE +\
                        "Logging Interval: 60; Current Tick: 6" + NEWLINE +\
                        "R-interval: 1" + NEWLINE +\
                        "Compact Flash Card present - Compact Flash OK!" + NEWLINE +\
                        "Main Battery Voltage:  12.50" + NEWLINE +\
                        "Failed last attempt to update PTT module" + NEWLINE +\
                        "TMP failed" + NEWLINE +\
                        "46B1BAD3E8E9FF7F9681300017D1F446ADBED76909FE7F9601200017D1F4706A" + NEWLINE +\
                        "46A9BED82911FE7F9601400017D1F446A5C2D668F1FE7F9581400017D1F4FFA6" + NEWLINE +\
                        "46A1BED628D9FE7F9581400017D1F4469DC2D7E8C1FE7F9501500017D1F40B4F" + NEWLINE +\
                        "Sampling GO" + NEWLINE
    # Two sample lines that differ only in barometric pressure, so the
    # duplicate-sample suppression logic can be exercised.
    METBK_SAMPLE_DATA1 = "1012.53  44.543  24.090 0.0  1.12 24.240 0.0000 32788.7 -0.03  -0.02 0.0000 12.50" + NEWLINE
    METBK_SAMPLE_DATA2 = "1013.53  44.543  24.090 0.0  1.12 24.240 0.0000 32788.7 -0.03  -0.02 0.0000 12.50" + NEWLINE
    _driver_capabilities = {
        # capabilities defined in the IOS
        Capability.START_AUTOSAMPLE : {STATES: [ProtocolState.COMMAND]},
        Capability.STOP_AUTOSAMPLE : {STATES: [ProtocolState.AUTOSAMPLE]},
        Capability.CLOCK_SYNC : {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE, ProtocolState.SYNC_CLOCK]},
        Capability.ACQUIRE_SAMPLE : {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.ACQUIRE_STATUS : {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.FLASH_STATUS : {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
    }
    ###
    # Parameter and Type Definitions
    ###
    _driver_parameters = {Parameter.CLOCK: {TYPE: str, READONLY: True, DA: False, STARTUP: False, VALUE: "2013/05/21 15:46:30", REQUIRED: True},
                          Parameter.SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 20, REQUIRED: True}}
    ###
    # Data Particle Parameters
    ###
    _sample_parameters = {
        # particle data defined in the METBK driver doc
        METBK_SampleDataParticleKey.BAROMETRIC_PRESSURE : {'type': float, 'value': 1012.53},
        METBK_SampleDataParticleKey.RELATIVE_HUMIDITY : {'type': float, 'value': 44.543},
        METBK_SampleDataParticleKey.AIR_TEMPERATURE : {'type': float, 'value': 24.09},
        METBK_SampleDataParticleKey.LONGWAVE_IRRADIANCE : {'type': float, 'value': 0.0},
        METBK_SampleDataParticleKey.PRECIPITATION : {'type': float, 'value': 1.12},
        METBK_SampleDataParticleKey.SEA_SURFACE_TEMPERATURE : {'type': float, 'value': 24.24},
        METBK_SampleDataParticleKey.SEA_SURFACE_CONDUCTIVITY : {'type': float, 'value': 0.0},
        METBK_SampleDataParticleKey.SHORTWAVE_IRRADIANCE : {'type': float, 'value': 32788.7},
        METBK_SampleDataParticleKey.EASTWARD_WIND_VELOCITY : {'type': float, 'value': -0.03},
        METBK_SampleDataParticleKey.NORTHWARD_WIND_VELOCITY : {'type': float, 'value': -0.02}
        }
    _status_parameters = {
        METBK_StatusDataParticleKey.INSTRUMENT_MODEL : {'type': unicode, 'value': 'NEWLGR53'},
        METBK_StatusDataParticleKey.SERIAL_NUMBER : {'type': unicode, 'value': '17DEC12'},
        METBK_StatusDataParticleKey.CALIBRATION_DATE : {'type': unicode, 'value': '17DEC12'},
        METBK_StatusDataParticleKey.FIRMWARE_VERSION : {'type': unicode, 'value': 'LOGR53 v4.11cf'},
        METBK_StatusDataParticleKey.DATE_TIME_STRING : {'type': unicode, 'value': '2013/05/21 13:55:51'},
        METBK_StatusDataParticleKey.LOGGING_INTERVAL : {'type': int, 'value': 60},
        METBK_StatusDataParticleKey.CURRENT_TICK : {'type': int, 'value': 6},
        METBK_StatusDataParticleKey.RECENT_RECORD_INTERVAL : {'type': int, 'value': 1},
        METBK_StatusDataParticleKey.FLASH_CARD_PRESENCE : {'type': unicode, 'value': 'Compact Flash Card present - Compact Flash OK!'},
        METBK_StatusDataParticleKey.BATTERY_VOLTAGE_MAIN : {'type': float, 'value': 12.50},
        METBK_StatusDataParticleKey.FAILURE_MESSAGES : {'type': list, 'value': ["Failed last attempt to update PTT module",
                                                                               "TMP failed"]},
        METBK_StatusDataParticleKey.PTT_ID1 : {'type': unicode, 'value': '46B1BAD3E8E9FF7F9681300017D1F446ADBED76909FE7F9601200017D1F4706A'},
        METBK_StatusDataParticleKey.PTT_ID2 : {'type': unicode, 'value': '46A9BED82911FE7F9601400017D1F446A5C2D668F1FE7F9581400017D1F4FFA6'},
        METBK_StatusDataParticleKey.PTT_ID3 : {'type': unicode, 'value': '46A1BED628D9FE7F9581400017D1F4469DC2D7E8C1FE7F9501500017D1F40B4F'},
        METBK_StatusDataParticleKey.SAMPLING_STATE : {'type': unicode, 'value': 'GO'},
    }
    ###
    # Driver Parameter Methods
    ###
    def assert_driver_parameters(self, current_parameters, verify_values = False, verify_sample_interval=False):
        """
        Verify that all driver parameters are correct and potentially verify values.
        @param current_parameters: driver parameters read from the driver instance
        @param verify_values: should we verify values against definition?
        @param verify_sample_interval: additionally compare SAMPLE_INTERVAL
               against its expected value
        """
        self.assert_parameters(current_parameters, self._driver_parameters, verify_values)
        if verify_sample_interval:
            self.assertEqual(current_parameters[Parameter.SAMPLE_INTERVAL],
                             self._driver_parameters[Parameter.SAMPLE_INTERVAL][self.VALUE],
                             "sample_interval %d != expected value %d" %(current_parameters[Parameter.SAMPLE_INTERVAL],
                                                                         self._driver_parameters[Parameter.SAMPLE_INTERVAL][self.VALUE]))
    def assert_sample_interval_parameter(self, current_parameters, verify_values = False):
        """
        Verify that sample_interval parameter is correct and potentially verify value.
        @param current_parameters: driver parameters read from the driver instance
        @param verify_values: should we verify values against definition?
        """
        self.assert_parameters(current_parameters, self._driver_parameters, False)
        self.assertEqual(current_parameters[Parameter.SAMPLE_INTERVAL],
                         self._driver_parameters[Parameter.SAMPLE_INTERVAL][self.VALUE],
                         "sample_interval %d != expected value %d" %(current_parameters[Parameter.SAMPLE_INTERVAL],
                                                                     self._driver_parameters[Parameter.SAMPLE_INTERVAL][self.VALUE]))
    ###
    # Data Particle Parameters Methods
    ###
    def assert_data_particle_sample(self, data_particle, verify_values = False):
        '''
        Verify a METBK sample data particle
        @param data_particle: METBK_SampleDataParticle data particle
        @param verify_values: bool, should we verify parameter values
        '''
        self.assert_data_particle_header(data_particle, DataParticleType.METBK_PARSED)
        self.assert_data_particle_parameters(data_particle, self._sample_parameters, verify_values)
    def assert_data_particle_status(self, data_particle, verify_values = False):
        """
        Verify a METBK status data particle
        @param data_particle: METBK_StatusDataParticle data particle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_header(data_particle, DataParticleType.METBK_STATUS)
        self.assert_data_particle_parameters(data_particle, self._status_parameters, verify_values)
    def assert_particle_not_published(self, driver, sample_data, particle_assert_method, verify_values = False):
        """
        Verify that feeding sample_data to the driver does NOT publish a
        particle; raises IDKException if one was published.
        """
        try:
            self.assert_particle_published(driver, sample_data, particle_assert_method, verify_values)
        except AssertionError as e:
            if str(e) == "0 != 1":
                # Zero particles published: exactly what we wanted.
                return
            else:
                # Bare raise preserves the original traceback (unlike
                # "raise e", which would rebind it here).
                raise
        else:
            raise IDKException("assert_particle_not_published: particle was published")
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class TestUNIT(InstrumentDriverUnitTestCase, UtilMixin):
    # Unit tests for the METBK-A driver: enums, chunker sieve, particle
    # generation, capability filtering, FSM capabilities, and driver schema.
    def setUp(self):
        """Standard IDK unit-test setup."""
        InstrumentDriverUnitTestCase.setUp(self)
    def test_driver_enums(self):
        """
        Verify that all driver enumeration has no duplicate values that might cause confusion. Also
        do a little extra validation for the Capabilites
        """
        self.assert_enum_has_no_duplicates(DataParticleType())
        self.assert_enum_has_no_duplicates(ProtocolState())
        self.assert_enum_has_no_duplicates(ProtocolEvent())
        self.assert_enum_has_no_duplicates(Parameter())
        self.assert_enum_has_no_duplicates(Command())
        # Test capabilities for duplicates, them verify that capabilities is a subset of protocol events
        self.assert_enum_has_no_duplicates(Capability())
        self.assert_enum_complete(Capability(), ProtocolEvent())
    def test_chunker(self):
        """
        Test the chunker and verify the particles created.
        """
        chunker = StringChunker(Protocol.sieve_function)
        # Sample lines: whole, with noise, fragmented, and back-to-back.
        self.assert_chunker_sample(chunker, self.METBK_SAMPLE_DATA1)
        self.assert_chunker_sample_with_noise(chunker, self.METBK_SAMPLE_DATA1)
        self.assert_chunker_fragmented_sample(chunker, self.METBK_SAMPLE_DATA1)
        self.assert_chunker_combined_sample(chunker, self.METBK_SAMPLE_DATA1)
        # Multi-line status response through the same four scenarios.
        self.assert_chunker_sample(chunker, self.METBK_STATUS_DATA)
        self.assert_chunker_sample_with_noise(chunker, self.METBK_STATUS_DATA)
        self.assert_chunker_fragmented_sample(chunker, self.METBK_STATUS_DATA)
        self.assert_chunker_combined_sample(chunker, self.METBK_STATUS_DATA)
    def test_corrupt_data_sample(self):
        """Generating a particle from corrupted data raises SampleException."""
        # garbage is not okay
        particle = METBK_SampleDataParticle(self.METBK_SAMPLE_DATA1.replace('-0.03', 'foo'),
                                            port_timestamp = 3558720820.531179)
        with self.assertRaises(SampleException):
            particle.generate()
    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the correct data particles
        """
        # Create and initialize the instrument driver with a mock port agent
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver)
        self.assert_raw_particle_published(driver, True)
        # validating data particles are published
        self.assert_particle_published(driver, self.METBK_STATUS_DATA, self.assert_data_particle_status, True)
        self.assert_particle_published(driver, self.METBK_SAMPLE_DATA1, self.assert_data_particle_sample, True)
        # validate that a duplicate sample is not published
        self.assert_particle_not_published(driver, self.METBK_SAMPLE_DATA1, self.assert_data_particle_sample, True)
        # validate that a new sample is published
        self.assert_particle_published(driver, self.METBK_SAMPLE_DATA2, self.assert_data_particle_sample, False)
    def test_protocol_filter_capabilities(self):
        """
        This tests driver filter_capabilities.
        Iterate through available capabilities, and verify that they can pass successfully through the filter.
        Test silly made up capabilities to verify they are blocked by filter.
        """
        mock_callback = Mock(spec="UNKNOWN WHAT SHOULD GO HERE FOR evt_callback")
        protocol = Protocol(Prompt, NEWLINE, mock_callback)
        driver_capabilities = Capability().list()
        test_capabilities = Capability().list()
        # Add a bogus capability that will be filtered out.
        test_capabilities.append("BOGUS_CAPABILITY")
        # Verify "BOGUS_CAPABILITY was filtered out
        self.assertEquals(sorted(driver_capabilities),
                          sorted(protocol._filter_capabilities(test_capabilities)))
    def test_capabilities(self):
        """
        Verify the FSM reports capabilities as expected.  All states defined in this dict must
        also be defined in the protocol FSM.
        """
        # Expected event strings per protocol state; compared against the FSM.
        capabilities = {
            ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
            ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
                                    'DRIVER_EVENT_SET',
                                    'DRIVER_EVENT_START_AUTOSAMPLE',
                                    'DRIVER_EVENT_START_DIRECT',
                                    'DRIVER_EVENT_ACQUIRE_SAMPLE',
                                    'DRIVER_EVENT_ACQUIRE_STATUS',
                                    'DRIVER_EVENT_CLOCK_SYNC',
                                    'DRIVER_EVENT_FLASH_STATUS'],
            ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_STOP_AUTOSAMPLE',
                                       'DRIVER_EVENT_GET',
                                       'DRIVER_EVENT_ACQUIRE_SAMPLE',
                                       'DRIVER_EVENT_ACQUIRE_STATUS',
                                       'DRIVER_EVENT_CLOCK_SYNC',
                                       'DRIVER_EVENT_FLASH_STATUS'],
            ProtocolState.DIRECT_ACCESS: ['DRIVER_EVENT_STOP_DIRECT',
                                          'EXECUTE_DIRECT'],
            ProtocolState.SYNC_CLOCK: ['DRIVER_EVENT_CLOCK_SYNC']
        }
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_capabilities(driver, capabilities)
    def test_driver_schema(self):
        """
        get the driver schema and verify it is configured properly
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class TestINT(InstrumentDriverIntegrationTestCase, UtilMixin):
    """Integration tests: exercise the driver/instrument interaction via direct zeromq calls."""
    def setUp(self):
        InstrumentDriverIntegrationTestCase.setUp(self)
    def assert_async_particle_not_generated(self, particle_type, timeout=10):
        """Fail if any particle of particle_type is published within timeout seconds."""
        end_time = time.time() + timeout
        while end_time > time.time():
            if len(self.get_sample_events(particle_type)) > 0:
                self.fail("assert_async_particle_not_generated: a particle of type %s was published" %particle_type)
            time.sleep(.3)
    def test_acquire_sample(self):
        """
        Test that we can generate sample particle with command
        """
        self.assert_initialize_driver()
        self.assert_particle_generation(ProtocolEvent.ACQUIRE_SAMPLE, DataParticleType.METBK_PARSED, self.assert_data_particle_sample)
    def test_autosample_particle_generation(self):
        """
        Test that we can generate particles when in autosample.
        To test status particle instrument must be off and powered on will test is waiting
        """
        # put driver into autosample mode
        self.assert_initialize_driver(DriverProtocolState.AUTOSAMPLE)
        # test that sample particle is generated
        log.debug("test_autosample_particle_generation: waiting 60 seconds for instrument data")
        self.assert_async_particle_generation(DataParticleType.METBK_PARSED, self.assert_data_particle_sample, timeout=90)
        # take driver out of autosample mode
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
        # test that sample particle is not generated
        log.debug("test_autosample_particle_generation: waiting 60 seconds for no instrument data")
        self.clear_events()
        self.assert_async_particle_not_generated(DataParticleType.METBK_PARSED, timeout=90)
        # put driver back in autosample mode
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
        # test that sample particle is generated
        log.debug("test_autosample_particle_generation: waiting 60 seconds for instrument data")
        self.assert_async_particle_generation(DataParticleType.METBK_PARSED, self.assert_data_particle_sample, timeout=90)
    def test_parameters(self):
        """
        Test driver parameters and verify their type. Startup parameters also verify the parameter
        value. This test confirms that parameters are being read/converted properly and that
        the startup has been applied.
        """
        self.assert_initialize_driver()
        reply = self.driver_client.cmd_dvr('get_resource', Parameter.ALL)
        self.assert_driver_parameters(reply, verify_sample_interval=True)
    def assert_clock_synced(self):
        """
        Verify the clock is set to the current time with in a few seconds.
        """
        reply = self.driver_client.cmd_dvr('get_resource', Parameter.CLOCK)
        # convert driver's time from formatted date/time string to seconds integer
        instrument_time = time.mktime(time.strptime(reply.get(Parameter.CLOCK).lower(), "%Y/%m/%d %H:%M:%S"))
        # need to convert local machine's time to date/time string and back to seconds to 'drop' the DST attribute so test passes
        # get time from local machine
        lt = time.strftime("%d %b %Y %H:%M:%S", time.gmtime(time.mktime(time.localtime())))
        # convert local time from formatted date/time string to seconds integer to drop DST
        local_time = time.mktime(time.strptime(lt, "%d %b %Y %H:%M:%S"))
        # Now verify that the time matches to within 5 seconds
        self.assertLessEqual(abs(instrument_time - local_time), 5)
    def test_commands(self):
        """
        Run instrument commands from both command and streaming mode.
        """
        self.assert_initialize_driver()
        ####
        # First test in command mode
        ####
        self.assert_driver_command(ProtocolEvent.CLOCK_SYNC, assert_function=self.assert_clock_synced)
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
        self.assert_async_particle_generation(DataParticleType.METBK_PARSED, self.assert_data_particle_sample, timeout=90)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
        self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'.*Sampling STOPPED')
        self.assert_driver_command(ProtocolEvent.FLASH_STATUS, regex=r'Compact Flash Card present - Compact Flash OK!\r\n\r\r\nVolume in drive is .+ bytes free\r\r\n')
        ####
        # Test in streaming mode
        ####
        # Put us in streaming
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
        self.assert_async_particle_generation(DataParticleType.METBK_PARSED, self.assert_data_particle_sample, timeout=90)
        self.assert_driver_command(ProtocolEvent.CLOCK_SYNC, assert_function=self.assert_clock_synced)
        self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'.*Sampling GO')
        self.assert_driver_command(ProtocolEvent.FLASH_STATUS, regex=r'Compact Flash Card present - Compact Flash OK!\r\n\r\r\nVolume in drive is .+ bytes free\r\r\n')
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
        ####
        # Test a bad command
        ####
        self.assert_driver_command_exception('ima_bad_command', exception_class=InstrumentCommandException)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration.  They generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class TestQUAL(InstrumentDriverQualificationTestCase, UtilMixin):
    """Qualification tests: final verification of the driver through the instrument agent."""
    def assert_sample_polled(self, sampleDataAssert, sampleQueue, timeout = 10):
        """
        Test observatory polling function.
        Verifies the acquire_status command.
        """
        # Set up all data subscriptions. Stream names are defined
        # in the driver PACKET_CONFIG dictionary
        self.data_subscribers.start_data_subscribers()
        self.addCleanup(self.data_subscribers.stop_data_subscribers)
        self.assert_enter_command_mode()
        ###
        # Poll for a sample
        ###
        # make sure there aren't any junk samples in the parsed
        # data queue.
        log.debug("Acquire Sample")
        self.data_subscribers.clear_sample_queue(sampleQueue)
        cmd = AgentCommand(command=DriverEvent.ACQUIRE_SAMPLE)
        self.instrument_agent_client.execute_resource(cmd, timeout=timeout)
        # Watch the parsed data queue and return once a sample
        # has been read or the default timeout has been reached.
        samples = self.data_subscribers.get_samples(sampleQueue, 1, timeout = timeout)
        self.assertGreaterEqual(len(samples), 1)
        log.error("SAMPLE: %s" % samples)
        # Verify
        for sample in samples:
            sampleDataAssert(sample)
        self.assert_reset()
        self.doCleanups()
    def test_poll(self):
        '''
        poll for a single sample
        '''
        self.assert_sample_polled(self.assert_data_particle_sample,
                                  DataParticleType.METBK_PARSED)
    def test_autosample(self):
        '''
        start and stop autosample and verify data particle
        '''
        self.assert_sample_autosample(self.assert_data_particle_sample,
                                      DataParticleType.METBK_PARSED,
                                      sample_count=1,
                                      timeout = 60)
    def test_direct_access_telnet_mode(self):
        """
        @brief This test automatically tests that the Instrument Driver properly supports direct access to the physical instrument. (telnet mode)
        """
        self.assert_enter_command_mode()
        # go into direct access
        self.assert_direct_access_start_telnet(timeout=600)
        # "#D" asks the instrument for a data dump; expect any response line back.
        self.tcp_client.send_data("#D\r\n")
        self.assertTrue(self.tcp_client.expect("\r\n"))
        self.assert_direct_access_stop_telnet()
    def test_get_capabilities(self):
        """
        @brief Walk through all driver protocol states and verify capabilities
        returned by get_current_capabilities
        """
        ##################
        #  Command Mode
        ##################
        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
            AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
            AgentCapabilityType.RESOURCE_COMMAND: [
                ProtocolEvent.GET,
                ProtocolEvent.CLOCK_SYNC,
                ProtocolEvent.START_AUTOSAMPLE,
                ProtocolEvent.ACQUIRE_SAMPLE,
                ProtocolEvent.FLASH_STATUS,
                ProtocolEvent.ACQUIRE_STATUS,
            ],
            AgentCapabilityType.RESOURCE_INTERFACE: None,
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
        }
        log.debug("test_get_capabilities: enter command")
        self.assert_enter_command_mode()
        log.debug("test_get_capabilities: in command")
        self.assert_capabilities(capabilities)
        ##################
        #  Streaming Mode
        ##################
        # Same dict, with commands/agent-commands swapped for the streaming state.
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
            ProtocolEvent.GET,
            ProtocolEvent.CLOCK_SYNC,
            ProtocolEvent.ACQUIRE_STATUS,
            ProtocolEvent.ACQUIRE_SAMPLE,
            ProtocolEvent.FLASH_STATUS,
            ProtocolEvent.STOP_AUTOSAMPLE,
        ]
        self.assert_start_autosample()
        self.assert_capabilities(capabilities)
        self.assert_stop_autosample()
        ##################
        #  DA Mode
        ##################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.DIRECT_ACCESS)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = self._common_da_resource_commands()
        self.assert_direct_access_start_telnet()
        self.assert_capabilities(capabilities)
        self.assert_direct_access_stop_telnet()
        #######################
        #  Uninitialized Mode
        #######################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
        capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
        capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
        # Can't call get capabilities when uninitialized anymore?
        #self.assert_reset()
        #self.assert_capabilities(capabilities)
    def test_execute_clock_sync(self):
        """
        Verify we can synchronize the instrument internal clock
        """
        self.assert_enter_command_mode()
        self.assert_execute_resource(ProtocolEvent.CLOCK_SYNC)
        # get the time from the driver
        check_new_params = self.instrument_agent_client.get_resource([Parameter.CLOCK])
        # convert driver's time from formatted date/time string to seconds integer
        instrument_time = time.mktime(time.strptime(check_new_params.get(Parameter.CLOCK).lower(), "%Y/%m/%d %H:%M:%S"))
        # need to convert local machine's time to date/time string and back to seconds to 'drop' the DST attribute so test passes
        # get time from local machine
        lt = time.strftime("%d %b %Y %H:%M:%S", time.gmtime(time.mktime(time.localtime())))
        # convert local time from formatted date/time string to seconds integer to drop DST
        local_time = time.mktime(time.strptime(lt, "%d %b %Y %H:%M:%S"))
        # Now verify that the time matches to within 5 seconds
        self.assertLessEqual(abs(instrument_time - local_time), 5)
    @unittest.skip("Needs new agent code that automatically inits startup params")
    def test_get_parameters(self):
        '''
        verify that parameters can be gotten properly
        '''
        self.assert_enter_command_mode()
        reply = self.instrument_agent_client.get_resource(Parameter.ALL)
        self.assert_driver_parameters(reply, verify_sample_interval=True)
###############################################################################
# PUBLICATION TESTS #
# Device specific publication tests are for                                   #
# testing device specific capabilities #
###############################################################################
@attr('PUB', group='mi')
class TestPUB(InstrumentDriverPublicationTestCase, UtilMixin):
    """Publication tests: verify data granules are generated from raw/sample/status input."""
    def test_granule_generation(self):
        self.assert_initialize_driver()
        # Currently these tests only verify that the data granule is generated, but the values
        # are not tested.  We will eventually need to replace log.debug with a better callback
        # function that actually tests the granule.
        self.assert_sample_async("raw data", log.debug, DataParticleType.RAW, timeout=10)
        self.assert_sample_async(self.METBK_SAMPLE_DATA, log.debug, DataParticleType.METBK_PARSED, timeout=10)
        self.assert_sample_async(self.METBK_STATUS_DATA, log.debug, DataParticleType.METBK_STATUS, timeout=10)
|
ArcherSys/ArcherSys | refs/heads/master | Lib/site-packages/libpasteurize/fixes/fix_metaclass.py | 61 | u"""
Fixer for (metaclass=X) -> __metaclass__ = X
Some semantics (see PEP 3115) may be altered in the translation."""
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, syms, Node, Leaf, Newline, find_root
from lib2to3.pygram import token
from libfuturize.fixer_util import indentation, suitify
# from ..fixer_util import Name, syms, Node, Leaf, Newline, find_root, indentation, suitify
def has_metaclass(parent):
    """Locate a ``metaclass=X`` keyword argument in a classdef node.

    *parent* is a lib2to3 ``classdef`` node. Returns the nodes making up the
    keyword argument (plus, for the arglist case, the comma that precedes it)
    so the caller can remove them, or ``None`` when no metaclass is present.
    """
    results = None
    for node in parent.children:
        kids = node.children
        if node.type == syms.argument:
            # Sole-argument case: "class X(metaclass=Y)" parses the keyword
            # directly as a single `argument` node.
            if kids[0] == Leaf(token.NAME, u"metaclass") and \
                kids[1] == Leaf(token.EQUAL, u"=") and \
                kids[2]:
                #Hack to avoid "class X(=):" with this case.
                results = [node] + kids
                break
        elif node.type == syms.arglist:
            # Argument list... loop through it looking for:
            # Node(*, [*, Leaf(token.NAME, u"metaclass"), Leaf(token.EQUAL, u"="), Leaf(*, *)]
            for child in node.children:
                if results: break
                if child.type == token.COMMA:
                    #Store the last comma, which precedes the metaclass
                    comma = child
                elif type(child) == Node:
                    meta = equal = name = None
                    for arg in child.children:
                        if arg == Leaf(token.NAME, u"metaclass"):
                            #We have the (metaclass) part
                            meta = arg
                        elif meta and arg == Leaf(token.EQUAL, u"="):
                            #We have the (metaclass=) part
                            equal = arg
                        elif meta and equal:
                            #Here we go, we have (metaclass=X)
                            name = arg
                            results = (comma, meta, equal, name)
                            break
    return results
class FixMetaclass(fixer_base.BaseFix):
    """Rewrite ``class X(metaclass=Y)`` into a Py2-style ``__metaclass__ = Y``.

    Matches every classdef; `transform` is a no-op unless `has_metaclass`
    finds the keyword argument.
    """
    PATTERN = u"""
    classdef<any*>
    """
    def transform(self, node, results):
        meta_results = has_metaclass(node)
        if not meta_results: return
        # Strip the metaclass keyword (and preceding comma) from the header.
        for meta in meta_results:
            meta.remove()
        target = Leaf(token.NAME, u"__metaclass__")
        equal = Leaf(token.EQUAL, u"=", prefix=u" ")
        # meta is the last item in what was returned by has_metaclass(): name
        name = meta
        name.prefix = u" "
        stmt_node = Node(syms.atom, [target, equal, name])
        suitify(node)
        for item in node.children:
            if item.type == syms.suite:
                for stmt in item.children:
                    if stmt.type == token.INDENT:
                        # Insert, in reverse order, the statement, a newline,
                        # and an indent right after the first indented line
                        loc = item.children.index(stmt) + 1
                        # Keep consistent indentation form
                        ident = Leaf(token.INDENT, stmt.value)
                        item.insert_child(loc, ident)
                        item.insert_child(loc, Newline())
                        item.insert_child(loc, stmt_node)
                        break
|
barseghyanartur/python-social-auth | refs/heads/master | examples/django_me_example/example/app/pipeline.py | 112 | from django.shortcuts import redirect
from social.pipeline.partial import partial
@partial
def require_email(strategy, details, user=None, is_new=False, *args, **kwargs):
    """Partial pipeline step that collects an email address for new users.

    Skips silently for AJAX requests or users that already have an email.
    For a new user with no email in the auth details, picks the address from
    the submitted request data when available, otherwise redirects to the
    ``require_email`` view so the user can supply one (the partial pipeline
    resumes afterwards).
    """
    # Nothing to do on AJAX requests or when an email is already known.
    if kwargs.get('ajax') or (user and user.email):
        return
    # Only a brand-new user lacking an email needs intervention.
    if not is_new or details.get('email'):
        return
    email = strategy.request_data().get('email')
    if email:
        details['email'] = email
    else:
        return redirect('require_email')
|
rodrigovrgs/cazy-parser | refs/heads/master | setup.py | 1 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from setuptools.command.install import install
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup.py; used to resolve files relative to it.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
class CazyParser(install):
    """Custom install command; currently just delegates to the default install."""
    def run(self):
        # No extra install-time steps needed yet; defer to setuptools.
        install.run(self)
setup(
    name='cazy-parser',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='1.4.2',
    description='A way to extract specific information from CAZy',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/rodrigovrgs/cazy-parser',
    # Author details
    author='Rodrigo Honorato',
    author_email='rvhonorato@gmail.com',
    # Choose your license
    license='GPL3',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',
        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        # Specify the Python versions you support here. In particular, ensure
        'Programming Language :: Python :: 3.6',
    ],
    # What does your project relate to?
    keywords='cazy database datamining',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['beautifulsoup4', 'progressbar2'],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
    },
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={'cazy-parser': [],
    },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files  # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    # Each entry here becomes a console command installed alongside the package.
    entry_points={
        'console_scripts': [
            'create_cazy_db=cazy_parser.create_cazy_db:main',
            'extract_cazy_ids=cazy_parser.extract_cazy_ids:main',
        ],
    },
    cmdclass={'install': CazyParser}
)
|
tensorflow/federated | refs/heads/master | tensorflow_federated/python/examples/simple_fedavg/simple_fedavg_tff.py | 1 | # Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the Federated Averaging algorithm.
This is intended to be a minimal stand-alone implementation of Federated
Averaging, suitable for branching as a starting point for algorithm
modifications; see `tff.learning.build_federated_averaging_process` for a
more full-featured implementation.
Based on the paper:
Communication-Efficient Learning of Deep Networks from Decentralized Data
H. Brendan McMahan, Eider Moore, Daniel Ramage,
Seth Hampson, Blaise Aguera y Arcas. AISTATS 2017.
https://arxiv.org/abs/1602.05629
"""
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.examples.simple_fedavg.simple_fedavg_tf import build_server_broadcast_message
from tensorflow_federated.python.examples.simple_fedavg.simple_fedavg_tf import client_update
from tensorflow_federated.python.examples.simple_fedavg.simple_fedavg_tf import server_update
from tensorflow_federated.python.examples.simple_fedavg.simple_fedavg_tf import ServerState
def _initialize_optimizer_vars(model, optimizer):
  """Eagerly builds the optimizer's internal state variables.

  Keras optimizers create their slot variables (e.g. for Adam, Adagrad, or
  momentum) lazily on first use. Applying an all-zero gradient forces those
  variables into existence without modifying the model weights.

  Args:
    model: A model wrapper whose `weights.trainable` structure determines
      the shapes of the optimizer variables.
    optimizer: A `tf.keras.optimizers.Optimizer` to initialize.
  """
  trainable = model.weights.trainable
  zero_grads = [tf.zeros_like(weight) for weight in trainable]
  optimizer.apply_gradients(list(zip(zero_grads, trainable)))
  assert optimizer.variables()
def build_federated_averaging_process(
    model_fn,
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1)):
  """Builds the TFF computations for optimization using federated averaging.

  Args:
    model_fn: A no-arg function that returns a
      `simple_fedavg_tf.KerasModelWrapper`.
    server_optimizer_fn: A no-arg function that returns a
      `tf.keras.optimizers.Optimizer` for server update.
    client_optimizer_fn: A no-arg function that returns a
      `tf.keras.optimizers.Optimizer` for client update.

  Returns:
    A `tff.templates.IterativeProcess`.
  """
  # Built only to read the input spec; not trained.
  whimsy_model = model_fn()
  @tff.tf_computation
  def server_init_tf():
    # Creates the initial server state (weights, optimizer slots, round 0).
    model = model_fn()
    server_optimizer = server_optimizer_fn()
    _initialize_optimizer_vars(model, server_optimizer)
    return ServerState(
        model_weights=model.weights,
        optimizer_state=server_optimizer.variables(),
        round_num=0)
  server_state_type = server_init_tf.type_signature.result
  model_weights_type = server_state_type.model_weights
  @tff.tf_computation(server_state_type, model_weights_type.trainable)
  def server_update_fn(server_state, model_delta):
    # Applies the aggregated client delta to the server model.
    model = model_fn()
    server_optimizer = server_optimizer_fn()
    _initialize_optimizer_vars(model, server_optimizer)
    return server_update(model, server_optimizer, server_state, model_delta)
  @tff.tf_computation(server_state_type)
  def server_message_fn(server_state):
    # Packs the state the server broadcasts to clients each round.
    return build_server_broadcast_message(server_state)
  server_message_type = server_message_fn.type_signature.result
  tf_dataset_type = tff.SequenceType(whimsy_model.input_spec)
  @tff.tf_computation(tf_dataset_type, server_message_type)
  def client_update_fn(tf_dataset, server_message):
    # Local training on one client's dataset.
    model = model_fn()
    client_optimizer = client_optimizer_fn()
    return client_update(model, tf_dataset, server_message, client_optimizer)
  federated_server_state_type = tff.type_at_server(server_state_type)
  federated_dataset_type = tff.type_at_clients(tf_dataset_type)
  @tff.federated_computation(federated_server_state_type,
                             federated_dataset_type)
  def run_one_round(server_state, federated_dataset):
    """Orchestration logic for one round of computation.

    Args:
      server_state: A `ServerState`.
      federated_dataset: A federated `tf.data.Dataset` with placement
        `tff.CLIENTS`.

    Returns:
      A tuple of updated `ServerState` and `tf.Tensor` of average loss.
    """
    server_message = tff.federated_map(server_message_fn, server_state)
    server_message_at_client = tff.federated_broadcast(server_message)
    client_outputs = tff.federated_map(
        client_update_fn, (federated_dataset, server_message_at_client))
    # Weight client contributions by their reported client_weight.
    weight_denom = client_outputs.client_weight
    round_model_delta = tff.federated_mean(
        client_outputs.weights_delta, weight=weight_denom)
    server_state = tff.federated_map(server_update_fn,
                                     (server_state, round_model_delta))
    round_loss_metric = tff.federated_mean(
        client_outputs.model_output, weight=weight_denom)
    return server_state, round_loss_metric
  @tff.federated_computation
  def server_init_tff():
    """Orchestration logic for server model initialization."""
    return tff.federated_value(server_init_tf(), tff.SERVER)
  return tff.templates.IterativeProcess(
      initialize_fn=server_init_tff, next_fn=run_one_round)
|
tillahoffmann/tensorflow | refs/heads/master | tensorflow/compiler/tests/variable_ops_test.py | 11 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reading and writing variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
class VariableOpsTest(XLATestCase):
  """Test cases for resource variable operators."""

  def testOneWriteOneOutput(self):
    # Regression test for a bug where computations with one non-constant
    # output and one variable update were mishandled.
    for dtype in self.numeric_types:
      init = np.array([[1, 2], [3, 4]], dtype=dtype)
      with self.test_session() as sess, self.test_scope():
        v = resource_variable_ops.ResourceVariable(init)
        sess.run(variables.variables_initializer([v]))
        p = array_ops.placeholder(dtype)
        x = v.assign_add(p)
        with ops.control_dependencies([x]):
          y = v.read_value()
        self.assertAllClose(np.array([[2, 3], [4, 5]], dtype=dtype),
                            sess.run(y, {p: 1}))

  def testSparseRead0DIndices(self):
    # sparse_read with a scalar index selects a single row.
    for dtype in self.numeric_types:
      init = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], dtype=dtype)
      with self.test_session() as sess, self.test_scope():
        v = resource_variable_ops.ResourceVariable(init)
        sess.run(variables.variables_initializer([v]))
        x = v.sparse_read(2)
        self.assertAllClose(np.array([8, 9, 10, 11], dtype=dtype), sess.run(x))

  def testSparseRead1DIndices(self):
    # A vector of indices gathers multiple rows, preserving index order.
    for dtype in self.numeric_types:
      init = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], dtype=dtype)
      with self.test_session() as sess, self.test_scope():
        v = resource_variable_ops.ResourceVariable(init)
        sess.run(variables.variables_initializer([v]))
        x = v.sparse_read([2, 1])
        self.assertAllClose(
            np.array([[8, 9, 10, 11], [4, 5, 6, 7]], dtype=dtype), sess.run(x))

  def testSparseRead2DIndices(self):
    # A 2-D index tensor yields a result with an extra leading dimension.
    for dtype in self.numeric_types:
      init = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], dtype=dtype)
      with self.test_session() as sess, self.test_scope():
        v = resource_variable_ops.ResourceVariable(init)
        sess.run(variables.variables_initializer([v]))
        x = v.sparse_read([[2, 1], [0, 2]])
        self.assertAllClose(
            np.array(
                [[[8, 9, 10, 11], [4, 5, 6, 7]], [[0, 1, 2, 3], [8, 9, 10,
                                                                 11]]],
                dtype=dtype), sess.run(x))

  def testSparseRead2DIndices3DTensor(self):
    # Gathering from a rank-3 variable with 2-D indices.
    for dtype in self.numeric_types:
      init = np.array(
          [[[0, 1, 2], [3, 4, 5]], [[10, 11, 12], [13, 14, 15]],
           [[20, 21, 22], [23, 24, 25]], [[30, 31, 32], [33, 34, 35]]],
          dtype=dtype)
      with self.test_session() as sess, self.test_scope():
        v = resource_variable_ops.ResourceVariable(init)
        sess.run(variables.variables_initializer([v]))
        x = v.sparse_read([[2, 1], [3, 0]])
        self.assertAllClose(
            np.array(
                [[[[20, 21, 22], [23, 24, 25]], [[10, 11, 12], [13, 14, 15]]],
                 [[[30, 31, 32], [33, 34, 35]], [[0, 1, 2], [3, 4, 5]]]],
                dtype=dtype), sess.run(x))

  def testReadWrite(self):
    """Tests initialization, reading, and writing a resource variable."""
    with self.test_session() as session:
      with self.test_scope():
        with variable_scope.variable_scope("ascope", use_resource=True):
          x = variable_scope.get_variable(
              "x",
              shape=[],
              dtype=dtypes.float32,
              initializer=init_ops.constant_initializer(2))
          a = x.read_value()
          with ops.control_dependencies([a]):
            b = state_ops.assign(x, 47)
          with ops.control_dependencies([b]):
            c = x.read_value()
          with ops.control_dependencies([c]):
            d = state_ops.assign_add(x, 3)
          with ops.control_dependencies([d]):
            e = x.read_value()
          session.run(variables.global_variables_initializer())
          v1, v2, v3 = session.run([a, c, e])
          self.assertAllClose(2.0, v1)
          self.assertAllClose(47.0, v2)
          self.assertAllClose(50.0, v3)

  def testTraining(self):
    """Tests a gradient descent step for a simple model."""
    with self.test_session() as session:
      with self.test_scope():
        with variable_scope.variable_scope("ascope", use_resource=True):
          w = variable_scope.get_variable(
              "w",
              shape=[4, 2],
              dtype=dtypes.float32,
              initializer=init_ops.constant_initializer(
                  np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.float32)))
          b = variable_scope.get_variable(
              "b",
              shape=[2],
              dtype=dtypes.float32,
              initializer=init_ops.constant_initializer(
                  np.array([2, 3], dtype=np.float32)))
          x = array_ops.placeholder(dtypes.float32, shape=[1, 4])
          y = math_ops.matmul(x, w) + b
          loss = math_ops.reduce_sum(y)
          optimizer = GradientDescentOptimizer(0.1)
          train = optimizer.minimize(loss)
          session.run(variables.global_variables_initializer())
          session.run(train, {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
          vw, vb = session.run([w, b])
          self.assertAllClose(
              np.array(
                  [[0.3, 1.3], [2.7, 3.7], [4.5, 5.5], [6.1, 7.1]],
                  dtype=np.float32),
              vw,
              rtol=1e-4)
          self.assertAllClose(np.array([1.9, 2.9], dtype=np.float32), vb, rtol=1e-4)
class StridedSliceAssignChecker(object):
  """Compares the results of a slice assignment using Tensorflow and numpy."""

  def __init__(self, test, x, dtype):
    # test: the XLATestCase providing session/scope helpers and assertions.
    # x: initial value of the variable under test; dtype: element type.
    self.dtype = dtype
    self.test = test
    self.x_np = np.array(x).astype(dtype)
    # Randomly start on mode 0 or 1.
    self.which_mode = np.random.randint(2, size=1)[0]

  def __setitem__(self, index, value):
    # Alternate between var[index].assign(value) and
    # state_ops.assign(var[index], value) so both code paths get exercised.
    self.which_mode = 1 - self.which_mode
    value = np.array(value).astype(self.dtype)
    with self.test.test_session() as sess, self.test.test_scope():
      x = constant_op.constant(self.x_np, dtype=self.dtype)
      var = resource_variable_ops.ResourceVariable(x)
      sess.run(variables.variables_initializer([var]))
      if self.which_mode == 0:
        val = sess.run(var[index].assign(value))
      else:
        assert self.which_mode == 1
        val = sess.run(state_ops.assign(var[index], value))
      # Perform the same assignment in numpy and require identical results.
      valnp = np.copy(self.x_np)
      valnp[index] = np.array(value)
      self.test.assertAllEqual(val, valnp)
class SliceAssignTest(XLATestCase):
  """Tests strided-slice assignment to resource variables under XLA."""

  def testSliceAssign(self):
    for dtype in self.numeric_types:
      checker = StridedSliceAssignChecker(self, [[1, 2, 3], [4, 5, 6]],
                                          dtype=dtype)
      # No-op assignment
      checker[:] = [[10, 20, 30], [40, 50, 60]]
      # Checks trivial (1,1) shape tensor
      checker[1:2, 1:2] = [[66]]
      # shrink shape changes
      checker[1:2, 1] = [66]
      checker[1, 1:2] = [66]
      checker[1, 1] = 66
      # newaxis shape changes
      checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]]
      # shrink and newaxis
      checker[None, None, 0, 0:1] = [[[99]]]
      # Non unit strides
      checker[::1, 1::-1] = [[3, 33], [4, 44]]
      # degenerate interval
      checker[8:10, 0] = []
      checker[8:10, 8:10] = [[]]
      # Assign vector to scalar (rank-0) using newaxis
      checker2 = StridedSliceAssignChecker(self, 222, dtype=dtype)
      checker2[()] = 6  # no indices
      checker2[...] = 6  # ellipsis
      checker2[None] = [6]  # new axis

  def testUninitialized(self):
    # Assigning to a never-initialized variable must raise, not crash.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "uninitialized variable"):
      with self.test_session() as sess, self.test_scope():
        v = resource_variable_ops.ResourceVariable([1, 2])
        sess.run(v[:].assign([1, 2]))
# Run the test suite when executed directly.
if __name__ == "__main__":
  googletest.main()
|
javierTerry/odoo | refs/heads/8.0 | addons/sale/wizard/__init__.py | 444 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_make_invoice
import sale_line_invoice
import sale_make_invoice_advance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
juharris/tensorflow | refs/heads/master | tensorflow/contrib/testing/__init__.py | 16 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.testing.python.framework.fake_summary_writer import *
from tensorflow.contrib.testing.python.framework.util_test import *
|
DerThorsten/nifty | refs/heads/master | src/python/test/cgp/test_cgp.py | 1 | import nifty.cgp as ncgp
import nifty.graph.rag as nrag
import unittest
import nifty
import unittest
import nifty.cgp as ncgp
import numpy
numpy.random.seed(42)
class TestCgp2d(unittest.TestCase):
    """Tests for ncgp.TopologicalGrid2D.

    Covers hand-crafted 3x3 corner cases (junction-free boundaries,
    isolated interior regions, transposed / non-contiguous input) plus
    randomized smoke tests on connected-component labelings of random
    images of several sizes.
    """

    def _check_geometry(self, tGrid, numberOfCells):
        # Geometry extraction must work in every fill/sort combination and
        # yield exactly numberOfCells[c] geometries for each cell order c.
        for fill in (True, False):
            for sort1Cells in (True, False):
                geo = tGrid.extractCellsGeometry(fill=fill,
                                                 sort1Cells=sort1Cells)
                for c in (0, 1, 2):
                    self.assertEqual(len(geo[c]), numberOfCells[c])

    def _check_grid(self, seg, cellCounts):
        """Build a TopologicalGrid2D from ``seg`` and verify cell counts,
        shapes, bounds relations and geometry extraction.

        ``cellCounts`` is ``[n0, n1, n2]``: the expected number of
        junctions (0-cells), boundaries (1-cells) and regions (2-cells).
        """
        tGrid = ncgp.TopologicalGrid2D(seg)
        numberOfCells = tGrid.numberOfCells
        self.assertEqual(numberOfCells, cellCounts)
        # the topological grid is twice the image size minus one per axis
        self.assertEqual(tGrid.topologicalGridShape,
                         [2 * s - 1 for s in seg.shape])
        self.assertEqual(tGrid.shape, list(seg.shape))
        # bounds relations: 0-cells bound 1-cells and 1-cells bound
        # 2-cells, so the relation sizes follow from the cell counts
        bounds = tGrid.extractCellsBounds()
        bounds0, bounds1 = bounds[0], bounds[1]
        boundedBy1 = bounds0.reverseMapping()
        boundedBy2 = bounds1.reverseMapping()
        self.assertEqual(len(bounds0), cellCounts[0])
        self.assertEqual(len(bounds1), cellCounts[1])
        self.assertEqual(len(boundedBy1), cellCounts[1])
        self.assertEqual(len(boundedBy2), cellCounts[2])
        self._check_geometry(tGrid, numberOfCells)

    def test_corner_case_3x3_grid_a(self):
        # 4 one-cells are active but there is still no junction (0-cell)
        seg = numpy.array([[1, 1, 2],
                           [1, 3, 1],
                           [1, 1, 1]], dtype='uint32')
        self._check_grid(seg, [0, 2, 3])

    def test_corner_case_3x3_grid_b(self):
        # single interior region: one closed boundary, no junctions
        seg = numpy.array([[1, 1, 1],
                           [1, 2, 1],
                           [1, 1, 1]], dtype='uint32')
        self._check_grid(seg, [0, 1, 2])

    def test_corner_case_3x3_grid_c(self):
        # three regions meeting: two junctions, four boundaries
        seg = numpy.array([[1, 1, 3],
                           [1, 2, 3],
                           [1, 1, 3]], dtype='uint32')
        self._check_grid(seg, [2, 4, 3])

    def test_corner_case_3x3_grid_d(self):
        seg = [
            [1, 1, 1],
            [1, 2, 1],
            [1, 1, 3]
        ]
        #      01234
        # --------------------
        # 0   |1|1|1|  0
        # 1   |-*-*-|  1
        # 2   |1|2|1|  2
        # 3   |-*-*-|  3
        # 4   |1|1|3|  4
        # ----------------------
        #      01234
        # transpose to also exercise non-C-contiguous input
        seg = numpy.array(seg, dtype='uint32').T
        self._check_grid(seg, [0, 2, 3])

    def _check_randomized(self, shape, low, high, repeats):
        """Randomized smoke test.

        Builds a connected-component labeling of a random ``shape`` image
        with labels drawn from ``[low, high)``, constructs the topological
        grid and checks that the number of 2-cells equals the number of
        components; bounds and geometry extraction must not raise.  On
        failure the offending input is printed for reproduction and the
        exception re-raised so the test run reports a real failure
        (instead of swallowing the error and terminating the process).
        """
        for _ in range(repeats):
            size = shape[0] * shape[1]
            labels = numpy.random.randint(low, high, size=size).reshape(shape)
            gg = nifty.graph.undirectedGridGraph(shape)
            cc = nifty.graph.connectedComponentsFromNodeLabels(gg, labels.ravel())
            cc = numpy.require(cc.reshape(shape) + 1, dtype='uint32')
            tGrid = ncgp.TopologicalGrid2D(cc)
            numberOfCells = tGrid.numberOfCells
            self.assertEqual(numberOfCells[2], cc.max())
            # the reverse mappings must be constructible without error
            bounds = tGrid.extractCellsBounds()
            bounds[0].reverseMapping()
            bounds[1].reverseMapping()
            try:
                # geometry extraction must not raise in any variant
                for fill in (True, False):
                    for sort1Cells in (True, False):
                        tGrid.extractCellsGeometry(fill=fill,
                                                   sort1Cells=sort1Cells)
            except Exception:
                # dump the offending input for reproduction, then fail
                print(cc)
                print("labels")
                print(labels)
                raise

    def test_randomized_big(self):
        self._check_randomized((10, 20), 0, 4, repeats=100)

    def test_randomized_medium(self):
        self._check_randomized((7, 7), 0, 4, repeats=1000)

    def test_randomized_small(self):
        self._check_randomized((4, 3), 1, 5, repeats=3000)
if __name__ == '__main__':
unittest.main()
|
CloudBoltSoftware/cloudbolt-forge | refs/heads/master | blueprints/eks/management/enable_nodes.py | 12133432 | |
davidcusatis/horizon | refs/heads/master | openstack_dashboard/dashboards/project/network_topology/networks/__init__.py | 12133432 | |
aptivate/alfie | refs/heads/master | django/website/logframe/tests/test_admin.py | 3 | from mock import Mock
from ..admin import RatingAdmin, SubIndicatorAdmin
from ..models import colors, Rating, SubIndicator
def test_sub_indicator_admin_result_returns_indicator_result():
    """SubIndicatorAdmin.result must delegate to the indicator's result."""
    fake_indicator = Mock(result='result')
    fake_sub = Mock(indicator=fake_indicator)
    admin = SubIndicatorAdmin(SubIndicator, None)
    assert admin.result(fake_sub) == fake_sub.indicator.result
def test_rating_admin_colored_name_returns_name_for_colours():
    """RatingAdmin.colored_name wraps the rating's color label in a span."""
    color_key, color_label = colors[0]
    rating = Mock(color=color_key)
    admin = RatingAdmin(Rating, None)
    expected = '<span class="rating-list-item {0}">{1}</span>'.format(
        color_key, color_label)
    assert admin.colored_name(rating) == expected
|
dqnykamp/sympy | refs/heads/master | sympy/polys/domains/pythonintegerring.py | 117 | """Implementation of :class:`PythonIntegerRing` class. """
from __future__ import print_function, division
from sympy.polys.domains.integerring import IntegerRing
from sympy.polys.domains.groundtypes import (
PythonInteger, SymPyInteger, python_sqrt,
python_factorial, python_gcdex, python_gcd, python_lcm,
)
from sympy.polys.polyerrors import CoercionFailed
from sympy.utilities import public
@public
class PythonIntegerRing(IntegerRing):
    """Integer ring based on Python's ``int`` type. """

    # ground type of the domain and its canonical constants
    dtype = PythonInteger
    zero = dtype(0)
    one = dtype(1)
    alias = 'ZZ_python'

    def __init__(self):
        """Allow instantiation of this domain. """

    def to_sympy(self, a):
        """Convert ``a`` to a SymPy object. """
        return SymPyInteger(a)

    def from_sympy(self, a):
        """Convert SymPy's Integer to ``dtype``. """
        if a.is_Integer:
            return PythonInteger(a.p)
        elif a.is_Float and int(a) == a:
            # accept a Float only when it is exactly integral, e.g. 2.0
            return PythonInteger(int(a))
        else:
            raise CoercionFailed("expected an integer, got %s" % a)

    # NOTE(review): the from_* converters below fall through and return
    # None when the value is not representable as an integer (e.g. a
    # rational with non-unit denominator); presumably None signals a
    # failed coercion to the domain conversion framework -- confirm.

    def from_FF_python(K1, a, K0):
        """Convert ``ModularInteger(int)`` to Python's ``int``. """
        return a.to_int()

    def from_ZZ_python(K1, a, K0):
        """Convert Python's ``int`` to Python's ``int``. """
        return a

    def from_QQ_python(K1, a, K0):
        """Convert Python's ``Fraction`` to Python's ``int``. """
        if a.denominator == 1:
            return a.numerator

    def from_FF_gmpy(K1, a, K0):
        """Convert ``ModularInteger(mpz)`` to Python's ``int``. """
        return PythonInteger(a.to_int())

    def from_ZZ_gmpy(K1, a, K0):
        """Convert GMPY's ``mpz`` to Python's ``int``. """
        return PythonInteger(a)

    def from_QQ_gmpy(K1, a, K0):
        """Convert GMPY's ``mpq`` to Python's ``int``. """
        if a.denom() == 1:
            return PythonInteger(a.numer())

    def from_RealField(K1, a, K0):
        """Convert mpmath's ``mpf`` to Python's ``int``. """
        # only exact integers survive the round-trip through a rational
        p, q = K0.to_rational(a)
        if q == 1:
            return PythonInteger(p)

    def gcdex(self, a, b):
        """Compute extended GCD of ``a`` and ``b``. """
        return python_gcdex(a, b)

    def gcd(self, a, b):
        """Compute GCD of ``a`` and ``b``. """
        return python_gcd(a, b)

    def lcm(self, a, b):
        """Compute LCM of ``a`` and ``b``. """
        return python_lcm(a, b)

    def sqrt(self, a):
        """Compute square root of ``a``. """
        return python_sqrt(a)

    def factorial(self, a):
        """Compute factorial of ``a``. """
        return python_factorial(a)
|
songmonit/CTTMSONLINE | refs/heads/master | addons/google_spreadsheet/__openerp__.py | 312 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Google Spreadsheet',
'version': '1.0',
'category': 'Tools',
'description': """
The module adds the possibility to display data from OpenERP in Google Spreadsheets in real time.
=================================================================================================
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['board', 'google_drive'],
'data' : [
'google_spreadsheet_view.xml',
'google_spreadsheet_data.xml',
'views/google_spreadsheet.xml',
],
'qweb': ['static/src/xml/*.xml'],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ran5515/DeepDecision | refs/heads/master | tensorflow/contrib/opt/python/training/lazy_adam_optimizer_test.py | 104 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LazyAdamOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import lazy_adam_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
                      g_t,
                      t,
                      m,
                      v,
                      alpha=0.001,
                      beta1=0.9,
                      beta2=0.999,
                      epsilon=1e-8):
  """Reference NumPy implementation of a single Adam update step.

  Args:
    param: current parameter values.
    g_t: gradient at step ``t``.
    t: 1-based step number (used for bias correction).
    m: first-moment accumulator from the previous step.
    v: second-moment accumulator from the previous step.
    alpha: base learning rate.
    beta1: exponential decay rate for the first moment.
    beta2: exponential decay rate for the second moment.
    epsilon: small constant for numerical stability.

  Returns:
    Tuple ``(param_t, m_t, v_t)`` of updated parameters and moments.
  """
  # bias-corrected step size
  lr_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)

  # exponential moving averages of the gradient and its square
  m_t = m * beta1 + (1 - beta1) * g_t
  v_t = v * beta2 + (1 - beta2) * g_t * g_t

  param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
  return param_t, m_t, v_t
class AdamOptimizerTest(test.TestCase):
  """Tests for LazyAdamOptimizer's sparse (IndexedSlices) update path."""

  def testSparse(self):
    """Sparse updates must match the dense NumPy Adam reference."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        # Gradients cover every index, so the lazy update should produce
        # exactly the dense result computed by adam_update_numpy.
        grads0_np_indices = np.array([0, 1], dtype=np.int32)
        grads0 = ops.IndexedSlices(
            constant_op.constant(grads0_np),
            constant_op.constant(grads0_np_indices), constant_op.constant([2]))
        grads1_np_indices = np.array([0, 1], dtype=np.int32)
        grads1 = ops.IndexedSlices(
            constant_op.constant(grads1_np),
            constant_op.constant(grads1_np_indices), constant_op.constant([2]))
        opt = lazy_adam_optimizer.LazyAdamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Run 3 steps of Adam
        for t in range(1, 4):
          # Accumulators hold beta^t *before* the t-th update runs.
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testSparseDevicePlacement(self):
    """All optimizer ops must be placeable on the available device."""
    for index_dtype in [dtypes.int32, dtypes.int64]:
      with self.test_session(force_gpu=test.is_gpu_available()):
        # If a GPU is available, tests that all optimizer ops can be placed on
        # it (i.e. they have GPU kernels).
        var = variables.Variable([[1.0], [2.0]])
        indices = constant_op.constant([0, 1], dtype=index_dtype)
        gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
        optimizer = lazy_adam_optimizer.LazyAdamOptimizer(3.0)
        minimize_op = optimizer.minimize(gathered_sum)
        variables.global_variables_initializer().run()
        minimize_op.run()

  def testSparseRepeatedIndices(self):
    """Repeated indices in a gradient must behave like their aggregation."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        repeated_index_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        aggregated_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        # Two 0.1 contributions at index 1 ...
        grad_repeated_index = ops.IndexedSlices(
            constant_op.constant(
                [0.1, 0.1], shape=[2, 1], dtype=dtype),
            constant_op.constant([1, 1]),
            constant_op.constant([2, 1]))
        # ... must equal a single pre-summed 0.2 contribution at index 1.
        grad_aggregated = ops.IndexedSlices(
            constant_op.constant(
                [0.2], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        repeated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
        repeated_update = repeated_update_opt.apply_gradients(
            [(grad_repeated_index, repeated_index_update_var)])
        aggregated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
        aggregated_update = aggregated_update_opt.apply_gradients(
            [(grad_aggregated, aggregated_update_var)])
        variables.global_variables_initializer().run()
        self.assertAllClose(aggregated_update_var.eval(),
                            repeated_index_update_var.eval())
        for _ in range(3):
          repeated_update.run()
          aggregated_update.run()
          self.assertAllClose(aggregated_update_var.eval(),
                              repeated_index_update_var.eval())
if __name__ == "__main__":
test.main()
|
neumerance/deploy | refs/heads/master | .venv/lib/python2.7/site-packages/sphinx/ext/oldcmarkup.py | 7 | # -*- coding: utf-8 -*-
"""
sphinx.ext.oldcmarkup
~~~~~~~~~~~~~~~~~~~~~
Extension for compatibility with old C markup (directives and roles).
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
_warned_oldcmarkup = False
WARNING_MSG = 'using old C markup; please migrate to new-style markup ' \
'(e.g. c:function instead of cfunction), see ' \
'http://sphinx.pocoo.org/domains.html'
class OldCDirective(Directive):
    """Compatibility directive for old-style C markup.

    Forwards e.g. ``cfunction`` to the equivalent ``c:``-domain directive
    (``c:function``), emitting a one-time deprecation warning per Sphinx
    application instance.
    """

    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {
        'noindex': directives.flag,
        'module': directives.unchanged,
    }

    def run(self):
        env = self.state.document.settings.env
        # warn only once per application, tracked on the app object
        if not env.app._oldcmarkup_warned:
            self.state_machine.reporter.warning(WARNING_MSG, line=self.lineno)
            env.app._oldcmarkup_warned = True
        # map e.g. 'cfunction' -> 'c:function' and delegate to the new
        # domain directive with all original arguments and content
        newname = 'c:' + self.name[1:]
        newdir = env.lookup_domain_element('directive', newname)[0]
        return newdir(newname, self.arguments, self.options,
                      self.content, self.lineno, self.content_offset,
                      self.block_text, self.state, self.state_machine).run()
def old_crole(typ, rawtext, text, lineno, inliner, options=None, content=None):
    """Compatibility role for old-style C markup.

    Forwards e.g. ``:cfunc:`` to the equivalent ``c:``-domain role
    (``c:func``), emitting a one-time deprecation warning per Sphinx
    application instance.

    The mutable default arguments (``{}``/``[]``) of the original role
    signature are replaced by None sentinels and normalized here; callers
    that pass options/content explicitly are unaffected.
    """
    options = {} if options is None else options
    content = [] if content is None else content
    env = inliner.document.settings.env
    if not typ:
        typ = env.config.default_role
    # warn only once per application, tracked on the app object
    if not env.app._oldcmarkup_warned:
        inliner.reporter.warning(WARNING_MSG, line=lineno)
        env.app._oldcmarkup_warned = True
    # map e.g. 'cfunc' -> 'c:func' and delegate to the new domain role
    newtyp = 'c:' + typ[1:]
    newrole = env.lookup_domain_element('role', newtyp)[0]
    return newrole(newtyp, rawtext, text, lineno, inliner, options, content)
def setup(app):
    """Register the old-style C directives and roles with Sphinx."""
    # per-application flag consulted by OldCDirective / old_crole so the
    # deprecation warning is emitted only once
    app._oldcmarkup_warned = False
    for directive_name in ('cfunction', 'cmember', 'cmacro', 'ctype', 'cvar'):
        app.add_directive(directive_name, OldCDirective)
    for role_name in ('cdata', 'cfunc', 'cmacro', 'ctype', 'cmember'):
        app.add_role(role_name, old_crole)
|
hxp2k6/https-github.com-stamparm-maltrail | refs/heads/master | trails/feeds/emergingthreatsbot.py | 1 | #!/usr/bin/env python
"""
Copyright (c) 2014-2015 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import re
from core.common import retrieve_content
__url__ = "http://rules.emergingthreats.net/open/suricata/rules/botcc.rules"
__check__ = "CnC Server"
__info__ = "malware"
__reference__ = "emergingthreats.net"
def fetch():
    """Retrieve the Emerging Threats botcc ruleset and return a dict
    mapping each listed IPv4 address to ``(__info__, __reference__)``.

    Returns an empty dict when the page could not be fetched or the
    sanity marker (``__check__``) is missing from the content.
    """
    retval = {}
    content = retrieve_content(__url__)
    if __check__ in content:
        for address in re.findall(r"\d+\.\d+\.\d+\.\d+", content):
            retval[address] = (__info__, __reference__)
    return retval
|
zanderle/django | refs/heads/master | tests/timezones/tests.py | 165 | from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from unittest import SkipTest, skipIf
from xml.dom.minidom import parseString
from django.contrib.auth.models import User
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import connection, connections
from django.db.models import Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import six, timezone
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)
try:
import pytz
except ImportError:
pytz = None
requires_pytz = skipIf(pytz is None, "this test requires pytz")
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# who don't have Daylight Saving Time, so we can represent them easily
# with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
    """Datetime storage, retrieval and querying with USE_TZ = False.

    In legacy (naive) mode the database stores local-time naive
    datetimes; aware datetimes are only accepted on backends that
    support time zones, where they are converted to local time.
    """

    def test_naive_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipIfDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        # microseconds are lost during a round-trip in the database
        self.assertEqual(event.dt, dt.replace(microsecond=0))

    @skipUnlessDBFeature('supports_timezones')
    def test_aware_datetime_in_local_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    @skipUnlessDBFeature('supports_timezones')
    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    # This combination actually never happens.
    @skipUnlessDBFeature('supports_timezones')
    @skipIfDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        # microseconds are lost during a round-trip in the database
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))

    @skipUnlessDBFeature('supports_timezones')
    def test_aware_datetime_in_utc(self):
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    @skipUnlessDBFeature('supports_timezones')
    def test_aware_datetime_in_other_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    @skipIfDBFeature('supports_timezones')
    def test_aware_datetime_unsupported(self):
        # method name fixed from "unspported"; test methods are discovered
        # by prefix, so the rename is safe
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        with self.assertRaises(ValueError):
            Event.objects.create(dt=dt)

    def test_auto_now_and_auto_now_add(self):
        now = datetime.datetime.now()
        past = now - datetime.timedelta(seconds=2)
        future = now + datetime.timedelta(seconds=2)
        Timestamp.objects.create()
        ts = Timestamp.objects.get()
        self.assertLess(past, ts.created)
        self.assertLess(past, ts.updated)
        # Fix: the original asserted future > ts.updated twice and never
        # checked ts.created against the upper bound.
        self.assertGreater(future, ts.created)
        self.assertGreater(future, ts.updated)

    def test_query_filter(self):
        dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
        dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
        Event.objects.create(dt=dt1)
        Event.objects.create(dt=dt2)
        self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)

    def test_query_datetime_lookups(self):
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
        self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
        self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
        self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
        self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)

    def test_query_aggregation(self):
        # Only min and max make sense for datetimes.
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
        result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
        self.assertEqual(result, {
            'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
            'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
        })

    def test_query_annotation(self):
        # Only min and max make sense for datetimes.
        morning = Session.objects.create(name='morning')
        afternoon = Session.objects.create(name='afternoon')
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
        morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
        afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
            [morning_min_dt, afternoon_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
            [morning_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
            [afternoon_min_dt],
            transform=lambda d: d.dt)

    def test_query_datetimes(self):
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'year'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'month'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'day'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'hour'),
            [datetime.datetime(2011, 1, 1, 1, 0, 0),
             datetime.datetime(2011, 1, 1, 4, 0, 0)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'minute'),
            [datetime.datetime(2011, 1, 1, 1, 30, 0),
             datetime.datetime(2011, 1, 1, 4, 30, 0)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'second'),
            [datetime.datetime(2011, 1, 1, 1, 30, 0),
             datetime.datetime(2011, 1, 1, 4, 30, 0)],
            transform=lambda d: d)

    def test_raw_sql(self):
        # Regression test for #17755
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        event = Event.objects.create(dt=dt)
        self.assertQuerysetEqual(
            Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
            [event],
            transform=lambda d: d)

    def test_cursor_execute_accepts_naive_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        with connection.cursor() as cursor:
            cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    def test_cursor_execute_returns_naive_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        Event.objects.create(dt=dt)
        with connection.cursor() as cursor:
            cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
            self.assertEqual(cursor.fetchall()[0][0], dt)

    def test_filter_date_field_with_aware_datetime(self):
        # Regression test for #17742
        day = datetime.date(2011, 9, 1)
        AllDayEvent.objects.create(day=day)
        # This is 2011-09-02T01:30:00+03:00 in EAT
        dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
        self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
    """
    ORM reads and writes with time zone support enabled (USE_TZ = True).

    The active time zone is Africa/Nairobi (EAT, UTC+3). Aware datetimes
    round-trip unchanged; naive datetimes trigger a RuntimeWarning and are
    interpreted in the current time zone.
    """

    @requires_tz_support
    def test_naive_datetime(self):
        """Saving a naive datetime warns and interprets it in local time."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        # naive datetimes are interpreted in local time
        self.assertEqual(event.dt, dt.replace(tzinfo=EAT))

    @requires_tz_support
    def test_datetime_from_date(self):
        """A date given to a DateTimeField warns and becomes local midnight."""
        dt = datetime.date(2011, 9, 1)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))

    @requires_tz_support
    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond(self):
        """Microseconds survive the round-trip when the backend keeps them."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        # naive datetimes are interpreted in local time
        self.assertEqual(event.dt, dt.replace(tzinfo=EAT))

    @requires_tz_support
    @skipIfDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond_unsupported(self):
        """Microseconds are dropped when the backend cannot store them."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        # microseconds are lost during a round-trip in the database
        # naive datetimes are interpreted in local time
        self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))

    def test_aware_datetime_in_local_timezone(self):
        """An aware datetime in the local zone round-trips unchanged."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond(self):
        """Aware datetimes keep microseconds on capable backends."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipIfDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
        """Aware datetimes lose microseconds on limited backends."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        # microseconds are lost during a round-trip in the database
        self.assertEqual(event.dt, dt.replace(microsecond=0))

    def test_aware_datetime_in_utc(self):
        """A UTC-aware datetime round-trips unchanged."""
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    def test_aware_datetime_in_other_timezone(self):
        """An aware datetime in a third time zone round-trips unchanged."""
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    def test_auto_now_and_auto_now_add(self):
        """auto_now / auto_now_add values fall within a +/-2s window of now."""
        now = timezone.now()
        past = now - datetime.timedelta(seconds=2)
        future = now + datetime.timedelta(seconds=2)
        Timestamp.objects.create()
        ts = Timestamp.objects.get()
        self.assertLess(past, ts.created)
        self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.updated)
        # Fixed: the original asserted `future > ts.updated` twice, leaving
        # ts.created without an upper bound.
        self.assertGreater(future, ts.created)

    def test_query_filter(self):
        """Comparison lookups honor aware datetime values."""
        dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
        dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt1)
        Event.objects.create(dt=dt2)
        self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)

    @requires_pytz
    def test_query_filter_with_pytz_timezones(self):
        """Filtering works with pytz tzinfo objects attached to values."""
        tz = pytz.timezone('Europe/Paris')
        dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
        Event.objects.create(dt=dt)
        # Renamed from `next`/`prev` to avoid shadowing the builtin next().
        just_after = dt + datetime.timedelta(seconds=3)
        just_before = dt - datetime.timedelta(seconds=3)
        self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
        self.assertEqual(Event.objects.filter(dt__exact=just_after).count(), 0)
        self.assertEqual(Event.objects.filter(dt__in=(just_before, just_after)).count(), 0)
        self.assertEqual(Event.objects.filter(dt__in=(just_before, dt, just_after)).count(), 1)
        self.assertEqual(Event.objects.filter(dt__range=(just_before, just_after)).count(), 1)

    @requires_tz_support
    def test_query_filter_with_naive_datetime(self):
        """Naive filter values warn and are interpreted in local time."""
        dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        dt = dt.replace(tzinfo=None)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            # naive datetimes are interpreted in local time
            self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
            self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
            self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
            self.assertEqual(len(recorded), 3)
        for warning in recorded:
            msg = str(warning.message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt "
                                           "received a naive datetime"))

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetime_lookups(self):
        """Date-part lookups are computed in the current time zone."""
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
        self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
        self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
        self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetime_lookups_in_other_timezone(self):
        """Date-part lookups follow an overridden current time zone."""
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        with timezone.override(UTC):
            # These two dates fall in the same day in EAT, but in different days,
            # years and months in UTC.
            self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
            self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
            self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
            self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
            self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
            self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
            self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)

    def test_query_aggregation(self):
        """Min/Max aggregation returns aware datetimes."""
        # Only min and max make sense for datetimes.
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
        result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
        self.assertEqual(result, {
            'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
            'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
        })

    def test_query_annotation(self):
        """Min annotations order and filter correctly with aware datetimes."""
        # Only min and max make sense for datetimes.
        morning = Session.objects.create(name='morning')
        afternoon = Session.objects.create(name='afternoon')
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
        morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
        afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
            [morning_min_dt, afternoon_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
            [morning_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
            [afternoon_min_dt],
            transform=lambda d: d.dt)

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetimes(self):
        """datetimes() truncates in the current zone and yields aware values."""
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'year'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'month'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'day'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'hour'),
            [datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
             datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'minute'),
            [datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
             datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'second'),
            [datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
             datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
            transform=lambda d: d)

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetimes_in_other_timezone(self):
        """datetimes() respects an overridden current time zone."""
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        with timezone.override(UTC):
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'year'),
                [datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'month'),
                [datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'day'),
                [datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'hour'),
                [datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'minute'),
                [datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'second'),
                [datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
                transform=lambda d: d)

    def test_raw_sql(self):
        """Raw queries accept aware datetimes as SQL parameters (#17755)."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        event = Event.objects.create(dt=dt)
        self.assertQuerysetEqual(
            Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
            [event],
            transform=lambda d: d)

    @skipUnlessDBFeature('supports_timezones')
    def test_cursor_execute_accepts_aware_datetime(self):
        """Raw inserts accept aware values on tz-capable backends."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        with connection.cursor() as cursor:
            cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipIfDBFeature('supports_timezones')
    def test_cursor_execute_accepts_naive_datetime(self):
        """Raw inserts take naive UTC values on tz-less backends."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        utc_naive_dt = timezone.make_naive(dt, timezone.utc)
        with connection.cursor() as cursor:
            cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [utc_naive_dt])
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipUnlessDBFeature('supports_timezones')
    def test_cursor_execute_returns_aware_datetime(self):
        """Raw selects return aware values on tz-capable backends."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        with connection.cursor() as cursor:
            cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
            self.assertEqual(cursor.fetchall()[0][0], dt)

    @skipIfDBFeature('supports_timezones')
    def test_cursor_execute_returns_naive_datetime(self):
        """Raw selects return naive UTC values on tz-less backends."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        utc_naive_dt = timezone.make_naive(dt, timezone.utc)
        Event.objects.create(dt=dt)
        with connection.cursor() as cursor:
            cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [utc_naive_dt])
            self.assertEqual(cursor.fetchall()[0][0], utc_naive_dt)

    @requires_tz_support
    def test_filter_date_field_with_aware_datetime(self):
        """DateField lookups convert aware values to local dates (#17742)."""
        day = datetime.date(2011, 9, 1)
        AllDayEvent.objects.create(day=day)
        # This is 2011-09-02T01:30:00+03:00 in EAT
        dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
        self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())

    def test_null_datetime(self):
        """A nullable DateTimeField stores and returns None (#17294)."""
        e = MaybeEvent.objects.create()
        self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class ForcedTimeZoneDatabaseTests(TransactionTestCase):
    """
    Test the TIME_ZONE database configuration parameter.

    Since this involves reading and writing to the same database through two
    connections, this is a TransactionTestCase.
    """

    # Limit the installed apps considered by this TransactionTestCase.
    available_apps = ['timezones']

    @classmethod
    def setUpClass(cls):
        # @skipIfDBFeature and @skipUnlessDBFeature cannot be chained. The
        # outermost takes precedence. Handle skipping manually instead.
        if connection.features.supports_timezones:
            raise SkipTest("Database has feature(s) supports_timezones")
        if not connection.features.test_db_allows_multiple_connections:
            raise SkipTest("Database doesn't support feature(s): test_db_allows_multiple_connections")
        super(ForcedTimeZoneDatabaseTests, cls).setUpClass()
        # Register a second alias pointing at the same database but with a
        # forced connection TIME_ZONE of Asia/Bangkok (UTC+7).
        connections.databases['tz'] = connections.databases['default'].copy()
        connections.databases['tz']['TIME_ZONE'] = 'Asia/Bangkok'

    @classmethod
    def tearDownClass(cls):
        # Close and unregister the extra alias before the base class cleanup.
        connections['tz'].close()
        del connections['tz']
        del connections.databases['tz']
        super(ForcedTimeZoneDatabaseTests, cls).tearDownClass()

    def test_read_datetime(self):
        # Written through 'default' and read back through the forced-TZ
        # alias, the value comes back seven hours earlier — the Bangkok
        # offset is applied on the way out.
        fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
        Event.objects.create(dt=fake_dt)
        event = Event.objects.using('tz').get()
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        self.assertEqual(event.dt, dt)

    def test_write_datetime(self):
        # The mirror case: written through the forced-TZ alias, the value
        # read back through 'default' is seven hours later.
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        Event.objects.using('tz').create(dt=dt)
        event = Event.objects.get()
        fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
        self.assertEqual(event.dt, fake_dt)
@skipUnlessDBFeature('supports_timezones')
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class UnsupportedTimeZoneDatabaseTests(TestCase):
    """A forced database TIME_ZONE is rejected on tz-capable backends."""

    def test_time_zone_parameter_not_supported_if_database_supports_timezone(self):
        """Opening a cursor on a TIME_ZONE-forced alias raises
        ImproperlyConfigured when the backend supports time zones natively."""
        forced_settings = connections.databases['default'].copy()
        forced_settings['TIME_ZONE'] = 'Asia/Bangkok'
        connections.databases['tz'] = forced_settings
        try:
            with self.assertRaises(ImproperlyConfigured):
                connections['tz'].cursor()
        finally:
            # Always unregister the alias, even if the assertion fails.
            connections['tz'].close()
            del connections['tz']
            del connections.databases['tz']
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(SimpleTestCase):
    """
    Round-trip naive and aware datetimes through the python, json, xml and
    yaml serializers, checking both the serialized text and the
    deserialized value.
    """

    # Backend-specific notes:
    # - JSON supports only milliseconds, microseconds will be truncated.
    # - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
    #   but when it loads this representation, it subtracts the offset and
    #   returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
    # Tests are adapted to take these quirks into account.

    def assert_python_contains_datetime(self, objects, dt):
        # 'objects' is the structure produced by the 'python' serializer.
        self.assertEqual(objects[0]['fields']['dt'], dt)

    def assert_json_contains_datetime(self, json, dt):
        # 'dt' is the expected ISO-formatted string inside the JSON payload.
        self.assertIn('"fields": {"dt": "%s"}' % dt, json)

    def assert_xml_contains_datetime(self, xml, dt):
        field = parseString(xml).getElementsByTagName('field')[0]
        self.assertXMLEqual(field.childNodes[0].wholeText, dt)

    def assert_yaml_contains_datetime(self, yaml, dt):
        # Depending on the yaml dumper, '!timestamp' might be absent
        six.assertRegex(self, yaml,
            r"\n fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))

    def test_naive_datetime(self):
        """A naive datetime serializes and deserializes unchanged."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        # Skip yaml when PyYAML isn't installed (BadSerializer placeholder).
        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt, dt)

    def test_naive_datetime_with_microsecond(self):
        """Microseconds survive except in JSON, which keeps milliseconds."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
        obj = next(serializers.deserialize('json', data)).object
        # JSON keeps only millisecond precision (see notes above).
        self.assertEqual(obj.dt, dt.replace(microsecond=405000))

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt, dt)

    def test_aware_datetime_with_microsecond(self):
        """An aware datetime with microseconds keeps its UTC offset."""
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt.replace(microsecond=405000))

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
            obj = next(serializers.deserialize('yaml', data)).object
            # PyYAML loads a naive UTC datetime (see notes above), so the
            # tzinfo is restored before comparing.
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_utc(self):
        """A UTC datetime serializes with a 'Z' / '+00:00' marker."""
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_local_timezone(self):
        """A local-zone (EAT, +03:00) datetime keeps its offset."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_other_timezone(self):
        """A third-zone (ICT, +07:00) datetime keeps its offset."""
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
    """
    Template-layer time zone handling: the {% localtime %} and
    {% timezone %} tags, the |localtime / |utc / |timezone filters, and the
    tz context processor. The current time zone is Africa/Nairobi (EAT).
    """

    @requires_tz_support
    def test_localtime_templatetag_and_filters(self):
        """
        Test the {% localtime %} templatetag and related filters.
        """
        # One representative datetime per zone, plus a naive one.
        datetimes = {
            'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
            'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
            'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
            'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
        }
        # Each template renders the raw value plus all three filters.
        templates = {
            'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
            'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
            'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
            'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
        }

        # Transform a list of keys in 'datetimes' to the expected template
        # output. This makes the definition of 'results' more readable.
        def t(*result):
            return '|'.join(datetimes[key].isoformat() for key in result)

        # Results for USE_TZ = True
        results = {
            'utc': {
                'notag': t('eat', 'eat', 'utc', 'ict'),
                'noarg': t('eat', 'eat', 'utc', 'ict'),
                'on': t('eat', 'eat', 'utc', 'ict'),
                'off': t('utc', 'eat', 'utc', 'ict'),
            },
            'eat': {
                'notag': t('eat', 'eat', 'utc', 'ict'),
                'noarg': t('eat', 'eat', 'utc', 'ict'),
                'on': t('eat', 'eat', 'utc', 'ict'),
                'off': t('eat', 'eat', 'utc', 'ict'),
            },
            'ict': {
                'notag': t('eat', 'eat', 'utc', 'ict'),
                'noarg': t('eat', 'eat', 'utc', 'ict'),
                'on': t('eat', 'eat', 'utc', 'ict'),
                'off': t('ict', 'eat', 'utc', 'ict'),
            },
            'naive': {
                'notag': t('naive', 'eat', 'utc', 'ict'),
                'noarg': t('naive', 'eat', 'utc', 'ict'),
                'on': t('naive', 'eat', 'utc', 'ict'),
                'off': t('naive', 'eat', 'utc', 'ict'),
            }
        }

        for k1, dt in six.iteritems(datetimes):
            for k2, tpl in six.iteritems(templates):
                ctx = Context({'dt': dt, 'ICT': ICT})
                actual = tpl.render(ctx)
                expected = results[k1][k2]
                self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))

        # Changes for USE_TZ = False
        results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
        results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')

        with self.settings(USE_TZ=False):
            for k1, dt in six.iteritems(datetimes):
                for k2, tpl in six.iteritems(templates):
                    ctx = Context({'dt': dt, 'ICT': ICT})
                    actual = tpl.render(ctx)
                    expected = results[k1][k2]
                    self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))

    @requires_pytz
    def test_localtime_filters_with_pytz(self):
        """
        Test the |localtime, |utc, and |timezone filters with pytz.
        """
        # Use a pytz timezone as local time
        tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})

        with self.settings(TIME_ZONE='Europe/Paris'):
            self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")

        # Use a pytz timezone as argument
        tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
                       'tz': pytz.timezone('Europe/Paris')})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")

        # Use a pytz timezone name as argument
        tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
                       'tz': pytz.timezone('Europe/Paris')})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")

    def test_localtime_templatetag_invalid_argument(self):
        """{% localtime %} rejects arguments other than on/off."""
        with self.assertRaises(TemplateSyntaxError):
            Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()

    def test_localtime_filters_do_not_raise_exceptions(self):
        """
        Test the |localtime, |utc, and |timezone filters on bad inputs.
        """
        tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
        with self.settings(USE_TZ=True):
            # bad datetime value
            ctx = Context({'dt': None, 'tz': ICT})
            self.assertEqual(tpl.render(ctx), "None|||")
            ctx = Context({'dt': 'not a date', 'tz': ICT})
            self.assertEqual(tpl.render(ctx), "not a date|||")
            # bad timezone value
            tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
            ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
            self.assertEqual(tpl.render(ctx), "")
            ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
            self.assertEqual(tpl.render(ctx), "")

    @requires_tz_support
    def test_timezone_templatetag(self):
        """
        Test the {% timezone %} templatetag.
        """
        # Nested {% timezone %} blocks: the inner one (tz2=None) restores
        # the default time zone.
        tpl = Template(
            "{% load tz %}"
            "{{ dt }}|"
            "{% timezone tz1 %}"
            "{{ dt }}|"
            "{% timezone tz2 %}"
            "{{ dt }}"
            "{% endtimezone %}"
            "{% endtimezone %}"
        )
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
                       'tz1': ICT, 'tz2': None})
        self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00")

    @requires_pytz
    def test_timezone_templatetag_with_pytz(self):
        """
        Test the {% timezone %} templatetag with pytz.
        """
        tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")

        # Use a pytz timezone as argument
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
                       'tz': pytz.timezone('Europe/Paris')})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")

        # Use a pytz timezone name as argument
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
                       'tz': 'Europe/Paris'})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")

    def test_timezone_templatetag_invalid_argument(self):
        """{% timezone %} requires an argument and a valid zone name."""
        with self.assertRaises(TemplateSyntaxError):
            Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
        with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
            Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))

    @skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
    def test_get_current_timezone_templatetag(self):
        """
        Test the {% get_current_timezone %} templatetag.
        """
        tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")

        self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
        with timezone.override(UTC):
            self.assertEqual(tpl.render(Context()), "UTC")

        tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")

        self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
        with timezone.override(UTC):
            self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")

    @requires_pytz
    def test_get_current_timezone_templatetag_with_pytz(self):
        """
        Test the {% get_current_timezone %} templatetag with pytz.
        """
        tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
        with timezone.override(pytz.timezone('Europe/Paris')):
            self.assertEqual(tpl.render(Context()), "Europe/Paris")

        tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
        self.assertEqual(tpl.render(Context()), "Europe/Paris")

    def test_get_current_timezone_templatetag_invalid_argument(self):
        """{% get_current_timezone %} requires the 'as variable' clause."""
        with self.assertRaises(TemplateSyntaxError):
            Template("{% load tz %}{% get_current_timezone %}").render()

    @skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
    def test_tz_template_context_processor(self):
        """
        Test the django.template.context_processors.tz template context processor.
        """
        tpl = Template("{{ TIME_ZONE }}")
        # Without the processor the variable is undefined...
        context = Context()
        self.assertEqual(tpl.render(context), "")
        # ...with it, the current zone name is exposed.
        request_context = RequestContext(HttpRequest(), processors=[context_processors.tz])
        self.assertEqual(tpl.render(request_context), "Africa/Nairobi" if pytz else "EAT")

    @requires_tz_support
    def test_date_and_time_template_filters(self):
        """|date and |time render in the current time zone."""
        tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
        self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
        with timezone.override(ICT):
            self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")

    def test_date_and_time_template_filters_honor_localtime(self):
        """|date and |time skip conversion inside {% localtime off %}."""
        tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
        self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
        with timezone.override(ICT):
            self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")

    def test_localtime_with_time_zone_setting_set_to_none(self):
        """Rendering still works with TIME_ZONE=None (#17274)."""
        # Regression for #17274
        tpl = Template("{% load tz %}{{ dt }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})

        with self.settings(TIME_ZONE=None):
            # the actual value depends on the system time zone of the host
            self.assertTrue(tpl.render(ctx).startswith("2011"))

    @requires_tz_support
    def test_now_template_tag_uses_current_time_zone(self):
        """{% now %} reflects the active time zone (#17343)."""
        # Regression for #17343
        tpl = Template("{% now \"O\" %}")
        self.assertEqual(tpl.render(Context({})), "+0300")
        with timezone.override(ICT):
            self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
    """Form handling with USE_TZ=False: submitted values stay naive local times."""
    def test_form(self):
        # The wall-clock input is kept verbatim as a naive datetime.
        form = EventForm({'dt': '2011-09-01 13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
    @requires_pytz
    def test_form_with_non_existent_time(self):
        # 02:30 does not exist on the Paris DST-spring-forward day, but legacy
        # mode performs no time zone validation.
        form = EventForm({'dt': '2011-03-27 02:30:00'})
        with timezone.override(pytz.timezone('Europe/Paris')):
            # this is obviously a bug
            self.assertTrue(form.is_valid())
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))
    @requires_pytz
    def test_form_with_ambiguous_time(self):
        # 02:30 occurs twice on the Paris DST-fall-back day; legacy mode
        # accepts it without disambiguation.
        form = EventForm({'dt': '2011-10-30 02:30:00'})
        with timezone.override(pytz.timezone('Europe/Paris')):
            # this is obviously a bug
            self.assertTrue(form.is_valid())
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))
    def test_split_form(self):
        # SplitDateTimeField recombines the two widgets into one naive datetime.
        form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
    def test_model_form(self):
        # Round-trip through the database preserves the naive value.
        EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
        e = Event.objects.get()
        self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
    """Form handling with USE_TZ=True: input is interpreted in the current
    time zone and converted to an aware UTC datetime."""
    @requires_tz_support
    def test_form(self):
        # 13:20 EAT (UTC+3) becomes 10:20 UTC.
        form = EventForm({'dt': '2011-09-01 13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
    def test_form_with_other_timezone(self):
        # 17:20 ICT (UTC+7) also becomes 10:20 UTC when ICT is active.
        form = EventForm({'dt': '2011-09-01 17:20:30'})
        with timezone.override(ICT):
            self.assertTrue(form.is_valid())
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
    def test_form_with_explicit_timezone(self):
        form = EventForm({'dt': '2011-09-01 17:20:30+07:00'})
        # Datetime inputs formats don't allow providing a time zone.
        self.assertFalse(form.is_valid())
    @requires_pytz
    def test_form_with_non_existent_time(self):
        # 02:30 does not exist on the Paris spring-forward day: validation
        # must fail with an explanatory error.
        with timezone.override(pytz.timezone('Europe/Paris')):
            form = EventForm({'dt': '2011-03-27 02:30:00'})
            self.assertFalse(form.is_valid())
            self.assertEqual(form.errors['dt'],
                ["2011-03-27 02:30:00 couldn't be interpreted in time zone "
                 "Europe/Paris; it may be ambiguous or it may not exist."])
    @requires_pytz
    def test_form_with_ambiguous_time(self):
        # 02:30 occurs twice on the Paris fall-back day: validation must fail.
        with timezone.override(pytz.timezone('Europe/Paris')):
            form = EventForm({'dt': '2011-10-30 02:30:00'})
            self.assertFalse(form.is_valid())
            self.assertEqual(form.errors['dt'],
                ["2011-10-30 02:30:00 couldn't be interpreted in time zone "
                 "Europe/Paris; it may be ambiguous or it may not exist."])
    @requires_tz_support
    def test_split_form(self):
        form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
    @requires_tz_support
    def test_localized_form(self):
        # Initial aware values must render in the active time zone (ICT here).
        form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)})
        with timezone.override(ICT):
            self.assertIn("2011-09-01 17:20:30", str(form))
    @requires_tz_support
    def test_model_form(self):
        EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
        e = Event.objects.get()
        self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
    @requires_tz_support
    def test_localized_model_form(self):
        form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
        with timezone.override(ICT):
            self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True,
                   PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF='timezones.urls')
class AdminTests(TestCase):
    """The admin renders datetimes in the current time zone, for both the
    changelist and the change (editable/readonly) views."""
    @classmethod
    def setUpTestData(cls):
        # password = "secret"
        cls.u1 = User.objects.create(
            id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
            last_login=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
            is_superuser=True, username='super', first_name='Super', last_name='User',
            email='super@example.com', is_staff=True, is_active=True,
            date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
        )
    def setUp(self):
        self.client.login(username='super', password='secret')
    @requires_tz_support
    def test_changelist(self):
        # Default time zone (EAT) is used to display stored UTC values.
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
        self.assertContains(response, e.dt.astimezone(EAT).isoformat())
    def test_changelist_in_other_timezone(self):
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        with timezone.override(ICT):
            response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
        self.assertContains(response, e.dt.astimezone(ICT).isoformat())
    @requires_tz_support
    def test_change_editable(self):
        # The editable widget splits the localized value into date and time parts.
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
        self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
        self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())
    def test_change_editable_in_other_timezone(self):
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        with timezone.override(ICT):
            response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
        self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
        self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())
    @requires_tz_support
    def test_change_readonly(self):
        Timestamp.objects.create()
        # re-fetch the object for backends that lose microseconds (MySQL)
        t = Timestamp.objects.get()
        response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
        self.assertContains(response, t.created.astimezone(EAT).isoformat())
    def test_change_readonly_in_other_timezone(self):
        Timestamp.objects.create()
        # re-fetch the object for backends that lose microseconds (MySQL)
        t = Timestamp.objects.get()
        with timezone.override(ICT):
            response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
        self.assertContains(response, t.created.astimezone(ICT).isoformat())
|
adedayo/intellij-community | refs/heads/master | python/testData/refactoring/introduceVariable/substringContainsFormatChars.after.py | 83 | a = "lo %s"
print(("Hel" + a + "!") % "World") |
shepdelacreme/ansible | refs/heads/devel | lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py | 28 | #!/usr/bin/python
#
# Copyright (c) 2017 Obezimnaka Boms, <t-ozboms@microsoft.com>
# Copyright (c) 2017 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_dnsrecordset
version_added: "2.4"
short_description: Create, delete and update DNS record sets and records.
description:
- Creates, deletes, and updates DNS records sets and records within an existing Azure DNS Zone.
options:
resource_group:
description:
- name of resource group
required: true
zone_name:
description:
- name of the existing DNS zone in which to manage the record set
required: true
relative_name:
description:
- relative name of the record set
required: true
record_type:
description:
- the type of record set to create or delete
choices:
- A
- AAAA
- CNAME
- MX
- NS
- SRV
- TXT
- PTR
required: true
record_mode:
description:
- whether existing record values not sent to the module should be purged
default: purge
choices:
- append
- purge
state:
description:
- Assert the state of the record set. Use C(present) to create or update and
C(absent) to delete.
default: present
choices:
- absent
- present
time_to_live:
description:
- time to live of the record set in seconds
default: 3600
records:
description:
- list of records to be created depending on the type of record (set)
suboptions:
preference:
description:
- used for creating an MX record set/records
priority:
description:
- used for creating an SRV record set/records
weight:
description:
- used for creating an SRV record set/records
port:
description:
- used for creating an SRV record set/records
entry:
description:
- primary data value for all record types.
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Obezimnaka Boms (@ozboms)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: ensure an "A" record set with multiple records
azure_rm_dnsrecordset:
resource_group: Testing
relative_name: www
zone_name: testing.com
record_type: A
state: present
records:
- entry: 192.168.100.101
- entry: 192.168.100.102
- entry: 192.168.100.103
- name: delete a record set
azure_rm_dnsrecordset:
resource_group: Testing
record_type: A
relative_name: www
zone_name: testing.com
state: absent
- name: create multiple "A" record sets with multiple records
azure_rm_dnsrecordset:
resource_group: Testing
zone_name: testing.com
state: present
relative_name: "{{ item.name }}"
record_type: "{{ item.type }}"
records: "{{ item.records }}"
with_items:
- { name: 'servera', type: 'A', records: [ { entry: '10.10.10.20' }, { entry: '10.10.10.21' }] }
- { name: 'serverb', type: 'A', records: [ { entry: '10.10.10.30' }, { entry: '10.10.10.41' }] }
- { name: 'serverc', type: 'A', records: [ { entry: '10.10.10.40' }, { entry: '10.10.10.41' }] }
- name: create SRV records in a new record set
azure_rm_dnsrecordset:
resource_group: Testing
relative_name: _sip._tcp.testing.com
zone_name: testing.com
time_to_live: 7200
record_type: SRV
state: present
records:
- entry: sip.testing.com
preference: 10
priority: 20
weight: 10
port: 5060
- name: create PTR record in a new record set
azure_rm_dnsrecordset:
resource_group: Testing
relative_name: 192.168.100.101.in-addr.arpa
zone_name: testing.com
record_type: PTR
records:
- entry: servera.testing.com
- name: create TXT record in a new record set
azure_rm_dnsrecordset:
resource_group: Testing
relative_name: mail.testing.com
zone_name: testing.com
record_type: TXT
records:
- entry: 'v=spf1 a -all'
'''
RETURN = '''
'''
import inspect
import sys
from ansible.module_utils.basic import _load_params
from ansible.module_utils.six import iteritems
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.dns.models import Zone, RecordSet, ARecord, AaaaRecord, MxRecord, NsRecord, PtrRecord, SrvRecord, TxtRecord, CnameRecord, SoaRecord
except ImportError:
# This is handled in azure_rm_common
pass
# Per-record-type sub-argspec: describes the fields a single record of each DNS
# type accepts. 'entry' is an alias for the primary data value of every type,
# so playbooks can use one field name regardless of record type.
RECORD_ARGSPECS = dict(
    A=dict(
        ipv4_address=dict(type='str', required=True, aliases=['entry'])
    ),
    AAAA=dict(
        ipv6_address=dict(type='str', required=True, aliases=['entry'])
    ),
    CNAME=dict(
        cname=dict(type='str', required=True, aliases=['entry'])
    ),
    MX=dict(
        preference=dict(type='int', required=True),
        exchange=dict(type='str', required=True, aliases=['entry'])
    ),
    NS=dict(
        nsdname=dict(type='str', required=True, aliases=['entry'])
    ),
    PTR=dict(
        ptrdname=dict(type='str', required=True, aliases=['entry'])
    ),
    SRV=dict(
        priority=dict(type='int', required=True),
        port=dict(type='int', required=True),
        weight=dict(type='int', required=True),
        target=dict(type='str', required=True, aliases=['entry'])
    ),
    TXT=dict(
        value=dict(type='list', required=True, aliases=['entry'])
    ),
    # FUTURE: ensure all record types are supported (see https://github.com/Azure/azure-sdk-for-python/tree/master/azure-mgmt-dns/azure/mgmt/dns/models)
)
# Maps each record type to the RecordSet attribute that holds its records, the
# SDK model class used to build them, and whether the attribute is a list
# (CNAME is the only single-valued type). Empty when the Azure SDK is absent
# so the module can still load and report the missing dependency.
RECORDSET_VALUE_MAP = dict(
    A=dict(attrname='arecords', classobj=ARecord, is_list=True),
    AAAA=dict(attrname='aaaa_records', classobj=AaaaRecord, is_list=True),
    CNAME=dict(attrname='cname_record', classobj=CnameRecord, is_list=False),
    MX=dict(attrname='mx_records', classobj=MxRecord, is_list=True),
    NS=dict(attrname='ns_records', classobj=NsRecord, is_list=True),
    PTR=dict(attrname='ptr_records', classobj=PtrRecord, is_list=True),
    SRV=dict(attrname='srv_records', classobj=SrvRecord, is_list=True),
    TXT=dict(attrname='txt_records', classobj=TxtRecord, is_list=True),
    # FUTURE: add missing record types from https://github.com/Azure/azure-sdk-for-python/blob/master/azure-mgmt-dns/azure/mgmt/dns/models/record_set.py
) if HAS_AZURE else {}
class AzureRMRecordSet(AzureRMModuleBase):
    """Ansible module implementation managing one Azure DNS record set.

    Argument validation is done in two passes: a first pass (skip_exec=True)
    only reads record_type so the matching per-type sub-argspec can be patched
    onto 'records', then a second pass validates fully and runs exec_module.
    """
    def __init__(self):
        # we're doing two-pass arg validation, sample and store the args internally to allow this
        _load_params()
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            relative_name=dict(type='str', required=True),
            zone_name=dict(type='str', required=True),
            record_type=dict(choices=RECORD_ARGSPECS.keys(), required=True, type='str'),
            record_mode=dict(choices=['append', 'purge'], default='purge'),
            state=dict(choices=['present', 'absent'], default='present', type='str'),
            time_to_live=dict(type='int', default=3600),
            records=dict(type='list', elements='dict')
        )
        required_if = [
            ('state', 'present', ['records'])
        ]
        self.results = dict(
            changed=False
        )
        # first-pass arg validation so we can get the record type- skip exec_module
        super(AzureRMRecordSet, self).__init__(self.module_arg_spec, required_if=required_if, supports_check_mode=True, skip_exec=True)
        # look up the right subspec and metadata
        record_subspec = RECORD_ARGSPECS.get(self.module.params['record_type'])
        self.record_type_metadata = RECORDSET_VALUE_MAP.get(self.module.params['record_type'])
        # patch the right record shape onto the argspec
        self.module_arg_spec['records']['options'] = record_subspec
        # monkeypatch __hash__ on SDK model objects so we can safely use them in sets
        for rvm in RECORDSET_VALUE_MAP.values():
            rvm['classobj'].__hash__ = gethash
        # rerun validation and actually run the module this time
        super(AzureRMRecordSet, self).__init__(self.module_arg_spec, required_if=required_if, supports_check_mode=True)
    def exec_module(self, **kwargs):
        """Ensure the record set matches the requested state; returns results dict."""
        for key in self.module_arg_spec.keys():
            setattr(self, key, kwargs[key])
        # retrieve resource group to make sure it exists
        self.get_resource_group(self.resource_group)
        zone = self.dns_client.zones.get(self.resource_group, self.zone_name)
        if not zone:
            self.fail('The zone {0} does not exist in the resource group {1}'.format(self.zone_name, self.resource_group))
        try:
            self.log('Fetching Record Set {0}'.format(self.relative_name))
            record_set = self.dns_client.record_sets.get(self.resource_group, self.zone_name, self.relative_name, self.record_type)
        except CloudError as ce:
            # a missing record set is expected for first-time creation
            record_set = None
            # FUTURE: fail on anything other than ResourceNotFound
        # FUTURE: implement diff mode
        if self.state == 'present':
            # convert the input records to SDK objects
            self.input_sdk_records = self.create_sdk_records(self.records)
            if not record_set:
                changed = True
            else:
                # and use it to get the type-specific records
                server_records = getattr(record_set, self.record_type_metadata['attrname'])
                # compare the input records to the server records
                changed = self.records_changed(self.input_sdk_records, server_records)
                # also check top-level recordset properties
                changed |= record_set.ttl != self.time_to_live
                # FUTURE: add metadata/tag check on recordset
            self.results['changed'] |= changed
        elif self.state == 'absent':
            if record_set:
                self.results['changed'] = True
        if self.check_mode:
            return self.results
        if self.results['changed']:
            if self.state == 'present':
                record_set_args = dict(
                    ttl=self.time_to_live
                )
                if not self.record_type_metadata['is_list']:
                    # single-valued type (CNAME): the attribute takes one object
                    records_to_create_or_update = self.input_sdk_records[0]
                elif self.record_mode == 'append' and record_set: # append mode, merge with existing values before update
                    records_to_create_or_update = set(self.input_sdk_records).union(set(server_records))
                else:
                    records_to_create_or_update = self.input_sdk_records
                record_set_args[self.record_type_metadata['attrname']] = records_to_create_or_update
                record_set = RecordSet(**record_set_args)
                rsout = self.dns_client.record_sets.create_or_update(self.resource_group, self.zone_name, self.relative_name, self.record_type, record_set)
            elif self.state == 'absent':
                # delete record set
                self.delete_record_set()
        return self.results
    def delete_record_set(self):
        """Delete the managed record set; fails the module on any API error."""
        try:
            # delete the record set
            self.dns_client.record_sets.delete(self.resource_group, self.zone_name, self.relative_name, self.record_type)
        except Exception as exc:
            self.fail("Error deleting record set {0} - {1}".format(self.relative_name, str(exc)))
        return None
    def create_sdk_records(self, input_records):
        """Convert validated 'records' dicts into SDK model instances,
        keeping only keys the model's __init__ accepts."""
        record_sdk_class = self.record_type_metadata['classobj']
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # getfullargspec is the drop-in replacement — confirm target Python.
        record_argspec = inspect.getargspec(record_sdk_class.__init__)
        return [record_sdk_class(**dict([(k, v) for k, v in iteritems(x) if k in record_argspec.args])) for x in input_records]
    def records_changed(self, input_records, server_records):
        """Return True when a create_or_update call is needed to reach the
        requested record values (semantics depend on record_mode)."""
        # ensure we're always comparing a list, even for the single-valued types
        if not isinstance(server_records, list):
            server_records = [server_records]
        input_set = set(input_records)
        server_set = set(server_records)
        if self.record_mode == 'append': # only a difference if the server set is missing something from the input set
            return len(input_set.difference(server_set)) > 0
        # non-append mode; any difference in the sets is a change
        return input_set != server_set
# Quick 'n dirty hash impl suitable for monkeypatching onto SDK model objects (so we can use set comparisons)
def gethash(self):
    """Return a hash derived from the object's __init__ parameter values.

    Hashes the current values of every attribute named in the object's
    __init__ signature (excluding ``self``); list values are stringified so
    the tuple stays hashable. The result is cached on the instance in
    ``_cachedhash``.
    """
    # compare against None (not truthiness) so a legitimate hash of 0 is cached
    if getattr(self, '_cachedhash', None) is None:
        # getfullargspec replaces getargspec, which was removed in Python 3.11
        spec = inspect.getfullargspec(self.__init__)
        values = [getattr(self, name, None) for name in spec.args if name != 'self']
        # lists are unhashable; fall back to their string representation
        valuetuple = tuple(str(v) if isinstance(v, list) else v for v in values)
        self._cachedhash = hash(valuetuple)
    return self._cachedhash
def main():
    # All module work happens during construction: the second
    # AzureRMModuleBase.__init__ call inside AzureRMRecordSet.__init__
    # runs exec_module and exits the process with the results.
    AzureRMRecordSet()
if __name__ == '__main__':
    main()
|
its-django/mysite | refs/heads/master | mysite/zoo/migrations/__init__.py | 12133432 | |
dodobas/osm-export-tool2 | refs/heads/master | utils/tests/__init__.py | 12133432 | |
erinspace/scrapi | refs/heads/develop | scrapi/harvesters/uiucideals.py | 9 | """
Harvester for the University of Illinois at Urbana-Champaign, IDEALS
information about UIUC-IDEALS can be found here:
https://github.com/CenterForOpenScience/SHARE/blob/master/providers/UIUC-IDEALS.md
"""
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class UiucIdealsHarvester(OAIHarvester):
    """OAI-PMH harvester configuration for UIUC's IDEALS repository.

    All harvesting behavior comes from OAIHarvester; this subclass only
    supplies the provider identity, endpoint, and the extra OAI properties
    to capture.
    """
    long_name = 'University of Illinois at Urbana-Champaign, IDEALS'
    short_name = 'uiucideals'
    url = 'https://www.ideals.illinois.edu'
    # OAI-PMH endpoint queried for records
    base_url = 'http://ideals.uiuc.edu/dspace-oai/request'
    property_list = [
        'type', 'format', 'date',
        'identifier', 'setSpec', 'source', 'coverage',
        'relation', 'rights'
    ]
|
joelddiaz/openshift-tools | refs/heads/prod | openshift/installer/vendored/openshift-ansible-3.5.91/roles/lib_openshift/src/class/oc_secret.py | 22 | # pylint: skip-file
# flake8: noqa
# pylint: skip-file
# pylint: disable=wrong-import-position,wrong-import-order
import base64
# pylint: disable=too-many-arguments
class OCSecret(OpenShiftCLI):
    ''' Class to wrap the oc command line tools for managing secrets. '''
    def __init__(self,
                 namespace,
                 secret_name=None,
                 decode=False,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        ''' Constructor for OpenshiftOC

            :param namespace: namespace the secret lives in
            :param secret_name: name of the secret to manage
            :param decode: when True, base64-decode the secret's data on get()
        '''
        super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
        self.name = secret_name
        self.decode = decode
    def get(self):
        '''return a secret by name '''
        results = self._get('secrets', self.name)
        results['decoded'] = {}
        results['exists'] = False
        if results['returncode'] == 0 and results['results'][0]:
            results['exists'] = True
            if self.decode:
                if 'data' in results['results'][0]:
                    for sname, value in results['results'][0]['data'].items():
                        results['decoded'][sname] = base64.b64decode(value)
        # a missing secret is not an error for callers: normalize "not found" to rc 0
        if results['returncode'] != 0 and '"%s" not found' % self.name in results['stderr']:
            results['returncode'] = 0
        return results
    def delete(self):
        '''delete a secret by name'''
        return self._delete('secrets', self.name)
    def create(self, files=None, contents=None):
        '''Create a secret from files (or inline contents written to temp files).'''
        if not files:
            files = Utils.create_tmp_files_from_contents(contents)
        # oc secrets new <name> key1=path1 key2=path2 ...
        secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
        cmd = ['secrets', 'new', self.name]
        cmd.extend(secrets)
        results = self.openshift_cmd(cmd)
        return results
    def update(self, files, force=False):
        '''run update secret

           This receives a list of file names, converts it into a secret,
           writes the secret to disk and passes it to `oc replace`.
        '''
        secret = self.prep_secret(files)
        if secret['returncode'] != 0:
            return secret
        sfile_path = '/tmp/%s' % self.name
        with open(sfile_path, 'w') as sfd:
            sfd.write(json.dumps(secret['results']))
        # make sure the temp file is removed when the process exits
        atexit.register(Utils.cleanup, [sfile_path])
        return self._replace(sfile_path, force=force)
    def prep_secret(self, files=None, contents=None):
        ''' return what the secret would look like if created

            This is accomplished by passing -ojson. This will most likely change in the future
        '''
        if not files:
            files = Utils.create_tmp_files_from_contents(contents)
        secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
        cmd = ['-ojson', 'secrets', 'new', self.name]
        cmd.extend(secrets)
        return self.openshift_cmd(cmd, output=True)
    @staticmethod
    # pylint: disable=too-many-return-statements,too-many-branches
    # TODO: This function should be refactored into its individual parts.
    def run_ansible(params, check_mode):
        '''run the ansible idempotent code'''
        ocsecret = OCSecret(params['namespace'],
                            params['name'],
                            params['decode'],
                            kubeconfig=params['kubeconfig'],
                            verbose=params['debug'])
        state = params['state']
        api_rval = ocsecret.get()
        #####
        # Get
        #####
        if state == 'list':
            # BUGFIX: this previously used the *variable* `state` as the dict
            # key ({state: 'list'} == {'list': 'list'}); every other return
            # path uses the literal 'state' key.
            return {'changed': False, 'results': api_rval, 'state': 'list'}
        if not params['name']:
            return {'failed': True,
                    'msg': 'Please specify a name when state is absent|present.'}
        ########
        # Delete
        ########
        if state == 'absent':
            if not Utils.exists(api_rval['results'], params['name']):
                return {'changed': False, 'state': 'absent'}
            if check_mode:
                return {'changed': True, 'msg': 'Would have performed a delete.'}
            api_rval = ocsecret.delete()
            return {'changed': True, 'results': api_rval, 'state': 'absent'}
        if state == 'present':
            if params['files']:
                files = params['files']
            elif params['contents']:
                files = Utils.create_tmp_files_from_contents(params['contents'])
            else:
                return {'failed': True,
                        'msg': 'Either specify files or contents.'}
            ########
            # Create
            ########
            if not Utils.exists(api_rval['results'], params['name']):
                if check_mode:
                    return {'changed': True,
                            'msg': 'Would have performed a create.'}
                api_rval = ocsecret.create(files, params['contents'])
                # Remove files
                if files and params['delete_after']:
                    Utils.cleanup([ftmp['path'] for ftmp in files])
                if api_rval['returncode'] != 0:
                    return {'failed': True,
                            'msg': api_rval}
                return {'changed': True,
                        'results': api_rval,
                        'state': 'present'}
            ########
            # Update
            ########
            secret = ocsecret.prep_secret(params['files'], params['contents'])
            if secret['returncode'] != 0:
                return {'failed': True, 'msg': secret}
            if Utils.check_def_equal(secret['results'], api_rval['results'][0]):
                # Remove files
                if files and params['delete_after']:
                    Utils.cleanup([ftmp['path'] for ftmp in files])
                return {'changed': False,
                        'results': secret['results'],
                        'state': 'present'}
            if check_mode:
                return {'changed': True,
                        'msg': 'Would have performed an update.'}
            api_rval = ocsecret.update(files, force=params['force'])
            # Remove files
            if secret and params['delete_after']:
                Utils.cleanup([ftmp['path'] for ftmp in files])
            if api_rval['returncode'] != 0:
                return {'failed': True,
                        'msg': api_rval}
            return {'changed': True,
                    'results': api_rval,
                    'state': 'present'}
        return {'failed': True,
                'changed': False,
                'msg': 'Unknown state passed. %s' % state,
                'state': 'unknown'}
|
coupdair/pyoptools | refs/heads/master | pyoptools/misc/lsq/__init__.py | 9 | from lsq import *
__all__=["polyfit2d",
"polyfito1",
"polyfito2",
"vander_matrix"]
|
bdang2012/taiga-back-casting | refs/heads/branch_casting | taiga/projects/notifications/management/commands/send_notifications.py | 1 | # Copyright (C) 2014-2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014-2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from taiga.base.utils.iterators import iter_queryset
from taiga.projects.notifications.models import HistoryChangeNotification
from taiga.projects.notifications.services import send_sync_notifications
class Command(BaseCommand):
    """Management command that synchronously delivers every pending change
    notification."""

    def handle(self, *args, **options):
        """Walk all HistoryChangeNotification rows in batches of 100 and send
        each one synchronously."""
        pending = HistoryChangeNotification.objects.all()
        for notification in iter_queryset(pending, itersize=100):
            send_sync_notifications(notification.pk)
|
yukusu/IoT-Rasp | refs/heads/master | iot-projekti/CoAPthon/coapthon/reverse_proxy/__init__.py | 10 | __author__ = 'giacomo'
|
L3K0V/lifebelt | refs/heads/master | server/api/courses/migrations/0002_course.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the Course model with its
    # identity fields and created/modified timestamps. Applied migrations
    # must not be edited by hand; add a new migration instead.
    dependencies = [
        ('courses', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('initials', models.CharField(max_length=16)),
                ('full_name', models.CharField(max_length=48)),
                ('repository', models.URLField(blank=True)),
                ('description', models.TextField()),
                ('year', models.PositiveSmallIntegerField()),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_modified', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
konstruktoid/ansible-upstream | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_host_firewall_manager.py | 10 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_firewall_manager
short_description: Manage firewall configurations about an ESXi host
description:
- This module can be used to manage firewall configurations about an ESXi host when ESXi hostname or Cluster name is given.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Firewall settings are applied to every ESXi host system in given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Firewall settings are applied to this ESXi host system.
- If C(cluster_name) is not given, this parameter is required.
rules:
description:
- A list of Rule set which needs to be managed.
- Each member of list is rule set name and state to be set the rule.
- Both rule name and rule state are required parameters.
- Please see examples for more information.
default: []
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Enable vvold rule set for all ESXi Host in given Cluster
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
rules:
- name: vvold
enabled: True
- name: Enable vvold rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
- name: Manage multiple rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
- name: CIMHttpServer
enabled: False
'''
RETURN = r'''
rule_set_state:
description:
- dict with hostname as key and dict with firewall rule set facts as value
returned: success
type: dict
sample: {
"rule_set_state": {
"localhost.localdomain": {
"CIMHttpServer": {
"current_state": true,
"desired_state": true,
"previous_state": true
},
"vvold": {
"current_state": true,
"desired_state": true,
"previous_state": true
}
}
}
}
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VmwareFirewallManager(PyVmomi):
    """Applies the requested firewall rule-set states to one or more ESXi hosts.

    On construction it resolves the target hosts (by cluster or hostname) and
    snapshots their current rule-set states; ensure() then reconciles them
    against the requested 'rules' list and exits the module with results.
    """
    def __init__(self, module):
        super(VmwareFirewallManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.options = self.params.get('options', dict())
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        self.firewall_facts = dict()
        self.rule_options = self.module.params.get("rules")
        self.gather_rule_set()
    def gather_rule_set(self):
        # Snapshot the enabled/disabled state of every rule set per host,
        # keyed by host name then rule-set key.
        for host in self.hosts:
            self.firewall_facts[host.name] = {}
            firewall_system = host.configManager.firewallSystem
            if firewall_system:
                for rule_set_obj in firewall_system.firewallInfo.ruleset:
                    temp_rule_dict = dict()
                    temp_rule_dict['enabled'] = rule_set_obj.enabled
                    self.firewall_facts[host.name][rule_set_obj.key] = temp_rule_dict
    def ensure(self):
        """
        Function to ensure rule set configuration
        """
        fw_change_list = []
        results = dict(changed=False, rule_set_state=dict())
        for host in self.hosts:
            firewall_system = host.configManager.firewallSystem
            if firewall_system is None:
                # host exposes no firewall manager; nothing to reconcile
                continue
            results['rule_set_state'][host.name] = dict()
            for rule_option in self.rule_options:
                rule_name = rule_option.get('name', None)
                if rule_name is None:
                    self.module.fail_json(msg="Please specify rule.name for rule set"
                                              " as it is required parameter.")
                if rule_name not in self.firewall_facts[host.name]:
                    self.module.fail_json(msg="rule named '%s' wasn't found." % rule_name)
                rule_enabled = rule_option.get('enabled', None)
                if rule_enabled is None:
                    self.module.fail_json(msg="Please specify rules.enabled for rule set"
                                              " %s as it is required parameter." % rule_name)
                current_rule_state = self.firewall_facts[host.name][rule_name]['enabled']
                # only touch the host when the desired state differs
                if current_rule_state != rule_enabled:
                    try:
                        if rule_enabled:
                            firewall_system.EnableRuleset(id=rule_name)
                        else:
                            firewall_system.DisableRuleset(id=rule_name)
                        fw_change_list.append(True)
                    except vim.fault.NotFound as not_found:
                        self.module.fail_json(msg="Failed to enable rule set %s as"
                                                  " rule set id is unknown : %s" % (rule_name,
                                                                                    to_native(not_found.msg)))
                    except vim.fault.HostConfigFault as host_config_fault:
                        self.module.fail_json(msg="Failed to enabled rule set %s as an internal"
                                                  " error happened while reconfiguring"
                                                  " rule set : %s" % (rule_name,
                                                                      to_native(host_config_fault.msg)))
                # current_state reflects the requested (post-call) state here
                results['rule_set_state'][host.name][rule_name] = dict(current_state=rule_enabled,
                                                                       previous_state=current_rule_state,
                                                                       desired_state=rule_enabled,
                                                                       )
        if any(fw_change_list):
            results['changed'] = True
        self.module.exit_json(**results)
def main():
    """Module entry point: build the argument spec and apply the rule state."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        rules=dict(type='list', default=list(), required=False),
    )
    # One of cluster_name / esxi_hostname must be supplied to select hosts.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ]
    )
    VmwareFirewallManager(module).ensure()


if __name__ == "__main__":
    main()
|
wilvk/ansible | refs/heads/devel | lib/ansible/modules/windows/win_pagefile.py | 42 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2017, Liran Nisanov <lirannis@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_pagefile
version_added: "2.4"
short_description: Query or change pagefile configuration
description:
- Query current pagefile configuration.
- Enable/Disable AutomaticManagedPagefile.
- Create new or override pagefile configuration.
options:
drive:
description:
- The drive of the pagefile.
initial_size:
description:
- The initial size of the pagefile in megabytes.
maximum_size:
description:
- The maximum size of the pagefile in megabytes.
override:
description:
- Override the current pagefile on the drive.
type: bool
default: 'yes'
system_managed:
description:
- Configures current pagefile to be managed by the system.
type: bool
default: 'no'
automatic:
description:
- Configures AutomaticManagedPagefile for the entire system.
type: bool
remove_all:
description:
- Remove all pagefiles in the system, not including automatic managed.
type: bool
default: 'no'
test_path:
description:
- Use Test-Path on the drive to make sure the drive is accessible before creating the pagefile.
type: bool
default: 'yes'
state:
description:
- State of the pagefile.
choices:
- present
- absent
- query
default: query
notes:
- There is difference between automatic managed pagefiles that configured once for the entire system and system managed pagefile that configured per pagefile.
- InitialSize 0 and MaximumSize 0 means the pagefile is managed by the system.
- Value out of range exception may be caused by several different issues, two common problems - No such drive, Pagefile size is too small.
- Setting a pagefile when AutomaticManagedPagefile is on will disable the AutomaticManagedPagefile.
author:
- Liran Nisanov (@LiranNis)
'''
EXAMPLES = r'''
- name: Query pagefiles configuration
win_pagefile:
- name: Query C pagefile
win_pagefile:
drive: C
- name: Set C pagefile, don't override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
override: no
state: present
- name: Set C pagefile, override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
state: present
- name: Remove C pagefile
win_pagefile:
drive: C
state: absent
- name: Remove all current pagefiles, enable AutomaticManagedPagefile and query at the end
win_pagefile:
remove_all: yes
automatic: yes
- name: Remove all pagefiles disable AutomaticManagedPagefile and set C pagefile
win_pagefile:
drive: C
initial_size: 2048
maximum_size: 2048
remove_all: yes
automatic: no
state: present
- name: Set D pagefile, override if exists
win_pagefile:
drive: d
initial_size: 1024
maximum_size: 1024
state: present
'''
RETURN = r'''
automatic_managed_pagefiles:
description: Whether the pagefiles is automatically managed.
returned: When state is query.
type: boolean
sample: true
pagefiles:
description: Contains caption, description, initial_size, maximum_size and name for each pagefile in the system.
returned: When state is query.
type: list
sample:
[{"caption": "c:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ c:\\", "initial_size": 2048, "maximum_size": 2048, "name": "c:\\pagefile.sys"},
{"caption": "d:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ d:\\", "initial_size": 1024, "maximum_size": 1024, "name": "d:\\pagefile.sys"}]
'''
|
jderobot-varribas/JdeRobot | refs/heads/jderobot | src/stable/components/introrob_py/gui/sensorsWidget.py | 1 | #
# Copyright (C) 1997-2015 JDE Developers Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
# Authors :
# Alberto Martin Florido <almartinflorido@gmail.com>
#
from PyQt4 import QtGui, QtCore
from gui.speedoMeter import SpeedoMeter
from gui.attitudeIndicator import AttitudeIndicator
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
import math
def enumList(enum, sentinel):
    '''
    Build a list by calling *enum* with every integer from 0 up to
    (but not including) *sentinel*.
    '''
    return list(map(enum, range(sentinel)))
# Enumerate the Qt palette colour groups/roles and the Qwt analog-clock hands
# once at import time so the widget code below can iterate over them.
colorGroupList = enumList(
    Qt.QPalette.ColorGroup, Qt.QPalette.NColorGroups)
colorRoleList = enumList(
    Qt.QPalette.ColorRole, Qt.QPalette.NColorRoles)
handList = enumList(
    Qwt.QwtAnalogClock.Hand, Qwt.QwtAnalogClock.NHands)
class SensorsWidget(QtGui.QWidget):
    """
    Fixed-size (660x450) window that displays drone telemetry: artificial
    horizon, compass, altimeter dial, battery thermometer and three linear
    velocity dials. Repainting is triggered by emitting ``sensorsUpdate``.
    """
    # Signal emitted by the owner when fresh sensor data is available.
    sensorsUpdate=QtCore.pyqtSignal()
    def __init__(self,winParent):
        """Build the UI and wire the update signal; winParent supplies sensors."""
        super(SensorsWidget, self).__init__()
        self.winParent=winParent
        self.sensorsUpdate.connect(self.updateSensors)
        self.initUI()
    def initUI(self):
        """Create and position every gauge and label (absolute coordinates)."""
        self.setMinimumSize(660,450)
        self.setMaximumSize(660,450)
        self.setWindowTitle("Sensors")
        # Attitude indicator (artificial horizon) plus pitch/roll readouts.
        self.horizon=self.__createDial(0)
        self.horizon.resize(200,200)
        self.horizon.move(60,20)
        self.pitchLabel=QtGui.QLabel('Pitch:',self)
        self.pitchLabel.move(300,140)
        self.pitchValueLabel=QtGui.QLabel('-180',self)
        self.pitchValueLabel.resize(60,21)
        self.pitchValueLabel.move(340,140)
        self.rollLabel=QtGui.QLabel('Roll:',self)
        self.rollLabel.move(300,160)
        self.rollValueLabel=QtGui.QLabel('180',self)
        self.rollValueLabel.resize(60,21)
        self.rollValueLabel.move(340,160)
        # Compass with numeric yaw readout.
        self.compass=self.__createCompass(1)
        self.compass.resize(120,120)
        self.compass.move(280,10)
        self.compass.show()
        self.yawLabel=QtGui.QLabel('Yaw:',self)
        self.yawLabel.move(300,180)
        self.yawValueLabel=QtGui.QLabel('45',self)
        self.yawValueLabel.resize(60,21)
        self.yawValueLabel.move(340,180)
        # Altitude dial (value wraps by decade, see drawAltd).
        self.altd=self.__createDial(2)
        self.altd.resize(150,150)
        self.altd.move(420,50)
        # Battery level shown as a 0-100 thermometer.
        self.battery=Qwt.QwtThermo(self)
        self.battery.setMaxValue(100.0)
        self.battery.setMinValue(0.0)
        self.battery.setPipeWidth(10)
        self.battery.move(580,10)
        self.battery.resize(56,241)
        self.batteryLabel=QtGui.QLabel('Battery (%)',self)
        self.batteryLabel.move(580,251)
        # Three speedometer dials for the linear velocity components.
        self.velLinX=self.__createDial(1)
        self.velLinX.resize(150,150)
        self.velLinX.move(60,270)
        self.velXLabel = QtGui.QLabel('Linear X (m/s)',self)
        self.velXLabel.move(95,420)
        self.velLinY=self.__createDial(1)
        self.velLinY.resize(150,150)
        self.velLinY.move(240,270)
        self.velYLabel = QtGui.QLabel('Linear Y (m/s)',self)
        self.velYLabel.move(275,420)
        self.velLinZ=self.__createDial(1)
        self.velLinZ.resize(150,150)
        self.velLinZ.setLabel("8 m/s")
        self.velLinZ.move(420,270)
        self.velZLabel = QtGui.QLabel('Linear Z (m/s)',self)
        self.velZLabel.move(455,420)
        # Animation step sizes (unused in this widget's visible code — kept as-is).
        self.__speed_offset = 0.8
        self.__angle_offset = 0.05
        self.__gradient_offset = 0.005
    def updateSensors(self):
        """Pull the latest pose/navdata from the parent and refresh all gauges."""
        pose=self.winParent.getSensor().getPose3D()
        if pose != None:
            # Quaternion components of the vehicle orientation.
            qw=pose.q0
            qx=pose.q1
            qy=pose.q2
            qz=pose.q3
            self.drawAltd(pose.z)
            self.drawYawValues(self.quatToYaw(qw,qx,qy,qz)*180/math.pi)
            self.drawPitchRollValues(self.quatToPitch(qw,qx,qy,qz)*180/math.pi,self.quatToRoll(qw,qx,qy,qz)*180/math.pi)
        navdata=self.winParent.getSensor().getNavdata()
        if navdata != None:
            self.battery.setValue(navdata.batteryPercent)
            self.drawVelocities(navdata.vx,navdata.vy,navdata.vz)
    def drawYawValues(self,degress):
        """Show yaw (degrees) on both the label and the compass needle."""
        value="{0:.2f}".format(degress)
        self.yawValueLabel.setText(unicode(value))
        self.compass.setValue(degress)
    def drawAltd(self, meters):
        """Update the altitude dial; the needle wraps per decade of altitude."""
        if(meters>=10 and meters<100):
            self.altd.setValue(meters%10)
        elif (meters>=100 and meters<1000):
            self.altd.setValue(meters%100)
        else:
            self.altd.setValue(meters)
        altLabel="{0:.0f}".format(meters)+' m'
        self.altd.setLabel(altLabel)
    def drawPitchRollValues(self,pitch,roll):
        """Update the artificial horizon and the pitch/roll text labels."""
        # Map pitch in (-90, 90] degrees to a gradient in [-1, 1] for the horizon.
        if(pitch>0 and pitch<=90):
            result=pitch/90
            result=-result
        elif (pitch<0 and pitch>=-90):
            result=pitch/-90
        else:
            result=0.0
        self.horizon.setGradient(result)
        self.horizon.setAngle(-roll)
        pitchValue="{0:.2f}".format(pitch)
        rollValue="{0:.2f}".format(roll)
        self.pitchValueLabel.setText(unicode(pitchValue))
        self.rollValueLabel.setText(unicode(rollValue))
    def drawVelocities(self,vx,vy,vz):
        """Show |vx|, |vy|, |vz| on the dials; inputs are divided by 1000
        (presumably navdata velocities are in mm/s — confirm)."""
        vx=math.fabs(vx)
        vx/=1000.0
        self.velLinX.setValue(vx)
        # NOTE(review): vx is already non-negative here; this fabs is redundant.
        vx=math.fabs(vx)
        vxLabel="{0:.0f}".format(vx)+' m/s'
        self.velLinX.setLabel(vxLabel)
        vy=math.fabs(vy)
        vy/=1000.0
        self.velLinY.setValue(vy)
        vyLabel="{0:.0f}".format(vy)+' m/s'
        self.velLinY.setLabel(vyLabel)
        vz=math.fabs(vz)
        vz/=1000.0
        self.velLinZ.setValue(vz)
        vzLabel="{0:.0f}".format(vz)+' m/s'
        self.velLinZ.setLabel(vzLabel)
    def quatToRoll(self,qw,qx,qy,qz):
        """Return the roll angle (radians) of the quaternion (qw,qx,qy,qz)."""
        rotateXa0=2.0*(qy*qz + qw*qx)
        rotateXa1=qw*qw - qx*qx - qy*qy + qz*qz
        rotateX=0.0
        if(rotateXa0 != 0.0 and rotateXa1 !=0.0):
            rotateX=math.atan2(rotateXa0, rotateXa1)
        return rotateX
    def quatToPitch(self,qw,qx,qy,qz):
        """Return the pitch angle (radians), clamped to [-pi/2, pi/2]."""
        rotateYa0=-2.0*(qx*qz - qw*qy)
        rotateY=0.0
        if(rotateYa0>=1.0):
            rotateY=math.pi/2.0
        elif(rotateYa0<=-1.0):
            rotateY=-math.pi/2.0
        else:
            rotateY=math.asin(rotateYa0)
        return rotateY
    def quatToYaw(self,qw,qx,qy,qz):
        """Return the yaw angle (radians) of the quaternion (qw,qx,qy,qz)."""
        rotateZa0=2.0*(qx*qy + qw*qz)
        rotateZa1=qw*qw + qx*qx - qy*qy - qz*qz
        rotateZ=0.0
        if(rotateZa0 != 0.0 and rotateZa1 != 0.0):
            rotateZ=math.atan2(rotateZa0,rotateZa1)
        return rotateZ
    def closeEvent(self, event):
        """Notify the parent window so it can drop its reference to us."""
        self.winParent.closeSensorsWidget()
    def __createDial(self, pos):
        """Build one dial: 0 = attitude indicator, 1 = 0-8 speedometer,
        2 = 0-10 altimeter. Returns the configured Qwt dial."""
        dial = None
        if pos == 0:
            self.__ai = AttitudeIndicator(self)
            dial = self.__ai
        elif pos == 1:
            self.__speedo = SpeedoMeter(self)
            self.__speedo.setRange(0.0, 8.0)
            self.__speedo.setLabel("0")
            self.__speedo.setOrigin(180)
            self.__speedo.setScaleArc(0.0,270.0)
            self.__speedo.setScale(-1, 2, 1.0)
            dial = self.__speedo
        elif pos == 2:
            self.__speedo = SpeedoMeter(self)
            self.__speedo.setRange(0.0, 10.0)
            self.__speedo.setLabel("m")
            self.__speedo.setOrigin(-90)
            self.__speedo.setScaleArc(0.0,360.0)
            self.__speedo.setScale(-1, 2, 1)
            dial = self.__speedo
        if dial:
            dial.setReadOnly(True)
            dial.scaleDraw().setPenWidth(3)
            dial.setLineWidth(4)
            dial.setFrameShadow(Qwt.QwtDial.Sunken)
        return dial
    def __createCompass(self, pos):
        """Build a QwtCompass whose style depends on pos (0 = wind rose,
        1 = plain magnet needle); derived from the Qwt dial example code."""
        palette = Qt.QPalette()
        for colorRole in colorRoleList:
            palette.setColor(colorRole, Qt.QColor())
        palette.setColor(
            Qt.QPalette.Base,
            self.palette().color(self.backgroundRole()).light(120))
        palette.setColor(
            Qt.QPalette.Foreground,
            palette.color(Qt.QPalette.Base))
        compass = Qwt.QwtCompass(self)
        compass.setLineWidth(4)
        if pos < 3:
            compass.setFrameShadow(Qwt.QwtCompass.Sunken)
        else:
            compass.setFrameShadow(Qwt.QwtCompass.Raised)
        if pos == 0:
            # Cardinal-point labels with a four-armed rose and wind arrow.
            compass.setLabelMap({0.0: "N",
                                 90.0: "E",
                                 180.0: "S",
                                 270.0: "W"})
            rose = Qwt.QwtSimpleCompassRose(4, 1)
            compass.setRose(rose)
            compass.setNeedle(
                Qwt.QwtCompassWindArrow(Qwt.QwtCompassWindArrow.Style2))
            compass.setValue(60.0)
        elif pos == 1:
            # Unlabelled scale with a magnet-style needle.
            compass.setLabelMap({0.0: "",
                                 90.0: "",
                                 180.0: "",
                                 270.0: ""})
            compass.setScaleOptions(Qwt.QwtDial.ScaleBackbone
                                    | Qwt.QwtDial.ScaleTicks
                                    | Qwt.QwtDial.ScaleLabel)
            compass.setScaleTicks(0, 0, 3)
            compass.setNeedle(Qwt.QwtCompassMagnetNeedle(
                Qwt.QwtCompassMagnetNeedle.TriangleStyle,
                Qt.Qt.white,
                Qt.Qt.red))
            compass.setValue(220.0)
        # Propagate every valid colour of the prepared palette, then derive
        # light/mid/dark shades from the base colour for the frame.
        newPalette = compass.palette()
        for colorRole in colorRoleList:
            if palette.color(colorRole).isValid():
                for colorGroup in colorGroupList:
                    newPalette.setColor(
                        colorGroup, colorRole, palette.color(colorRole))
        for colorGroup in colorGroupList:
            light = newPalette.color(
                colorGroup, Qt.QPalette.Base).light(170)
            dark = newPalette.color(
                colorGroup, Qt.QPalette.Base).dark(170)
            if compass.frameShadow() == Qwt.QwtDial.Raised:
                mid = newPalette.color(
                    colorGroup, Qt.QPalette.Base).dark(110)
            else:
                mid = newPalette.color(
                    colorGroup, Qt.QPalette.Base).light(110)
            newPalette.setColor(colorGroup, Qt.QPalette.Dark, dark)
            newPalette.setColor(colorGroup, Qt.QPalette.Mid, mid)
            newPalette.setColor(colorGroup, Qt.QPalette.Light, light)
        compass.setPalette(newPalette)
        compass.setReadOnly(True)
        return compass
|
simone-campagna/invoice | refs/heads/master | tests/unittests/test_invoice_main_validators.py | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
# Public API of this test module.
__all__ = [
    'Test_invoice_main_config',
]
import datetime
import glob
import os
import tempfile
import unittest
from invoice.log import get_null_logger
from invoice.invoice_collection import InvoiceCollection
from invoice.invoice_main import invoice_main
from invoice.invoice_db import InvoiceDb
from invoice.database.db_types import Path
from invoice.string_printer import StringPrinter
from invoice.version import VERSION
class Test_invoice_main_config(unittest.TestCase):
    """
    End-to-end tests for the 'validators' subcommand of invoice_main:
    adding/clearing/importing/exporting/editing validators and their effect
    on which invoices survive a scan. Expected CLI output is pinned in the
    class-level string constants below and must match byte-for-byte.
    """
    # Output of `validators` when no validator is configured.
    VALIDATORS_SHOW_EMPTY = """\
validators:
"""
    # Output of `validators` once the weekday validator has been added.
    VALIDATORS_SHOW_EXAMPLE = """\
validators:
  + filter: 'Date("2014-01-01") <= date <= Date("2014-12-31")'
    check: 'not date.weekday() in {Weekday["Saturday"], Weekday["Sunday"]}'
    message: 'invalid weekday for year 2014'
"""
    # Listing of the invoices that pass the weekday validator...
    LIST_SHORT = """\
anno numero città data codice_fiscale nome compenso rimborsi cpa bolli incasso valuta
2014 1 Gotham City 2014-01-03 WNYBRC01G01H663S Bruce Wayne 50.0 0.0 1.0 0.0 51.00 euro
2014 2 New York City 2014-01-03 PRKPRT01G01H663M Peter B. Parker 75.0 0.0 1.5 0.0 76.50 euro
2014 3 Greenville 2014-01-22 BNNBRC01G01H663S Robert Bruce Banner 100.0 0.0 2.0 5.0 107.00 euro
"""
    # ...and the full listing when no validator filters anything out.
    LIST_FULL = LIST_SHORT + """\
2014 4 Gotham City 2014-01-25 WNYBRC01G01H663S Bruce Wayne 50.0 0.0 1.0 0.0 51.00 euro
2014 5 Smallville 2014-01-29 KNTCRK01G01H663X Clark Kent 150.0 0.0 3.0 2.0 155.00 euro
2014 6 Smallville 2014-02-28 KNTCRK01G01H663X Clark Kent 120.0 30.0 3.0 30.0 246.66 euro
"""
    def setUp(self):
        """Point at the example invoice documents shipped with the project."""
        self.dirname = Path.db_to(os.path.join(os.path.dirname(__file__), '..', '..', 'example'))
        self.logger = get_null_logger()
        # Show complete diffs for the long multi-line output comparisons.
        self.maxDiff = None
    def test_invoice_main_validators_scan(self):
        """A validator added before the first scan filters invoices at scan time."""
        with tempfile.TemporaryDirectory() as tmpdir:
            rc_dir = os.path.join(tmpdir, 'rc_dir')
            os.makedirs(rc_dir)
            p = StringPrinter()
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['init', '-R', rc_dir, os.path.join(self.dirname, '*.doc')],
            )
            self.assertEqual(p.string(), '')
            # No validators configured yet.
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EMPTY)
            # Add the weekday validator; it is echoed back.
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir, '--add', 'Date("2014-01-01") <= date <= Date("2014-12-31")', 'not date.weekday() in {Weekday["Saturday"], Weekday["Sunday"]}', 'invalid weekday for year 2014'],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EXAMPLE)
            # The validator persists across invocations.
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EXAMPLE)
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['scan', '-R', rc_dir, '--progressbar=off']
            )
            # Only the invoices passing the validator are listed.
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['list', '-R', rc_dir]
            )
            self.assertEqual(p.string(), self.LIST_SHORT)
    def test_invoice_main_validators_validate(self):
        """Adding/clearing a validator after a scan re-filters the stored invoices."""
        with tempfile.TemporaryDirectory() as tmpdir:
            rc_dir = os.path.join(tmpdir, 'rc_dir')
            os.makedirs(rc_dir)
            p = StringPrinter()
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['init', '-R', rc_dir, os.path.join(self.dirname, '*.doc')],
            )
            self.assertEqual(p.string(), '')
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['scan', '-R', rc_dir, '--progressbar=off'],
            )
            # Without validators every invoice is kept.
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['list', '-R', rc_dir],
            )
            print(p.string())
            self.assertEqual(p.string(), self.LIST_FULL)
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir, '--add', 'Date("2014-01-01") <= date <= Date("2014-12-31")', 'not date.weekday() in {Weekday["Saturday"], Weekday["Sunday"]}', 'invalid weekday for year 2014'],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EXAMPLE)
            # Explicit validate drops the invoices failing the new validator.
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validate', '-R', rc_dir],
            )
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['list', '-R', rc_dir],
            )
            self.assertEqual(p.string(), self.LIST_SHORT)
            # A re-scan keeps applying the stored validator.
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['scan', '-R', rc_dir, '--progressbar=off'],
            )
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['list', '-R', rc_dir],
            )
            self.assertEqual(p.string(), self.LIST_SHORT)
            # Clearing the validators and rescanning restores the full list.
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir, '--clear'],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EMPTY)
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['scan', '-R', rc_dir, '--progressbar=off'],
            )
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['list', '-R', rc_dir],
            )
            #self.assertEqual(p.string(), self.LIST_SHORT)
            #
            # p.reset()
            # invoice_main(
            # printer=p,
            # logger=self.logger,
            # args=['scan', '-R', rc_dir, '--force-refresh', '--progressbar=off'],
            # )
            #
            # p.reset()
            # invoice_main(
            # printer=p,
            # logger=self.logger,
            # args=['list', '-R', rc_dir],
            # )
            self.assertEqual(p.string(), self.LIST_FULL)
    def test_invoice_main_validators_import_export(self):
        """Validators survive an export / clear / import round trip."""
        with tempfile.TemporaryDirectory() as tmpdir, tempfile.NamedTemporaryFile() as v_filename:
            rc_dir = os.path.join(tmpdir, 'rc_dir')
            os.makedirs(rc_dir)
            p = StringPrinter()
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['init', '-R', rc_dir, os.path.join(self.dirname, '*.doc')],
            )
            self.assertEqual(p.string(), '')
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir, '--add', 'Date("2014-01-01") <= date <= Date("2014-12-31")', 'not date.weekday() in {Weekday["Saturday"], Weekday["Sunday"]}', 'invalid weekday for year 2014'],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EXAMPLE)
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EXAMPLE)
            # Export, wipe, then re-import: the validator must come back.
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir, '--export', v_filename.name],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EXAMPLE)
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir, '--clear'],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EMPTY)
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir, '--import', v_filename.name],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EXAMPLE)
    def test_invoice_main_validators_edit(self):
        """--edit pipes the validator file through an external editor command."""
        with tempfile.TemporaryDirectory() as tmpdir:
            rc_dir = os.path.join(tmpdir, 'rc_dir')
            os.makedirs(rc_dir)
            p = StringPrinter()
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['init', '-R', rc_dir, os.path.join(self.dirname, '*.doc')],
            )
            self.assertEqual(p.string(), '')
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir, '--add', 'Date("2014-01-01") <= date <= Date("2014-12-31")', 'not date.weekday() in {Weekday["Saturday"], Weekday["Sunday"]}', 'invalid weekday for year 2014'],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EXAMPLE)
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EXAMPLE)
            # Using sed as the "editor" rewrites 2014 -> 2028 in the validator.
            p.reset()
            invoice_main(
                printer=p,
                logger=self.logger,
                args=['validators', '-R', rc_dir, '--edit', '--editor', 'sed "s/2014/2028/g" -i'],
            )
            self.assertEqual(p.string(), self.VALIDATORS_SHOW_EXAMPLE.replace('2014', '2028'))
|
bmi-forum/bmi-pyre | refs/heads/master | pythia-0.8/packages/merlin/tests/hello/__init__.py | 4 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# version
__id__ = "$Id: __init__.py,v 1.1.1.1 2005/03/08 16:13:59 aivazis Exp $"
# End of file
|
schryer/schryer_pelican_blog | refs/heads/master | pelicanconf.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# --- Site identity ---
AUTHOR = u'David Schryer'
SITENAME = u'David Schryer'
SITEURL = 'schryer_pelican_blog'
# Templates used by the theme to render per-article social sharing widgets.
CUSTOM_ARTICLE_SHARING = 'sharing.html'
CUSTOM_ARTICLE_SCRIPTS = 'sharing_scripts.html'
TIMEZONE = 'Europe/Tallinn'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
#LINKS = (('Pelican', 'http://getpelican.com/'),
# ('Python.org', 'http://python.org/'),
# ('Jinja2', 'http://jinja.pocoo.org/'),
# ('You can modify those links in your config file', '#'),)
# Social widget
#SOCIAL = (('You can add links in your config file', '#'),
# ('Another social link', '#'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
# --- Theme and plugins (checked out beside this file) ---
THEME = 'pelican-themes/subtle'
MARKUP = ['md', 'ipynb']
DISPLAY_CATEGORIES_ON_MENU = True
PLUGIN_PATHS = ['pelican-plugins',]
PLUGINS = ['render_math', 'ipynb']
|
kiyoto/statsmodels | refs/heads/master | statsmodels/distributions/__init__.py | 30 | from .empirical_distribution import ECDF, monotone_fn_inverter, StepFunction
from .edgeworth import ExpandedNormal
|
pombredanne/creo | refs/heads/master | server/creo/__init__.py | 12133432 | |
writefaruq/lionface-app | refs/heads/master | django/conf/locale/eu/__init__.py | 12133432 | |
hassanabidpk/django | refs/heads/master | django/contrib/gis/management/commands/__init__.py | 12133432 | |
NifTK/NiftyNet | refs/heads/dev | niftynet/contrib/evaluation/regression_evaluations.py | 12133432 | |
jeske/csla | refs/heads/master | pysrc/clearsilver/odb_postgres.py | 1 | #! /usr/bin/env python
"""
usage: %(progname)s [args]
"""
import os, sys, string, time, getopt
from log import *
import odb
from pyPgSQL import PgSQL
class Cursor(odb.Cursor):
  """Postgres-specific odb cursor that can report generated sequence values."""
  def insert_id(self, tablename, colname):
    """Return the value most recently generated by the column's sequence."""
    query = "select last_value from %s_%s_seq" % (tablename, colname)
    self.execute(query)
    return self.fetchone()[0]
class Connection(odb.Connection):
  """PostgreSQL implementation of the odb Connection interface (pyPgSQL)."""
  def __init__(self, *args, **kwargs):
    # All arguments are forwarded verbatim to PgSQL.connect().
    odb.Connection.__init__(self)
    # Fix: replaced the removed-in-py3 apply() builtin with direct unpacking.
    self._conn = PgSQL.connect(*args, **kwargs)
    self.SQLError = PgSQL.OperationalError
  def getConnType(self): return "postgres"
  def cursor(self):
    """Return a postgres-aware Cursor wrapping a fresh PgSQL cursor."""
    return Cursor(self._conn.cursor())
  def escape(self,str):
    """Escape a value for inclusion in a SQL literal (single quotes doubled)."""
    if str is None:
      return None
    elif type(str) == type(""):
      return string.replace(str,"'","''")
    elif type(str) == type(1):
      return str
    else:
      # Fix: raising a plain string is a TypeError since Python 2.6;
      # raise a real exception instead.
      raise ValueError("unknown column data type: %s" % type(str))
  def listTables(self, cursor):
    """Return the names of all tables known to pg_catalog."""
    cursor.execute("select tablename from pg_catalog.pg_tables")
    rows = cursor.fetchall()
    tables = []
    for row in rows:
      tables.append(row[0])
    return tables
  def listIndices(self, tableName, cursor):
    """Return the names of all indexes defined on tableName."""
    sql = "select indexname from pg_catalog.pg_indexes where tablename='%s'" % tableName
    cursor.execute(sql)
    rows = cursor.fetchall()
    tables = map(lambda row: row[0], rows)
    return tables
  def listFieldsDict(self, table_name, cursor):
    """Return {column_name: pg_attribute_row} for every column of table_name."""
    # First resolve the table's oid, then fetch its column attributes.
    sql = "SELECT c.oid, n.nspname, c.relname FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE pg_catalog.pg_table_is_visible(c.oid) AND c.relname = '%s' ORDER BY 2, 3;" % table_name
    cursor.execute(sql)
    row = cursor.fetchone()
    oid = row[0]
    sql = "SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod), (SELECT substring(d.adsrc for 128) FROM pg_catalog.pg_attrdef d WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef), a.attnotnull, a.attnum FROM pg_catalog.pg_attribute a WHERE a.attrelid = '%s' AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum" % oid
    cursor.execute(sql)
    rows = cursor.fetchall()
    columns = {}
    for row in rows:
      colname = row[0]
      columns[colname] = row
    return columns
  def alterTableToMatch(self, table, cursor):
    """Add any columns the application schema defines but the DB lacks."""
    invalidAppCols, invalidDBCols = table.checkTable()
    if not invalidAppCols: return
    defs = []
    for colname in invalidAppCols.keys():
      col = table.getColumnDef(colname)
      colname = col[0]
      coltype = col[1]
      options = col[2]
      defs.append(table._colTypeToSQLType(colname, coltype, options))
    defs = string.join(defs, ", ")
    sql = "alter table %s add column " % table.getTableName()
    sql = sql + "(" + defs + ")"
    # Fix: use function-call form of print (works on both Python 2 and 3).
    print(sql)
    cursor.execute(sql)
  def auto_increment(self, coltype):
    """Postgres spells auto-increment columns as SERIAL (no extra options)."""
    return "SERIAL", None
  def supportsTriggers(self): return False
|
ocadotechnology/boto | refs/heads/develop | tests/unit/glacier/test_concurrent.py | 88 | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import tempfile
from boto.compat import Queue
from tests.compat import mock, unittest
from tests.unit import AWSMockServiceTestCase
from boto.glacier.concurrent import ConcurrentUploader, ConcurrentDownloader
from boto.glacier.concurrent import UploadWorkerThread
from boto.glacier.concurrent import _END_SENTINEL
class FakeThreadedConcurrentUploader(ConcurrentUploader):
    """Test double that records the work queues instead of starting threads."""
    def _start_upload_threads(self, results_queue, upload_id,
                              worker_queue, filename):
        # Stash the queues so tests can inspect the scheduled work items.
        self.results_queue = results_queue
        self.worker_queue = worker_queue
        self.upload_id = upload_id
    def _wait_for_upload_threads(self, hash_chunks, result_queue, total_parts):
        # Pretend every part hashed to b'foo' so the upload can complete.
        for i in range(total_parts):
            hash_chunks[i] = b'foo'
class FakeThreadedConcurrentDownloader(ConcurrentDownloader):
    """Test double that records the work queues instead of starting threads."""
    def _start_download_threads(self, results_queue, worker_queue):
        self.results_queue = results_queue
        self.worker_queue = worker_queue
    def _wait_for_download_threads(self, filename, result_queue, total_parts):
        # Nothing to wait for — no threads were started.
        pass
class TestConcurrentUploader(unittest.TestCase):
    """Unit tests for part-size calculation and queue population of the
    glacier concurrent uploader/downloader (os.stat is patched throughout)."""
    def setUp(self):
        super(TestConcurrentUploader, self).setUp()
        self.stat_patch = mock.patch('os.stat')
        self.addCleanup(self.stat_patch.stop)
        self.stat_mock = self.stat_patch.start()
        # Give a default value for tests that don't care
        # what the file size is.
        self.stat_mock.return_value.st_size = 1024 * 1024 * 8
    def test_calculate_required_part_size(self):
        """An 8MB archive splits into two 4MB parts."""
        self.stat_mock.return_value.st_size = 1024 * 1024 * 8
        uploader = ConcurrentUploader(mock.Mock(), 'vault_name')
        total_parts, part_size = uploader._calculate_required_part_size(
            1024 * 1024 * 8)
        self.assertEqual(total_parts, 2)
        self.assertEqual(part_size, 4 * 1024 * 1024)
    def test_calculate_required_part_size_too_small(self):
        """A too-small requested part size is bumped up to the 4MB minimum."""
        too_small = 1 * 1024 * 1024
        self.stat_mock.return_value.st_size = 1024 * 1024 * 1024
        uploader = ConcurrentUploader(mock.Mock(), 'vault_name',
                                      part_size=too_small)
        total_parts, part_size = uploader._calculate_required_part_size(
            1024 * 1024 * 1024)
        self.assertEqual(total_parts, 256)
        # Part size if 4MB not the passed in 1MB.
        self.assertEqual(part_size, 4 * 1024 * 1024)
    def test_work_queue_is_correctly_populated(self):
        """upload() enqueues one (index, size) item per part plus sentinels."""
        uploader = FakeThreadedConcurrentUploader(mock.MagicMock(),
                                                  'vault_name')
        uploader.upload('foofile')
        q = uploader.worker_queue
        items = [q.get() for i in range(q.qsize())]
        self.assertEqual(items[0], (0, 4 * 1024 * 1024))
        self.assertEqual(items[1], (1, 4 * 1024 * 1024))
        # 2 for the parts, 10 for the end sentinels (10 threads).
        self.assertEqual(len(items), 12)
    def test_correct_low_level_api_calls(self):
        """upload() drives initiate/complete multipart with the right sizes."""
        api_mock = mock.MagicMock()
        uploader = FakeThreadedConcurrentUploader(api_mock, 'vault_name')
        uploader.upload('foofile')
        # The threads call the upload_part, so we're just verifying the
        # initiate/complete multipart API calls.
        api_mock.initiate_multipart_upload.assert_called_with(
            'vault_name', 4 * 1024 * 1024, None)
        api_mock.complete_multipart_upload.assert_called_with(
            'vault_name', mock.ANY, mock.ANY, 8 * 1024 * 1024)
    def test_downloader_work_queue_is_correctly_populated(self):
        """download() enqueues one item per part plus thread sentinels."""
        job = mock.MagicMock()
        job.archive_size = 8 * 1024 * 1024
        downloader = FakeThreadedConcurrentDownloader(job)
        downloader.download('foofile')
        q = downloader.worker_queue
        items = [q.get() for i in range(q.qsize())]
        self.assertEqual(items[0], (0, 4 * 1024 * 1024))
        self.assertEqual(items[1], (1, 4 * 1024 * 1024))
        # 2 for the parts, 10 for the end sentinels (10 threads).
        self.assertEqual(len(items), 12)
class TestUploaderThread(unittest.TestCase):
    """Unit tests for UploadWorkerThread cleanup, error reporting and retries."""
    def setUp(self):
        # A real temp file: the worker opens its filename on construction.
        self.fileobj = tempfile.NamedTemporaryFile()
        self.filename = self.fileobj.name
    def test_fileobj_closed_when_thread_shuts_down(self):
        """The worker closes its file handle when its run loop exits."""
        thread = UploadWorkerThread(mock.Mock(), 'vault_name',
                                    self.filename, 'upload_id',
                                    Queue(), Queue())
        fileobj = thread._fileobj
        self.assertFalse(fileobj.closed)
        # By settings should_continue to False, it should immediately
        # exit, and we can still verify cleanup behavior.
        thread.should_continue = False
        thread.run()
        self.assertTrue(fileobj.closed)
    def test_upload_errors_have_exception_messages(self):
        """Failures propagate to the result queue with the original message."""
        api = mock.Mock()
        job_queue = Queue()
        result_queue = Queue()
        upload_thread = UploadWorkerThread(
            api, 'vault_name', self.filename,
            'upload_id', job_queue, result_queue, num_retries=1,
            time_between_retries=0)
        api.upload_part.side_effect = Exception("exception message")
        job_queue.put((0, 1024))
        job_queue.put(_END_SENTINEL)
        upload_thread.run()
        result = result_queue.get(timeout=1)
        self.assertIn("exception message", str(result))
    def test_num_retries_is_obeyed(self):
        """num_retries=2 yields exactly 3 upload_part attempts."""
        # total attempts is 1 + num_retries so if I have num_retries of 2,
        # I'll attempt the upload once, and if that fails I'll retry up to
        # 2 more times for a total of 3 attempts.
        api = mock.Mock()
        job_queue = Queue()
        result_queue = Queue()
        upload_thread = UploadWorkerThread(
            api, 'vault_name', self.filename,
            'upload_id', job_queue, result_queue, num_retries=2,
            time_between_retries=0)
        api.upload_part.side_effect = Exception()
        job_queue.put((0, 1024))
        job_queue.put(_END_SENTINEL)
        upload_thread.run()
        self.assertEqual(api.upload_part.call_count, 3)
# Allow running this test module directly with `python test_concurrent.py`.
if __name__ == '__main__':
    unittest.main()
|
joshua-cogliati-inl/raven | refs/heads/devel | doc/workshop/stochasticCollocation/inputs/projectile.py | 2 | #***************************************
#* Simple analytic test ExternalModule *
#***************************************
#
# Simulates time-dependent track of a projectile through the air from start to 0,
# assuming no air resistance.
# Inputs:
# (x0,y0) - initial position
# v0 - initial total velocity
# ang - angle of initial motion, in degrees, with respect to flat ground
# Outputs:
# (x,y) - vector positions of projectile in time
# t - corresponding time steps
#
import numpy as np
def prange(v,th,y0=0,g=9.8):
  """
    Calculates the analytic range (no air resistance).
    @ In, v, float, velocity of the projectile
    @ In, th, float, angle to the ground for initial projectile motion
    @ In, y0, float, optional, initial height of projectile
    @ In, g, float, optional, gravitational constant (m/s/s)
    @ Out, prange, float, range
  """
  # decompose launch speed into horizontal/vertical components
  vx = v * np.cos(th)
  vy = v * np.sin(th)
  return vx / g * (vy + np.sqrt(vy ** 2 + 2. * g * y0))
def time_to_ground(v,th,y0=0,g=9.8):
  """
    Calculates the analytic time of flight
    @ In, v, float, velocity of the projectile
    @ In, th, float, angle to the ground for initial projectile motion
    @ In, y0, float, optional, initial height of projectile
    @ In, g, float, optional, gravitational constant (m/s/s)
    @ Out, time_to_ground, float, time projectile is above the ground
  """
  # vertical launch speed; flight ends when y returns to zero
  vy = v * np.sin(th)
  return (vy + np.sqrt(vy ** 2 + 2. * g * y0)) / g
def x_pos(x0,v,t):
  """
    Calculates the x position in time (constant horizontal velocity).
    @ In, x0, float, initial horizontal position
    @ In, v, float, velocity of the projectile
    @ In, t, float, time of flight
    @ Out, x_pos, float, horizontal position
  """
  return v * t + x0
def y_pos(y0,v,t,g=9.8):
  """
    Calculates the analytic vertical position in time.
    Generalized: the original hard-coded 4.9 (= 0.5*g with g=9.8); g is now
    an optional parameter, consistent with prange/time_to_ground. The default
    reproduces the original behavior exactly (0.5*9.8 == 4.9 in IEEE754).
    @ In, y0, float, initial vertical position
    @ In, v, float, initial vertical velocity of the projectile
    @ In, t, float, time of flight
    @ In, g, float, optional, gravitational constant (m/s/s)
    @ Out, y_pos, float, vertical position
  """
  return y0 + v*t - 0.5*g*t*t
def run(self,Input):
  """
    Method require by RAVEN to run this as an external model.
    @ In, self, object, object to store members on
    @ In, Input, dict, dictionary containing inputs from RAVEN
    @ Out, None
  """
  # read inputs, falling back to defaults when RAVEN omits them
  self.x0 = Input.get('x0',0.0)
  self.y0 = Input.get('y0',0.0)
  self.v0 = Input.get('v0',1.0)
  self.ang = Input.get('angle',45.)*np.pi/180.
  # decompose the initial speed into horizontal/vertical components
  vx = np.cos(self.ang)*self.v0
  vy = np.sin(self.ang)*self.v0
  # 10 evenly-spaced samples from launch until touchdown
  times = np.linspace(0,time_to_ground(self.v0,self.ang,self.y0),10)
  total_range = self.x0 + prange(self.v0,self.ang,self.y0)
  # trajectory histories; range is constant and repeated per time step
  self.x = np.array([x_pos(self.x0,vx,t) for t in times])
  self.y = np.array([y_pos(self.y0,vy,t) for t in times])
  self.r = np.full(len(times), total_range)
  self.time = times
#can be used as a code as well
if __name__=="__main__":
  import sys
  # input/output file names come from -i/-o command line flags
  inFile = sys.argv[sys.argv.index('-i')+1]
  outFile = sys.argv[sys.argv.index('-o')+1]
  #construct the input: each line of the input file is "name = value"
  Input = {}
  for line in open(inFile,'r'):
    arg,val = (a.strip() for a in line.split('='))
    Input[arg] = float(val)
  #make a dummy class to hold values (stands in for RAVEN's container)
  class IO:
    pass
  io = IO()
  #run the code
  run(io,Input)
  #write output as CSV, one row per time step
  outFile = open(outFile+'.csv','w')
  outFile.writelines('x0,y0,velocity,angle,r,x,y,time\n')
  # the scalar inputs are repeated on every row; angle converted back to degrees
  inpstr = ','.join(str(i) for i in (io.x0,io.y0,io.v0,io.ang*180/np.pi))
  for i in range(len(io.time)):
    outFile.writelines(inpstr+',%f,%f,%f,%f\n' %(io.r[i],io.x[i],io.y[i],io.time[i]))
  outFile.close()
|
Medigate/cutiuta-server | refs/heads/master | cutiuta-server/env/lib/python3.4/site-packages/django/contrib/gis/geos/prototypes/__init__.py | 163 | """
This module contains all of the GEOS ctypes function prototypes. Each
prototype handles the interaction between the GEOS library and Python
via ctypes.
"""
from django.contrib.gis.geos.prototypes.coordseq import ( # NOQA
create_cs, cs_clone, cs_getdims, cs_getordinate, cs_getsize, cs_getx,
cs_gety, cs_getz, cs_setordinate, cs_setx, cs_sety, cs_setz, get_cs,
)
from django.contrib.gis.geos.prototypes.geom import ( # NOQA
create_collection, create_empty_polygon, create_linearring,
create_linestring, create_point, create_polygon, destroy_geom, from_hex,
from_wkb, from_wkt, geom_clone, geos_get_srid, geos_normalize,
geos_set_srid, geos_type, geos_typeid, get_dims, get_extring, get_geomn,
get_intring, get_nrings, get_num_coords, get_num_geoms, to_hex, to_wkb,
to_wkt,
)
from django.contrib.gis.geos.prototypes.misc import * # NOQA
from django.contrib.gis.geos.prototypes.predicates import ( # NOQA
geos_contains, geos_covers, geos_crosses, geos_disjoint, geos_equals,
geos_equalsexact, geos_hasz, geos_intersects, geos_isclosed, geos_isempty,
geos_isring, geos_issimple, geos_isvalid, geos_overlaps,
geos_relatepattern, geos_touches, geos_within,
)
from django.contrib.gis.geos.prototypes.topology import * # NOQA
|
isloux/multiscaletools | refs/heads/master | python/realtimeyahoo.py | 1 | #!/usr/bin/env python
from yahoo_finance import Share
import pandas as pd
from datetime import datetime
filepath='../../'
filename='Russell3000cleanlist.xls'
russell3000=pd.read_excel(filepath+filename)
russell3000.columns=['Company','Ticker']
russell3000['Price']=pd.Series(-1,index=russell3000.index)
russell3000['Change']=pd.Series(-1,index=russell3000.index)
russell3000['Volume']=pd.Series(-1,index=russell3000.index)
russell3000['Open']=pd.Series(-1,index=russell3000.index)
russell3000['Average daily volume']=pd.Series(-1,index=russell3000.index)
russell3000['Market cap']=pd.Series('N/A',index=russell3000.index)
russell3000['Book value']=pd.Series(-1,index=russell3000.index)
russell3000['Ebitda']=pd.Series('N/A',index=russell3000.index)
russell3000['Dividend share']=pd.Series(-1,index=russell3000.index)
#russell3000['Dividend yield']=pd.Series(-1,index=russell3000.index)
russell3000['Earnings share']=pd.Series(-1,index=russell3000.index)
russell3000['Year high']=pd.Series(-1,index=russell3000.index)
russell3000['Year low']=pd.Series(-1,index=russell3000.index)
russell3000['50 days MA']=pd.Series(-1,index=russell3000.index)
russell3000['200 days MA']=pd.Series(-1,index=russell3000.index)
russell3000['Price earnings ratio']=pd.Series(-1,index=russell3000.index)
russell3000['Price earnings growth ratio']=pd.Series(-1,index=russell3000.index)
russell3000['Price sales']=pd.Series(-1,index=russell3000.index)
russell3000['Price book']=pd.Series(-1,index=russell3000.index)
russell3000['Short ratio']=pd.Series(-1,index=russell3000.index)
russell3000.set_index('Ticker',inplace=True)
for s in russell3000.index.values:
print "Retrieving data for", s
try:
shy=Share(s)
except:
continue
try:
russell3000.set_value(s,'Price',shy.get_price())
except:
pass
try:
russell3000.set_value(s,'Change',shy.get_change())
except:
pass
try:
russell3000.set_value(s,'Volume',shy.get_volume())
except:
pass
try:
russell3000.set_value(s,'Open',shy.get_open())
except:
pass
try:
russell3000.set_value(s,'Average daily volume',shy.get_avg_daily_volume())
except:
pass
try:
russell3000.set_value(s,'Market cap',shy.get_market_cap())
except:
pass
try:
russell3000.set_value(s,'Book value',shy.get_book_value())
except:
pass
try:
russell3000.set_value(s,'Ebitda',shy.get_ebitda())
except:
pass
try:
russell3000.set_value(s,'Dividend share',shy.get_dividend_share())
except:
pass
#try:
# russell3000.set_value(s,'Divident yield',shy.get_dividend_yield())
#except:
# pass
try:
russell3000.set_value(s,'Earnings share',shy.get_earnings_share())
except:
pass
try:
russell3000.set_value(s,'Year high',shy.get_year_high())
except:
pass
try:
russell3000.set_value(s,'Year low',shy.get_year_low())
except:
pass
try:
russell3000.set_value(s,'50 days MA',shy.get_50day_moving_avg())
except:
pass
try:
russell3000.set_value(s,'200 days MA',shy.get_200day_moving_avg())
except:
pass
try:
russell3000.set_value(s,'Price earnings ratio',shy.get_price_earnings_ratio())
except:
pass
try:
russell3000.set_value(s,'Price earnings growth ratio',shy.get_price_earnings_growth_ratio())
except:
pass
try:
russell3000.set_value(s,'Price sales',shy.get_price_sales())
except:
pass
try:
russell3000.set_value(s,'Price book',shy.get_price_book())
except:
pass
try:
russell3000.set_value(s,'Short ratio',shy.get_short_ratio())
except:
pass
u=datetime.now()
ofn='r3alldata'+str(u)[0:4]+str(u)[5:7]+str(u)[8:10]+'.xls'
russell3000.to_excel(ofn)
|
tsl143/addons-server | refs/heads/master | src/olympia/activity/tests/__init__.py | 12133432 | |
uname/bleproxy | refs/heads/master | PC/BleProxyDesk/google/protobuf/pyext/__init__.py | 12133432 | |
RafaelCosman/pybrain | refs/heads/master | pybrain/rl/__init__.py | 12133432 | |
pamfilos/invenio | refs/heads/master-sql-fixes | modules/webstyle/lib/goto_plugins/__init__.py | 12133432 | |
dotKom/studlan | refs/heads/master | apps/competition/__init__.py | 12133432 | |
nfallen/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/sslutils/openssl.py | 253 | import functools
import os
import shutil
import subprocess
import tempfile
from datetime import datetime
class OpenSSL(object):
    def __init__(self, logger, binary, base_path, conf_path, hosts, duration,
                 base_conf_path=None):
        """Context manager for interacting with OpenSSL.
        Creates a config file for the duration of the context.
        :param logger: stdlib logger or python structured logger
        :param logger: stdlib logger or mozlog structured logger
        :param binary: path to openssl binary
        :param base_path: path to directory for storing certificates
        :param conf_path: path for configuration file storing configuration data
        :param hosts: list of hosts to include in configuration (or None if not
                      generating host certificates)
        :param duration: Certificate duration in days
        :param base_conf_path: if set, exported as OPENSSL_CONF to subprocesses"""
        self.base_path = base_path
        self.binary = binary
        self.conf_path = conf_path
        self.base_conf_path = base_conf_path
        self.logger = logger
        # proc/cmd are set while a subprocess runs so log() can report them.
        self.proc = None
        self.cmd = []
        self.hosts = hosts
        self.duration = duration
    def __enter__(self):
        # Write the generated OpenSSL config; it exists only for the
        # lifetime of the context and is removed again in __exit__.
        with open(self.conf_path, "w") as f:
            f.write(get_config(self.base_path, self.hosts, self.duration))
        return self
    def __exit__(self, *args, **kwargs):
        os.unlink(self.conf_path)
    def log(self, line):
        # mozlog structured loggers have process_output; plain stdlib
        # loggers fall back to debug().
        if hasattr(self.logger, "process_output"):
            self.logger.process_output(self.proc.pid if self.proc is not None else None,
                                       line.decode("utf8", "replace"),
                                       command=" ".join(self.cmd))
        else:
            self.logger.debug(line)
    def __call__(self, cmd, *args, **kwargs):
        """Run a command using OpenSSL in the current context.
        :param cmd: The openssl subcommand to run
        :param *args: Additional arguments to pass to the command
        :returns: the combined stdout/stderr of the subprocess
        :raises subprocess.CalledProcessError: on a nonzero exit status
        """
        self.cmd = [self.binary, cmd]
        # The x509 subcommand does not take -config; all others get the
        # config file generated in __enter__.
        if cmd != "x509":
            self.cmd += ["-config", self.conf_path]
        self.cmd += list(args)
        env = os.environ.copy()
        if self.base_conf_path is not None:
            env["OPENSSL_CONF"] = self.base_conf_path.encode("utf8")
        self.proc = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                     env=env)
        stdout, stderr = self.proc.communicate()
        self.log(stdout)
        if self.proc.returncode != 0:
            raise subprocess.CalledProcessError(self.proc.returncode, self.cmd,
                                                output=stdout)
        self.cmd = []
        self.proc = None
        return stdout
def make_subject(common_name,
                 country=None,
                 state=None,
                 locality=None,
                 organization=None,
                 organization_unit=None):
    """Build an OpenSSL subject string, e.g. "/C=US/CN=example.org".

    Fields are emitted in conventional DN order (C, ST, L, O, OU, CN);
    fields left as None are omitted and '/' inside values is escaped.
    The explicit (value, key) pairs replace the original's fragile
    locals()[name] lookup; behavior is identical.
    """
    fields = [(country, "C"),
              (state, "ST"),
              (locality, "L"),
              (organization, "O"),
              (organization_unit, "OU"),
              (common_name, "CN")]
    return "".join("/%s=%s" % (key, value.replace("/", "\\/"))
                   for value, key in fields
                   if value is not None)
def make_alt_names(hosts):
    """Return a subjectAltName value like "DNS:a.example,DNS:b.example"."""
    return ",".join("DNS:%s" % host for host in hosts)
def get_config(root_dir, hosts, duration=30):
    """Return the text of an OpenSSL configuration file.

    :param root_dir: directory holding the CA database, keys and certs
    :param hosts: hostnames to list as subjectAltName entries, or None to
                  omit the SAN line entirely
    :param duration: certificate and CRL validity period in days
    """
    if hosts is None:
        san_line = ""
    else:
        san_line = "subjectAltName = %s" % make_alt_names(hosts)
    if os.path.sep == "\\":
        # This seems to be needed for the Shining Light OpenSSL on
        # Windows, at least.
        root_dir = root_dir.replace("\\", "\\\\")
    rv = """[ ca ]
default_ca = CA_default
[ CA_default ]
dir = %(root_dir)s
certs = $dir
new_certs_dir = $certs
crl_dir = $dir%(sep)scrl
database = $dir%(sep)sindex.txt
private_key = $dir%(sep)scakey.pem
certificate = $dir%(sep)scacert.pem
serial = $dir%(sep)sserial
crldir = $dir%(sep)scrl
crlnumber = $dir%(sep)scrlnumber
crl = $crldir%(sep)scrl.pem
RANDFILE = $dir%(sep)sprivate%(sep)s.rand
x509_extensions = usr_cert
name_opt = ca_default
cert_opt = ca_default
default_days = %(duration)d
default_crl_days = %(duration)d
default_md = sha256
preserve = no
policy = policy_anything
copy_extensions = copy
[ policy_anything ]
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
default_bits = 2048
default_keyfile = privkey.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
x509_extensions = v3_ca
# Passwords for private keys if not present they will be prompted for
# input_password = secret
# output_password = secret
string_mask = utf8only
req_extensions = v3_req
[ req_distinguished_name ]
countryName = Country Name (2 letter code)
countryName_default = AU
countryName_min = 2
countryName_max = 2
stateOrProvinceName = State or Province Name (full name)
stateOrProvinceName_default =
localityName = Locality Name (eg, city)
0.organizationName = Organization Name
0.organizationName_default = Web Platform Tests
organizationalUnitName = Organizational Unit Name (eg, section)
#organizationalUnitName_default =
commonName = Common Name (e.g. server FQDN or YOUR name)
commonName_max = 64
emailAddress = Email Address
emailAddress_max = 64
[ req_attributes ]
[ usr_cert ]
basicConstraints=CA:false
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
%(san_line)s
[ v3_ca ]
basicConstraints = CA:true
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = keyCertSign
""" % {"root_dir": root_dir,
       "san_line": san_line,
       "duration": duration,
       "sep": os.path.sep.replace("\\", "\\\\")}
    return rv
class OpenSSLEnvironment(object):
    # Flag read by the server setup code to know TLS is available.
    ssl_enabled = True
    def __init__(self, logger, openssl_binary="openssl", base_path=None,
                 password="web-platform-tests", force_regenerate=False,
                 duration=30, base_conf_path=None):
        """SSL environment that creates a local CA and host certificate using OpenSSL.
        By default this will look in base_path for existing certificates that are still
        valid and only create new certificates if there aren't any. This behaviour can
        be adjusted using the force_regenerate option.
        :param logger: a stdlib logging compatible logger or mozlog structured logger
        :param openssl_binary: Path to the OpenSSL binary
        :param base_path: Path in which certificates will be stored. If None, a temporary
                          directory will be used and removed when the server shuts down
        :param password: Password to use
        :param force_regenerate: Always create a new certificate even if one already exists.
        :param duration: certificate validity period in days
        :param base_conf_path: base OpenSSL config exported as OPENSSL_CONF
        """
        self.logger = logger
        self.temporary = False
        if base_path is None:
            # No storage path given: use a throwaway directory that
            # __exit__ deletes again.
            base_path = tempfile.mkdtemp()
            self.temporary = True
        self.base_path = os.path.abspath(base_path)
        self.password = password
        self.force_regenerate = force_regenerate
        self.duration = duration
        self.base_conf_path = base_conf_path
        self.path = None
        self.binary = openssl_binary
        self.openssl = None
        self._ca_cert_path = None
        self._ca_key_path = None
        # Cache mapping a tuple of hostnames -> (key path, cert path).
        self.host_certificates = {}
    def __enter__(self):
        if not os.path.exists(self.base_path):
            os.makedirs(self.base_path)
        path = functools.partial(os.path.join, self.base_path)
        # Create the (initially empty) database files the "openssl ca"
        # command expects to find.
        with open(path("index.txt"), "w"):
            pass
        with open(path("serial"), "w") as f:
            f.write("01")
        self.path = path
        return self
    def __exit__(self, *args, **kwargs):
        if self.temporary:
            shutil.rmtree(self.base_path)
    def _config_openssl(self, hosts):
        # Build an OpenSSL context manager configured for these hosts.
        conf_path = self.path("openssl.cfg")
        return OpenSSL(self.logger, self.binary, self.base_path, conf_path, hosts,
                       self.duration, self.base_conf_path)
    def ca_cert_path(self):
        """Get the path to the CA certificate file, generating a
        new one if needed"""
        if self._ca_cert_path is None and not self.force_regenerate:
            self._load_ca_cert()
        if self._ca_cert_path is None:
            self._generate_ca()
        return self._ca_cert_path
    def _load_ca_cert(self):
        # Reuse an on-disk CA key/cert pair if it exists and is unexpired.
        key_path = self.path("cakey.pem")
        cert_path = self.path("cacert.pem")
        if self.check_key_cert(key_path, cert_path, None):
            self.logger.info("Using existing CA cert")
            self._ca_key_path, self._ca_cert_path = key_path, cert_path
    def check_key_cert(self, key_path, cert_path, hosts):
        """Check that a key and cert file exist and are valid"""
        if not os.path.exists(key_path) or not os.path.exists(cert_path):
            return False
        with self._config_openssl(hosts) as openssl:
            # "openssl x509 -enddate" prints e.g. "notAfter=Jan  1 ...".
            end_date_str = openssl("x509",
                                   "-noout",
                                   "-enddate",
                                   "-in", cert_path).split("=", 1)[1].strip()
            # Not sure if this works in other locales
            end_date = datetime.strptime(end_date_str, "%b %d %H:%M:%S %Y %Z")
            # Should have some buffer here e.g. 1 hr
            if end_date < datetime.now():
                return False
        #TODO: check the key actually signed the cert.
        return True
    def _generate_ca(self):
        # Create a new self-signed CA: a CSR first, then self-sign it.
        path = self.path
        self.logger.info("Generating new CA in %s" % self.base_path)
        key_path = path("cakey.pem")
        req_path = path("careq.pem")
        cert_path = path("cacert.pem")
        with self._config_openssl(None) as openssl:
            openssl("req",
                    "-batch",
                    "-new",
                    "-newkey", "rsa:2048",
                    "-keyout", key_path,
                    "-out", req_path,
                    "-subj", make_subject("web-platform-tests"),
                    "-passout", "pass:%s" % self.password)
            openssl("ca",
                    "-batch",
                    "-create_serial",
                    "-keyfile", key_path,
                    "-passin", "pass:%s" % self.password,
                    "-selfsign",
                    "-extensions", "v3_ca",
                    "-in", req_path,
                    "-out", cert_path)
        # The CSR is only an intermediate artifact.
        os.unlink(req_path)
        self._ca_key_path, self._ca_cert_path = key_path, cert_path
    def host_cert_path(self, hosts):
        """Get a tuple of (private key path, certificate path) for a host,
        generating new ones if necessary.
        hosts must be a list of all hosts to appear on the certificate, with
        the primary hostname first."""
        hosts = tuple(hosts)
        if hosts not in self.host_certificates:
            if not self.force_regenerate:
                key_cert = self._load_host_cert(hosts)
            else:
                key_cert = None
            if key_cert is None:
                key, cert = self._generate_host_cert(hosts)
            else:
                key, cert = key_cert
            self.host_certificates[hosts] = key, cert
        return self.host_certificates[hosts]
    def _load_host_cert(self, hosts):
        # Files are named after the primary (first) hostname.
        host = hosts[0]
        key_path = self.path("%s.key" % host)
        cert_path = self.path("%s.pem" % host)
        # TODO: check that this cert was signed by the CA cert
        if self.check_key_cert(key_path, cert_path, hosts):
            self.logger.info("Using existing host cert")
            return key_path, cert_path
    def _generate_host_cert(self, hosts):
        # Create a host certificate signed by the local CA (generating the
        # CA first if it does not exist yet).
        host = hosts[0]
        if self._ca_key_path is None:
            self._generate_ca()
        ca_key_path = self._ca_key_path
        assert os.path.exists(ca_key_path)
        path = self.path
        req_path = path("wpt.req")
        cert_path = path("%s.pem" % host)
        key_path = path("%s.key" % host)
        self.logger.info("Generating new host cert")
        with self._config_openssl(hosts) as openssl:
            openssl("req",
                    "-batch",
                    "-newkey", "rsa:2048",
                    "-keyout", key_path,
                    "-in", ca_key_path,
                    "-nodes",
                    "-out", req_path)
            openssl("ca",
                    "-batch",
                    "-in", req_path,
                    "-passin", "pass:%s" % self.password,
                    "-subj", make_subject(host),
                    "-out", cert_path)
        os.unlink(req_path)
        return key_path, cert_path
|
zenoss/ZenPacks.zenoss.OpenvSwitch | refs/heads/develop | ZenPacks/zenoss/OpenvSwitch/__init__.py | 1 | ##############################################################################
#
# Copyright (C) Zenoss, Inc. 2013-2015, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
"""ZenPacks.zenoss.OpenvSwitch.- OpenvSwitch monitoring for Zenoss.
This module contains initialization code for the ZenPack. Everything in
the module scope will be executed at startup by all Zenoss Python
processes.
The initialization order for ZenPacks is defined by
$ZENHOME/ZenPacks/easy-install.pth.
"""
from . import zenpacklib
#------------------------------------------------------------------------------
# Load ZPL Yaml here
#------------------------------------------------------------------------------
CFG = zenpacklib.load_yaml()
# patches
from Products.ZenUtils.Utils import unused
from . import schema
class ZenPack(schema.ZenPack):
    """Install/remove hooks for the OpenvSwitch ZenPack."""
    def install(self, app):
        """Install the pack; if the OpenStackInfrastructure ZenPack is
        present, call its reindex_core_components hook. Its absence is
        not an error (ImportError is deliberately swallowed)."""
        super(ZenPack, self).install(app)
        try:
            from ZenPacks.zenoss.OpenStackInfrastructure.neutron_integration \
                import reindex_core_components
            reindex_core_components(self.dmd)
        except ImportError:
            pass
    def remove(self, dmd, leaveObjects=False):
        # since this ZP added addition eventClasses, and zencatalogservice,
        # if is running, indexed them, the event catalog needs to be
        # cleaned up at removal
        super(ZenPack, self).remove(dmd, leaveObjects=leaveObjects)
        from ZODB.transact import transact
        brains = dmd.Events.eventClassSearch()
        for brain in brains:
            try:
                # Probe the catalog entry; if the underlying object is gone
                # this raises and we drop the stale index entry below.
                test_reference = brain.getObject()
                test_reference._p_deactivate()
            except Exception:
                object_path_string = brain.getPath()
                try:
                    # Uncatalog inside a transaction; failures here are
                    # ignored (best-effort cleanup).
                    transact(dmd.Events.eventClassSearch.uncatalog_object)(
                        object_path_string)
                except Exception as e:
                    pass
# Patch last to avoid import recursion problems.
from ZenPacks.zenoss.OpenvSwitch import patches
unused(patches)
|
jumpstarter-io/neutron | refs/heads/master | neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py | 11 | # Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import utils
from neutron.db import agents_db
from neutron.openstack.common import timeutils
from neutron.tests import base
class TestDhcpAgentNotifyAPI(base.BaseTestCase):
    """Unit tests for DhcpAgentNotifyAPI scheduling/notification logic."""
    def setUp(self):
        super(TestDhcpAgentNotifyAPI, self).setUp()
        self.notifier = (
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI(plugin=mock.Mock()))
        # Patch out the RPC fan-out/cast paths and logging so the tests can
        # assert call counts instead of performing real RPC.
        mock_util_p = mock.patch.object(utils, 'is_extension_supported')
        mock_log_p = mock.patch.object(dhcp_rpc_agent_api, 'LOG')
        mock_fanout_p = mock.patch.object(self.notifier, '_fanout_message')
        mock_cast_p = mock.patch.object(self.notifier, '_cast_message')
        self.mock_util = mock_util_p.start()
        self.mock_log = mock_log_p.start()
        self.mock_fanout = mock_fanout_p.start()
        self.mock_cast = mock_cast_p.start()
    def _test__schedule_network(self, network,
                                new_agents=None, existing_agents=None,
                                expected_casts=0, expected_warnings=0):
        """Drive _schedule_network and assert the returned agents and the
        number of casts/warnings produced."""
        self.notifier.plugin.schedule_network.return_value = new_agents
        agents = self.notifier._schedule_network(
            mock.ANY, network, existing_agents)
        if new_agents is None:
            new_agents = []
        self.assertEqual(new_agents + existing_agents, agents)
        self.assertEqual(expected_casts, self.mock_cast.call_count)
        self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
    def test__schedule_network(self):
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        network = {'id': 'foo_net_id'}
        self._test__schedule_network(network,
                                     new_agents=[agent], existing_agents=[],
                                     expected_casts=1, expected_warnings=0)
    def test__schedule_network_no_existing_agents(self):
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        network = {'id': 'foo_net_id'}
        self._test__schedule_network(network,
                                     new_agents=None, existing_agents=[agent],
                                     expected_casts=0, expected_warnings=0)
    def test__schedule_network_no_new_agents(self):
        network = {'id': 'foo_net_id'}
        self._test__schedule_network(network,
                                     new_agents=None, existing_agents=[],
                                     expected_casts=0, expected_warnings=1)
    def _test__get_enabled_agents(self, network,
                                  agents=None, port_count=0,
                                  expected_warnings=0, expected_errors=0):
        """Drive _get_enabled_agents and assert the agents returned plus
        the number of warnings/errors logged."""
        self.notifier.plugin.get_ports_count.return_value = port_count
        enabled_agents = self.notifier._get_enabled_agents(
            mock.ANY, network, agents, mock.ANY, mock.ANY)
        self.assertEqual(agents, enabled_agents)
        self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
        self.assertEqual(expected_errors, self.mock_log.error.call_count)
    def test__get_enabled_agents(self):
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        network = {'id': 'foo_network_id'}
        self._test__get_enabled_agents(network, agents=[agent])
    def test__get_enabled_agents_with_inactive_ones(self):
        agent1 = agents_db.Agent()
        agent1.admin_state_up = True
        agent1.heartbeat_timestamp = timeutils.utcnow()
        agent2 = agents_db.Agent()
        agent2.admin_state_up = True
        # This is effectively an inactive agent
        agent2.heartbeat_timestamp = datetime.datetime(2000, 1, 1, 0, 0)
        network = {'id': 'foo_network_id'}
        self._test__get_enabled_agents(network,
                                       agents=[agent1, agent2],
                                       expected_warnings=1, expected_errors=0)
    def test__get_enabled_agents_with_notification_required(self):
        network = {'id': 'foo_network_id', 'subnets': ['foo_subnet_id']}
        self._test__get_enabled_agents(network, [], port_count=20,
                                       expected_warnings=0, expected_errors=1)
    def test__notify_agents_fanout_required(self):
        self.notifier._notify_agents(mock.ANY,
                                     'network_delete_end',
                                     mock.ANY, 'foo_network_id')
        self.assertEqual(1, self.mock_fanout.call_count)
    def _test__notify_agents(self, method,
                             expected_scheduling=0, expected_casts=0):
        """Drive _notify_agents for a given method name and assert how many
        schedule and cast calls it triggers."""
        with mock.patch.object(self.notifier, '_schedule_network') as f:
            with mock.patch.object(self.notifier, '_get_enabled_agents') as g:
                agent = agents_db.Agent()
                agent.admin_state_up = True
                agent.heartbeat_timestamp = timeutils.utcnow()
                g.return_value = [agent]
                dummy_payload = {'port': {}}
                self.notifier._notify_agents(mock.Mock(), method,
                                             dummy_payload, 'foo_network_id')
                self.assertEqual(expected_scheduling, f.call_count)
                self.assertEqual(expected_casts, self.mock_cast.call_count)
    def test__notify_agents_cast_required_with_scheduling(self):
        self._test__notify_agents('port_create_end',
                                  expected_scheduling=1, expected_casts=1)
    def test__notify_agents_cast_required_wo_scheduling_on_port_update(self):
        self._test__notify_agents('port_update_end',
                                  expected_scheduling=0, expected_casts=1)
    def test__notify_agents_cast_required_wo_scheduling_on_subnet_create(self):
        self._test__notify_agents('subnet_create_end',
                                  expected_scheduling=0, expected_casts=1)
    def test__notify_agents_no_action(self):
        self._test__notify_agents('network_create_end',
                                  expected_scheduling=0, expected_casts=0)
    def test__fanout_message(self):
        self.notifier._fanout_message(mock.ANY, mock.ANY, mock.ANY)
        self.assertEqual(1, self.mock_fanout.call_count)
    def test__cast_message(self):
        self.notifier._cast_message(mock.ANY, mock.ANY, mock.ANY)
        self.assertEqual(1, self.mock_cast.call_count)
|
resmo/ansible | refs/heads/devel | lib/ansible/modules/cloud/azure/azure_rm_webapp.py | 38 | #!/usr/bin/python
#
# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_webapp
version_added: "2.7"
short_description: Manage Web App instances
description:
- Create, update and delete instance of Web App.
options:
resource_group:
description:
- Name of the resource group to which the resource belongs.
required: True
name:
description:
- Unique name of the app to create or update. To create or update a deployment slot, use the {slot} parameter.
required: True
location:
description:
- Resource location. If not set, location from the resource group will be used as default.
plan:
description:
- App service plan. Required for creation.
- Can be name of existing app service plan in same resource group as web app.
- Can be the resource ID of an existing app service plan. For example
/subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>.
- Can be a dict containing five parameters, defined below.
- C(name), name of app service plan.
- C(resource_group), resource group of the app service plan.
- C(sku), SKU of app service plan, allowed values listed on U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/).
- C(is_linux), whether or not the app service plan is Linux. defaults to C(False).
- C(number_of_workers), number of workers for app service plan.
frameworks:
description:
- Set of run time framework settings. Each setting is a dictionary.
- See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
suboptions:
name:
description:
- Name of the framework.
- Supported framework list for Windows web app and Linux web app is different.
- Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
- Windows web apps support multiple framework at the same time.
- Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
- Linux web apps support only one framework.
- Java framework is mutually exclusive with others.
choices:
- java
- net_framework
- php
- python
- ruby
- dotnetcore
- node
version:
description:
- Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info.
- C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
- C(php) supported value sample, C(5.5), C(5.6), C(7.0).
- C(python) supported value sample, C(5.5), C(5.6), C(7.0).
- C(node) supported value sample, C(6.6), C(6.9).
- C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
- C(ruby) supported value sample, C(2.3).
- C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
settings:
description:
- List of settings of the framework.
suboptions:
java_container:
description:
- Name of Java container.
- Supported only when I(frameworks=java). Sample values C(Tomcat), C(Jetty).
java_container_version:
description:
- Version of Java container.
- Supported only when I(frameworks=java).
- Sample values for C(Tomcat), C(8.0), C(8.5), C(9.0). For C(Jetty,), C(9.1), C(9.3).
container_settings:
description:
- Web app container settings.
suboptions:
name:
description:
- Name of container, for example C(imagename:tag).
registry_server_url:
description:
- Container registry server URL, for example C(mydockerregistry.io).
registry_server_user:
description:
- The container registry server user name.
registry_server_password:
description:
- The container registry server password.
scm_type:
description:
- Repository type of deployment source, for example C(LocalGit), C(GitHub).
- List of supported values maintained at U(https://docs.microsoft.com/en-us/rest/api/appservice/webapps/createorupdate#scmtype).
deployment_source:
description:
- Deployment source for git.
suboptions:
url:
description:
- Repository url of deployment source.
branch:
description:
- The branch name of the repository.
startup_file:
description:
- The web's startup file.
- Used only for Linux web apps.
client_affinity_enabled:
description:
- Whether or not to send session affinity cookies, which route client requests in the same session to the same instance.
type: bool
default: True
https_only:
description:
- Configures web site to accept only https requests.
type: bool
dns_registration:
description:
- Whether or not the web app hostname is registered with DNS on creation. Set to C(false) to register.
type: bool
skip_custom_domain_verification:
description:
- Whether or not to skip verification of custom (non *.azurewebsites.net) domains associated with web app. Set to C(true) to skip.
type: bool
ttl_in_seconds:
description:
- Time to live in seconds for web app default domain name.
app_settings:
description:
- Configure web app application settings. Suboptions are in key value pair format.
purge_app_settings:
description:
- Purge any existing application settings. Replace web app application settings with app_settings.
type: bool
app_state:
description:
- Start/Stop/Restart the web app.
type: str
choices:
- started
- stopped
- restarted
default: started
state:
description:
- State of the Web App.
- Use C(present) to create or update a Web App and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yunge Zhu (@yungezz)
'''
# Fix: the stage-slot example had "testkey:testvalue" (no space after the
# colon), which YAML parses as a scalar string rather than a mapping entry,
# so app_settings would not be a dict as the module requires.
EXAMPLES = '''
- name: Create a windows web app with non-exist app service plan
  azure_rm_webapp:
    resource_group: myResourceGroup
    name: myWinWebapp
    plan:
      resource_group: myAppServicePlan_rg
      name: myAppServicePlan
      is_linux: false
      sku: S1
- name: Create a docker web app with some app settings, with docker image
  azure_rm_webapp:
    resource_group: myResourceGroup
    name: myDockerWebapp
    plan:
      resource_group: myAppServicePlan_rg
      name: myAppServicePlan
      is_linux: true
      sku: S1
      number_of_workers: 2
    app_settings:
      testkey: testvalue
      testkey2: testvalue2
    container_settings:
      name: ansible/ansible:ubuntu1404
- name: Create a docker web app with private acr registry
  azure_rm_webapp:
    resource_group: myResourceGroup
    name: myDockerWebapp
    plan: myAppServicePlan
    app_settings:
      testkey: testvalue
    container_settings:
      name: ansible/ubuntu1404
      registry_server_url: myregistry.io
      registry_server_user: user
      registry_server_password: pass
- name: Create a linux web app with Node 6.6 framework
  azure_rm_webapp:
    resource_group: myResourceGroup
    name: myLinuxWebapp
    plan:
      resource_group: myAppServicePlan_rg
      name: myAppServicePlan
    app_settings:
      testkey: testvalue
    frameworks:
      - name: "node"
        version: "6.6"
- name: Create a windows web app with node, php
  azure_rm_webapp:
    resource_group: myResourceGroup
    name: myWinWebapp
    plan:
      resource_group: myAppServicePlan_rg
      name: myAppServicePlan
    app_settings:
      testkey: testvalue
    frameworks:
      - name: "node"
        version: 6.6
      - name: "php"
        version: "7.0"
- name: Create a stage deployment slot for an existing web app
  azure_rm_webapp:
    resource_group: myResourceGroup
    name: myWebapp/slots/stage
    plan:
      resource_group: myAppServicePlan_rg
      name: myAppServicePlan
    app_settings:
      testkey: testvalue
- name: Create a linux web app with java framework
  azure_rm_webapp:
    resource_group: myResourceGroup
    name: myLinuxWebapp
    plan:
      resource_group: myAppServicePlan_rg
      name: myAppServicePlan
    app_settings:
      testkey: testvalue
    frameworks:
      - name: "java"
        version: "8"
        settings:
          java_container: "Tomcat"
          java_container_version: "8.5"
'''
RETURN = '''
azure_webapp:
description:
- ID of current web app.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrest.serialization import Model
from azure.mgmt.web.models import (
site_config, app_service_plan, Site,
AppServicePlan, SkuDescription, NameValuePair
)
except ImportError:
# This is handled in azure_rm_common
pass
# Suboption spec for the container_settings argument: docker image name plus
# optional private-registry endpoint and credentials.
container_settings_spec = dict(
    name=dict(type='str', required=True),
    registry_server_url=dict(type='str'),
    registry_server_user=dict(type='str'),
    registry_server_password=dict(type='str', no_log=True)
)
# Suboption spec for deployment_source: repository URL and branch.
deployment_source_spec = dict(
    url=dict(type='str'),
    branch=dict(type='str')
)
# Java-only framework settings (servlet container type and version).
framework_settings_spec = dict(
    java_container=dict(type='str', required=True),
    java_container_version=dict(type='str', required=True)
)
# Spec for one entry of the frameworks list argument.
framework_spec = dict(
    name=dict(
        type='str',
        required=True,
        choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
    version=dict(type='str', required=True),
    settings=dict(type='dict', options=framework_settings_spec)
)
def _normalize_sku(sku):
if sku is None:
return sku
sku = sku.upper()
if sku == 'FREE':
return 'F1'
elif sku == 'SHARED':
return 'D1'
return sku
def get_sku_name(tier):
    """Return the pricing-tier name for a SKU size code (or tier alias).

    Accepts size codes (e.g. 'S1', 'P1V2') and, for the free/shared/basic
    tiers, the tier alias itself. Returns None for unrecognised input.
    """
    code = tier.upper()
    tier_table = (
        ('FREE', ('F1', 'FREE')),
        ('SHARED', ('D1', 'SHARED')),
        ('BASIC', ('B1', 'B2', 'B3', 'BASIC')),
        ('STANDARD', ('S1', 'S2', 'S3')),
        ('PREMIUM', ('P1', 'P2', 'P3')),
        ('PREMIUMV2', ('P1V2', 'P2V2', 'P3V2')),
    )
    for tier_name, codes in tier_table:
        if code in codes:
            return tier_name
    return None
def appserviceplan_to_dict(plan):
    """Flatten an AppServicePlan SDK object into a plain dictionary.

    The SDK's 'reserved' flag marks a Linux plan, so it is exposed under
    both the 'reserved' and 'is_linux' keys. Empty tags become None.
    """
    return {
        'id': plan.id,
        'name': plan.name,
        'kind': plan.kind,
        'location': plan.location,
        'reserved': plan.reserved,
        'is_linux': plan.reserved,
        'provisioning_state': plan.provisioning_state,
        'tags': plan.tags if plan.tags else None,
    }
def webapp_to_dict(webapp):
    """Flatten a web app SDK object into a plain dictionary.

    https_only, skip_custom_domain_verification and ttl_in_seconds only
    exist in some SDK versions, hence the defaulted lookups (None when
    absent). Empty tags become None.
    """
    return {
        'id': webapp.id,
        'name': webapp.name,
        'location': webapp.location,
        'client_cert_enabled': webapp.client_cert_enabled,
        'enabled': webapp.enabled,
        'reserved': webapp.reserved,
        'client_affinity_enabled': webapp.client_affinity_enabled,
        'server_farm_id': webapp.server_farm_id,
        'host_names_disabled': webapp.host_names_disabled,
        'https_only': getattr(webapp, 'https_only', None),
        'skip_custom_domain_verification': getattr(webapp, 'skip_custom_domain_verification', None),
        'ttl_in_seconds': getattr(webapp, 'ttl_in_seconds', None),
        'state': webapp.state,
        'tags': webapp.tags if webapp.tags else None,
    }
class Actions:
    """Symbolic constants for the operations queued during exec_module."""
    CreateOrUpdate = 0
    UpdateAppSettings = 1
    Delete = 2
class AzureRMWebApps(AzureRMModuleBase):
    """Configuration class for an Azure RM Web App resource"""

    def __init__(self):
        """Declare the module's argument spec and default internal state,
        then hand control to AzureRMModuleBase (which drives exec_module)."""
        # Nested specs (frameworks, container_settings, deployment_source)
        # are defined at module level.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            location=dict(
                type='str'
            ),
            plan=dict(
                type='raw'
            ),
            frameworks=dict(
                type='list',
                elements='dict',
                options=framework_spec
            ),
            container_settings=dict(
                type='dict',
                options=container_settings_spec
            ),
            scm_type=dict(
                type='str',
            ),
            deployment_source=dict(
                type='dict',
                options=deployment_source_spec
            ),
            startup_file=dict(
                type='str'
            ),
            client_affinity_enabled=dict(
                type='bool',
                default=True
            ),
            dns_registration=dict(
                type='bool'
            ),
            https_only=dict(
                type='bool'
            ),
            skip_custom_domain_verification=dict(
                type='bool'
            ),
            ttl_in_seconds=dict(
                type='int'
            ),
            app_settings=dict(
                type='dict'
            ),
            purge_app_settings=dict(
                type='bool',
                default=False
            ),
            app_state=dict(
                type='str',
                choices=['started', 'stopped', 'restarted'],
                default='started'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )
        mutually_exclusive = [['container_settings', 'frameworks']]
        self.resource_group = None
        self.name = None
        self.location = None
        # update in create_or_update as parameters
        self.client_affinity_enabled = True
        self.dns_registration = None
        self.skip_custom_domain_verification = None
        self.ttl_in_seconds = None
        self.https_only = None
        self.tags = None
        # site config, e.g app settings, ssl
        self.site_config = dict()
        self.app_settings = dict()
        self.app_settings_strDic = None
        # app service plan
        self.plan = None
        # siteSourceControl
        self.deployment_source = dict()
        # site, used at level creation, or update. e.g windows/linux, client_affinity etc first level args
        self.site = None
        # property for internal usage, not used for sdk
        self.container_settings = None
        self.purge_app_settings = False
        self.app_state = 'started'
        self.results = dict(
            changed=False,
            id=None,
        )
        self.state = None
        self.to_do = []
        self.frameworks = None
        # set site_config value from kwargs
        self.site_config_updatable_properties = ["net_framework_version",
                                                 "java_version",
                                                 "php_version",
                                                 "python_version",
                                                 "scm_type"]
        # updatable_properties
        self.updatable_properties = ["client_affinity_enabled",
                                     "force_dns_registration",
                                     "https_only",
                                     "skip_custom_domain_verification",
                                     "ttl_in_seconds"]
        self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
        self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']
        super(AzureRMWebApps, self).__init__(derived_arg_spec=self.module_arg_spec,
                                             mutually_exclusive=mutually_exclusive,
                                             supports_check_mode=True,
                                             supports_tags=True)

    def exec_module(self, **kwargs):
        """Main module execution method"""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                # scm_type is the only argument without a matching attribute;
                # it feeds straight into the site configuration.
                if key == "scm_type":
                    self.site_config[key] = kwargs[key]

        old_response = None
        response = None
        to_be_updated = False

        # set location
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            self.location = resource_group.location

        # get existing web app
        old_response = self.get_webapp()

        if old_response:
            self.results['id'] = old_response['id']

        if self.state == 'present':
            if not self.plan and not old_response:
                self.fail("Please specify plan for newly created web app.")

            if not self.plan:
                self.plan = old_response['server_farm_id']

            self.plan = self.parse_resource_to_dict(self.plan)

            # get app service plan
            is_linux = False
            old_plan = self.get_app_service_plan()
            if old_plan:
                is_linux = old_plan['reserved']
            else:
                is_linux = self.plan['is_linux'] if 'is_linux' in self.plan else False

            if self.frameworks:
                # java is mutually exclusive with other frameworks
                if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
                    self.fail('Java is mutually exclusive with other frameworks.')

                if is_linux:
                    if len(self.frameworks) != 1:
                        self.fail('Can specify one framework only for Linux web app.')

                    if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
                        self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))

                    self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper()

                    if self.frameworks[0]['name'] == 'java':
                        if self.frameworks[0]['version'] != '8':
                            self.fail("Linux web app only supports java 8.")
                        if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() != 'tomcat':
                            self.fail("Linux web app only supports tomcat container.")

                        if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() == 'tomcat':
                            self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8'
                        else:
                            self.site_config['linux_fx_version'] = 'JAVA|8-jre8'
                else:
                    for fx in self.frameworks:
                        if fx.get('name') not in self.supported_windows_frameworks:
                            self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name')))
                        else:
                            self.site_config[fx.get('name') + '_version'] = fx.get('version')

                        if 'settings' in fx and fx['settings'] is not None:
                            for key, value in fx['settings'].items():
                                self.site_config[key] = value

            if not self.app_settings:
                self.app_settings = dict()

            if self.container_settings:
                linux_fx_version = 'DOCKER|'

                if self.container_settings.get('registry_server_url'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']

                    linux_fx_version += self.container_settings['registry_server_url'] + '/'

                linux_fx_version += self.container_settings['name']

                self.site_config['linux_fx_version'] = linux_fx_version

                if self.container_settings.get('registry_server_user'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user']

                if self.container_settings.get('registry_server_password'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password']

            # init site
            self.site = Site(location=self.location, site_config=self.site_config)

            if self.https_only is not None:
                self.site.https_only = self.https_only

            if self.client_affinity_enabled:
                self.site.client_affinity_enabled = self.client_affinity_enabled

            # check if the web app already present in the resource group
            if not old_response:
                self.log("Web App instance doesn't exist")

                to_be_updated = True
                self.to_do.append(Actions.CreateOrUpdate)
                self.site.tags = self.tags

                # service plan is required for creation
                if not self.plan:
                    self.fail("Please specify app service plan in plan parameter.")

                if not old_plan:
                    # no existing service plan, create one
                    if (not self.plan.get('name') or not self.plan.get('sku')):
                        self.fail('Please specify name, is_linux, sku in plan')

                    if 'location' not in self.plan:
                        plan_resource_group = self.get_resource_group(self.plan['resource_group'])
                        self.plan['location'] = plan_resource_group.location

                    old_plan = self.create_app_service_plan()

                self.site.server_farm_id = old_plan['id']

                # if linux, setup startup_file
                if old_plan['is_linux']:
                    if hasattr(self, 'startup_file'):
                        self.site_config['app_command_line'] = self.startup_file

                # set app setting
                if self.app_settings:
                    app_settings = []
                    for key in self.app_settings.keys():
                        app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))

                    self.site_config['app_settings'] = app_settings
            else:
                # existing web app, do update
                self.log("Web App instance already exists")

                self.log('Result: {0}'.format(old_response))

                update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))

                if update_tags:
                    to_be_updated = True

                # check if root level property changed
                if self.is_updatable_property_changed(old_response):
                    to_be_updated = True
                    self.to_do.append(Actions.CreateOrUpdate)

                # check if site_config changed
                old_config = self.get_webapp_configuration()

                if self.is_site_config_changed(old_config):
                    to_be_updated = True
                    self.to_do.append(Actions.CreateOrUpdate)

                # check if linux_fx_version changed
                if old_config.linux_fx_version != self.site_config.get('linux_fx_version', ''):
                    to_be_updated = True
                    self.to_do.append(Actions.CreateOrUpdate)

                self.app_settings_strDic = self.list_app_settings()

                # purge existing app_settings:
                if self.purge_app_settings:
                    to_be_updated = True
                    self.app_settings_strDic = dict()
                    self.to_do.append(Actions.UpdateAppSettings)

                # check if app settings changed
                if self.purge_app_settings or self.is_app_settings_changed():
                    to_be_updated = True
                    self.to_do.append(Actions.UpdateAppSettings)

                    if self.app_settings:
                        for key in self.app_settings.keys():
                            self.app_settings_strDic[key] = self.app_settings[key]

        elif self.state == 'absent':
            if old_response:
                self.log("Delete Web App instance")
                self.results['changed'] = True

                if self.check_mode:
                    return self.results

                self.delete_webapp()

                self.log('Web App instance deleted')

            else:
                self.fail("Web app {0} not exists.".format(self.name))

        if to_be_updated:
            self.log('Need to Create/Update web app')
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            if Actions.CreateOrUpdate in self.to_do:
                response = self.create_update_webapp()

                self.results['id'] = response['id']

            if Actions.UpdateAppSettings in self.to_do:
                update_response = self.update_app_settings()
                self.results['id'] = update_response.id

        webapp = None
        if old_response:
            webapp = old_response
        if response:
            webapp = response

        if webapp:
            if (webapp['state'] != 'Stopped' and self.app_state == 'stopped') or \
               (webapp['state'] != 'Running' and self.app_state == 'started') or \
               self.app_state == 'restarted':

                self.results['changed'] = True

                if self.check_mode:
                    return self.results

                self.set_webapp_state(self.app_state)

        return self.results

    # compare existing web app with input, determine weather it's update operation
    def is_updatable_property_changed(self, existing_webapp):
        for property_name in self.updatable_properties:
            if hasattr(self, property_name) and getattr(self, property_name) is not None and \
               getattr(self, property_name) != existing_webapp.get(property_name, None):
                return True

        return False

    # compare xxx_version
    def is_site_config_changed(self, existing_config):
        for fx_version in self.site_config_updatable_properties:
            if self.site_config.get(fx_version):
                if not getattr(existing_config, fx_version) or \
                   getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper():
                    return True

        return False

    # comparing existing app setting with input, determine whether it's changed
    def is_app_settings_changed(self):
        if self.app_settings:
            if self.app_settings_strDic:
                for key in self.app_settings.keys():
                    if self.app_settings[key] != self.app_settings_strDic.get(key, None):
                        return True
            else:
                return True
        return False

    # comparing deployment source with input, determine wheather it's changed
    def is_deployment_source_changed(self, existing_webapp):
        if self.deployment_source:
            if self.deployment_source.get('url') \
                    and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']:
                return True

            if self.deployment_source.get('branch') \
                    and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']:
                return True

        return False

    def create_update_webapp(self):
        '''
        Creates or updates Web App with the specified configuration.

        :return: deserialized Web App instance state dictionary
        '''
        self.log(
            "Creating / Updating the Web App instance {0}".format(self.name))

        try:
            skip_dns_registration = self.dns_registration
            force_dns_registration = None if self.dns_registration is None else not self.dns_registration

            response = self.web_client.web_apps.create_or_update(resource_group_name=self.resource_group,
                                                                 name=self.name,
                                                                 site_envelope=self.site,
                                                                 skip_dns_registration=skip_dns_registration,
                                                                 skip_custom_domain_verification=self.skip_custom_domain_verification,
                                                                 force_dns_registration=force_dns_registration,
                                                                 ttl_in_seconds=self.ttl_in_seconds)
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

        except CloudError as exc:
            self.log('Error attempting to create the Web App instance.')
            self.fail(
                "Error creating the Web App instance: {0}".format(str(exc)))
        return webapp_to_dict(response)

    def delete_webapp(self):
        '''
        Deletes specified Web App instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the Web App instance {0}".format(self.name))
        try:
            response = self.web_client.web_apps.delete(resource_group_name=self.resource_group,
                                                       name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the Web App instance.')
            self.fail(
                "Error deleting the Web App instance: {0}".format(str(e)))

        return True

    def get_webapp(self):
        '''
        Gets the properties of the specified Web App.

        :return: deserialized Web App instance state dictionary, or False when absent
        '''
        self.log(
            "Checking if the Web App instance {0} is present".format(self.name))

        response = None

        try:
            response = self.web_client.web_apps.get(resource_group_name=self.resource_group,
                                                    name=self.name)

            # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
            if response is not None:
                self.log("Response : {0}".format(response))
                self.log("Web App instance : {0} found".format(response.name))
                return webapp_to_dict(response)

        except CloudError as ex:
            pass

        self.log("Didn't find web app {0} in resource group {1}".format(
            self.name, self.resource_group))

        return False

    def get_app_service_plan(self):
        '''
        Gets app service plan

        :return: deserialized app service plan dictionary, or False when absent
        '''
        self.log("Get App Service Plan {0}".format(self.plan['name']))

        try:
            response = self.web_client.app_service_plans.get(
                resource_group_name=self.plan['resource_group'],
                name=self.plan['name'])

            # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
            if response is not None:
                self.log("Response : {0}".format(response))
                self.log("App Service Plan : {0} found".format(response.name))

                return appserviceplan_to_dict(response)
        except CloudError as ex:
            pass

        self.log("Didn't find app service plan {0} in resource group {1}".format(
            self.plan['name'], self.plan['resource_group']))

        return False

    def create_app_service_plan(self):
        '''
        Creates app service plan

        :return: deserialized app service plan dictionary
        '''
        self.log("Create App Service Plan {0}".format(self.plan['name']))

        try:
            # normalize sku
            sku = _normalize_sku(self.plan['sku'])

            sku_def = SkuDescription(tier=get_sku_name(
                sku), name=sku, capacity=(self.plan.get('number_of_workers', None)))
            plan_def = AppServicePlan(
                location=self.plan['location'], app_service_plan_name=self.plan['name'], sku=sku_def, reserved=(self.plan.get('is_linux', None)))

            poller = self.web_client.app_service_plans.create_or_update(
                self.plan['resource_group'], self.plan['name'], plan_def)

            # Some SDK versions return the created plan directly instead of a
            # poller; handle both so 'response' is always defined.
            if isinstance(poller, LROPoller):
                response = self.get_poller_result(poller)
            else:
                response = poller

            self.log("Response : {0}".format(response))

            return appserviceplan_to_dict(response)
        except CloudError as ex:
            self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(
                self.plan['name'], self.plan['resource_group'], str(ex)))

    def list_app_settings(self):
        '''
        List application settings

        :return: deserialized list response
        '''
        self.log("List application setting")

        try:
            response = self.web_client.web_apps.list_application_settings(
                resource_group_name=self.resource_group, name=self.name)
            self.log("Response : {0}".format(response))

            return response.properties
        except CloudError as ex:
            self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
                self.name, self.resource_group, str(ex)))

    def update_app_settings(self):
        '''
        Update application settings

        :return: deserialized updating response
        '''
        self.log("Update application setting")

        try:
            response = self.web_client.web_apps.update_application_settings(
                resource_group_name=self.resource_group, name=self.name, properties=self.app_settings_strDic)
            self.log("Response : {0}".format(response))

            return response
        except CloudError as ex:
            self.fail("Failed to update application settings for web app {0} in resource group {1}: {2}".format(
                self.name, self.resource_group, str(ex)))

    def create_or_update_source_control(self):
        '''
        Update site source control

        :return: deserialized updating response
        '''
        self.log("Update site source control")

        if self.deployment_source is None:
            return False

        self.deployment_source['is_manual_integration'] = False
        self.deployment_source['is_mercurial'] = False

        try:
            # BUGFIX: was self.web_client.web_client.create_or_update_source_control,
            # which raises AttributeError - the operation lives on the web_apps
            # operations group, consistent with every other call in this class.
            response = self.web_client.web_apps.create_or_update_source_control(
                self.resource_group, self.name, self.deployment_source)
            self.log("Response : {0}".format(response))

            return response.as_dict()
        except CloudError as ex:
            self.fail("Failed to update site source control for web app {0} in resource group {1}: {2}".format(
                self.name, self.resource_group, str(ex)))

    def get_webapp_configuration(self):
        '''
        Get web app configuration

        :return: deserialized web app configuration response, or False on error
        '''
        self.log("Get web app configuration")

        try:
            response = self.web_client.web_apps.get_configuration(
                resource_group_name=self.resource_group, name=self.name)
            self.log("Response : {0}".format(response))

            return response
        except CloudError as ex:
            self.log("Failed to get configuration for web app {0} in resource group {1}: {2}".format(
                self.name, self.resource_group, str(ex)))

            return False

    def set_webapp_state(self, appstate):
        '''
        Start/stop/restart web app

        :param appstate: one of 'started', 'stopped', 'restarted'
        :return: deserialized updating response
        '''
        try:
            if appstate == 'started':
                response = self.web_client.web_apps.start(resource_group_name=self.resource_group, name=self.name)
            elif appstate == 'stopped':
                response = self.web_client.web_apps.stop(resource_group_name=self.resource_group, name=self.name)
            elif appstate == 'restarted':
                response = self.web_client.web_apps.restart(resource_group_name=self.resource_group, name=self.name)
            else:
                self.fail("Invalid web app state {0}".format(appstate))

            self.log("Response : {0}".format(response))

            return response
        except CloudError as ex:
            request_id = ex.request_id if ex.request_id else ''
            # state-change failures are logged, not fatal
            self.log("Failed to {0} web app {1} in resource group {2}, request_id {3} - {4}".format(
                appstate, self.name, self.resource_group, request_id, str(ex)))
def main():
    """Main execution"""
    # Constructing the module object runs the whole module; presumably
    # AzureRMModuleBase's constructor dispatches to exec_module - confirm
    # against azure_rm_common.
    AzureRMWebApps()
if __name__ == '__main__':
    main()
|
ivoflipse/devide.johannes | refs/heads/master | extra/soappy-cvp/SOAPpy/Server.py | 5 | """
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
from __future__ import nested_scopes
ident = '$Id: Server.py,v 1.21 2005/02/15 16:32:22 warnes Exp $'
from version import __version__
#import xml.sax
import re
import socket
import sys
import SocketServer
from types import *
import BaseHTTPServer
import thread
# SOAPpy modules
from Parser import parseSOAPRPC
from Config import Config
from Types import faultType, voidType, simplify
from NS import NS
from SOAPBuilder import buildSOAP
from Utilities import debugHeader, debugFooter
try: from M2Crypto import SSL
except: pass
ident = '$Id: Server.py,v 1.21 2005/02/15 16:32:22 warnes Exp $'
from version import __version__
################################################################################
# Call context dictionary
################################################################################
# Maps a thread id to the SOAPContext of the request that thread is serving.
_contexts = dict()
def GetSOAPContext():
    """Return the SOAPContext of the request currently being served on the
    calling thread (entries are stored by SOAPRequestHandler.do_POST).
    Raises KeyError when no SOAP call is active on this thread."""
    global _contexts
    return _contexts[thread.get_ident()]
################################################################################
# Server
################################################################################
# Method Signature class for adding extra info to registered funcs, right now
# used just to indicate it should be called with keywords, instead of ordered
# params.
class MethodSig:
    """Callable wrapper tagging a registered function with call-style flags.

    keywords -- when true the server invokes the function with keyword
                arguments instead of ordered parameters.
    context  -- when true the server passes the SOAPContext to the call.
    """
    def __init__(self, func, keywords=0, context=0):
        self.func = func
        self.keywords = keywords
        self.context = context
        self.__name__ = func.__name__

    def __call__(self, *args, **kw):
        # forward everything to the wrapped function unchanged
        return self.func(*args, **kw)
class SOAPContext:
    """Per-request bundle (parsed header/body/attrs, raw XML, the socket,
    the HTTP headers and the SOAPAction) handed to registered methods that
    request a context and retrievable via GetSOAPContext()."""
    def __init__(self, header, body, attrs, xmldata, connection, httpheaders,
        soapaction):
        self.header     = header
        self.body       = body
        self.attrs      = attrs
        self.xmldata    = xmldata
        self.connection = connection
        self.httpheaders= httpheaders
        self.soapaction = soapaction
# A class to describe how header messages are handled
class HeaderHandler:
    # Initially fail out if there are any problems.
    def __init__(self, header, attrs):
        """Scan every non-private entry of the parsed SOAP header and raise
        a MustUnderstand fault for any element flagged mustUnderstand=1
        (elements without the attribute, or with an unparsable value, are
        treated as not mandatory)."""
        for i in header.__dict__.keys():
            if i[0] == "_":
                continue

            d = getattr(header, i)

            try:
                fault = int(attrs[id(d)][(NS.ENV, 'mustUnderstand')])
            except:
                # attribute absent or not an int: not mandatory
                fault = 0

            if fault:
                raise faultType, ("%s:MustUnderstand" % NS.ENV_T,
                                  "Required Header Misunderstood",
                                  "%s" % i)
################################################################################
# SOAP Server
################################################################################
class SOAPServerBase:
    """Shared server plumbing: optional SSL wrapping of accepted sockets and
    registration of callable objects/functions keyed by namespace."""

    def get_request(self):
        # Accept via the TCPServer machinery, then wrap the socket in an
        # SSL connection when an ssl_context was configured.
        sock, addr = SocketServer.TCPServer.get_request(self)

        if self.ssl_context:
            sock = SSL.Connection(self.ssl_context, sock)
            sock._setup_ssl(addr)
            if sock.accept_ssl() != 1:
                raise socket.error, "Couldn't accept SSL connection"

        return sock, addr

    def registerObject(self, object, namespace = '', path = ''):
        # Default to the server-wide namespace, or derive one from the URL
        # path ("/a/b" -> "a:b") when only a path is given.
        if namespace == '' and path == '': namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":": namespace = namespace[1:]
        self.objmap[namespace] = object

    def registerFunction(self, function, namespace = '', funcName = None,
                         path = ''):
        if not funcName : funcName = function.__name__
        if namespace == '' and path == '': namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":": namespace = namespace[1:]
        if self.funcmap.has_key(namespace):
            self.funcmap[namespace][funcName] = function
        else:
            self.funcmap[namespace] = {funcName : function}

    def registerKWObject(self, object, namespace = '', path = ''):
        # Register every public callable attribute of the object's class as
        # a keyword-style function.
        if namespace == '' and path == '': namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":": namespace = namespace[1:]
        for i in dir(object.__class__):
            if i[0] != "_" and callable(getattr(object, i)):
                self.registerKWFunction(getattr(object,i), namespace)

    # convenience - wraps your func for you.
    def registerKWFunction(self, function, namespace = '', funcName = None,
                           path = ''):
        if namespace == '' and path == '': namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":": namespace = namespace[1:]
        self.registerFunction(MethodSig(function,keywords=1), namespace,
                              funcName)

    def unregisterObject(self, object, namespace = '', path = ''):
        if namespace == '' and path == '': namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":": namespace = namespace[1:]

        del self.objmap[namespace]
class SOAPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def version_string(self):
return '<a href="http://pywebsvcs.sf.net">' + \
'SOAPpy ' + __version__ + '</a> (Python ' + \
sys.version.split()[0] + ')'
def date_time_string(self):
self.__last_date_time_string = \
BaseHTTPServer.BaseHTTPRequestHandler.\
date_time_string(self)
return self.__last_date_time_string
def do_POST(self):
global _contexts
status = 500
try:
if self.server.config.dumpHeadersIn:
s = 'Incoming HTTP headers'
debugHeader(s)
print self.raw_requestline.strip()
print "\n".join(map (lambda x: x.strip(),
self.headers.headers))
debugFooter(s)
data = self.rfile.read(int(self.headers["Content-length"]))
if self.server.config.dumpSOAPIn:
s = 'Incoming SOAP'
debugHeader(s)
print data,
if data[-1] != '\n':
print
debugFooter(s)
(r, header, body, attrs) = \
parseSOAPRPC(data, header = 1, body = 1, attrs = 1)
method = r._name
args = r._aslist()
kw = r._asdict()
if Config.simplify_objects:
args = simplify(args)
kw = simplify(kw)
# Handle mixed named and unnamed arguments by assuming
# that all arguments with names of the form "v[0-9]+"
# are unnamed and should be passed in numeric order,
# other arguments are named and should be passed using
# this name.
# This is a non-standard exension to the SOAP protocol,
# but is supported by Apache AXIS.
# It is enabled by default. To disable, set
# Config.specialArgs to False.
if Config.specialArgs:
ordered_args = {}
named_args = {}
for (k,v) in kw.items():
if k[0]=="v":
try:
i = int(k[1:])
ordered_args[i] = v
except ValueError:
named_args[str(k)] = v
else:
named_args[str(k)] = v
# We have to decide namespace precedence
# I'm happy with the following scenario
# if r._ns is specified use it, if not check for
# a path, if it's specified convert it and use it as the
# namespace. If both are specified, use r._ns.
ns = r._ns
if len(self.path) > 1 and not ns:
ns = self.path.replace("/", ":")
if ns[0] == ":": ns = ns[1:]
# authorization method
a = None
keylist = ordered_args.keys()
keylist.sort()
# create list in proper order w/o names
tmp = map( lambda x: ordered_args[x], keylist)
ordered_args = tmp
#print '<-> Argument Matching Yielded:'
#print '<-> Ordered Arguments:' + str(ordered_args)
#print '<-> Named Arguments :' + str(named_args)
resp = ""
# For fault messages
if ns:
nsmethod = "%s:%s" % (ns, method)
else:
nsmethod = method
try:
# First look for registered functions
if self.server.funcmap.has_key(ns) and \
self.server.funcmap[ns].has_key(method):
f = self.server.funcmap[ns][method]
# look for the authorization method
if self.server.config.authMethod != None:
authmethod = self.server.config.authMethod
if self.server.funcmap.has_key(ns) and \
self.server.funcmap[ns].has_key(authmethod):
a = self.server.funcmap[ns][authmethod]
else:
# Now look at registered objects
# Check for nested attributes. This works even if
# there are none, because the split will return
# [method]
f = self.server.objmap[ns]
# Look for the authorization method
if self.server.config.authMethod != None:
authmethod = self.server.config.authMethod
if hasattr(f, authmethod):
a = getattr(f, authmethod)
# then continue looking for the method
l = method.split(".")
for i in l:
f = getattr(f, i)
except:
info = sys.exc_info()
try:
resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
"Method Not Found",
"%s : %s %s %s" % (nsmethod,
info[0],
info[1],
info[2])),
encoding = self.server.encoding,
config = self.server.config)
finally:
del info
status = 500
else:
try:
if header:
x = HeaderHandler(header, attrs)
fr = 1
# call context book keeping
# We're stuffing the method into the soapaction if there
# isn't one, someday, we'll set that on the client
# and it won't be necessary here
# for now we're doing both
if "SOAPAction".lower() not in self.headers.keys() or \
self.headers["SOAPAction"] == "\"\"":
self.headers["SOAPAction"] = method
thread_id = thread.get_ident()
_contexts[thread_id] = SOAPContext(header, body,
attrs, data,
self.connection,
self.headers,
self.headers["SOAPAction"])
# Do an authorization check
if a != None:
if not apply(a, (), {"_SOAPContext" :
_contexts[thread_id] }):
raise faultType("%s:Server" % NS.ENV_T,
"Authorization failed.",
"%s" % nsmethod)
# If it's wrapped, some special action may be needed
if isinstance(f, MethodSig):
c = None
if f.context: # retrieve context object
c = _contexts[thread_id]
if Config.specialArgs:
if c:
named_args["_SOAPContext"] = c
fr = apply(f, ordered_args, named_args)
elif f.keywords:
# This is lame, but have to de-unicode
# keywords
strkw = {}
for (k, v) in kw.items():
strkw[str(k)] = v
if c:
strkw["_SOAPContext"] = c
fr = apply(f, (), strkw)
elif c:
fr = apply(f, args, {'_SOAPContext':c})
else:
fr = apply(f, args, {})
else:
if Config.specialArgs:
fr = apply(f, ordered_args, named_args)
else:
fr = apply(f, args, {})
if type(fr) == type(self) and \
isinstance(fr, voidType):
resp = buildSOAP(kw = {'%sResponse' % method: fr},
encoding = self.server.encoding,
config = self.server.config)
else:
resp = buildSOAP(kw =
{'%sResponse' % method: {'Result': fr}},
encoding = self.server.encoding,
config = self.server.config)
# Clean up _contexts
if _contexts.has_key(thread_id):
del _contexts[thread_id]
except Exception, e:
import traceback
info = sys.exc_info()
try:
if self.server.config.dumpFaultInfo:
s = 'Method %s exception' % nsmethod
debugHeader(s)
traceback.print_exception(info[0], info[1],
info[2])
debugFooter(s)
if isinstance(e, faultType):
f = e
else:
f = faultType("%s:Server" % NS.ENV_T,
"Method Failed",
"%s" % nsmethod)
if self.server.config.returnFaultInfo:
f._setDetail("".join(traceback.format_exception(
info[0], info[1], info[2])))
elif not hasattr(f, 'detail'):
f._setDetail("%s %s" % (info[0], info[1]))
finally:
del info
resp = buildSOAP(f, encoding = self.server.encoding,
config = self.server.config)
status = 500
else:
status = 200
except faultType, e:
import traceback
info = sys.exc_info()
try:
if self.server.config.dumpFaultInfo:
s = 'Received fault exception'
debugHeader(s)
traceback.print_exception(info[0], info[1],
info[2])
debugFooter(s)
if self.server.config.returnFaultInfo:
e._setDetail("".join(traceback.format_exception(
info[0], info[1], info[2])))
elif not hasattr(e, 'detail'):
e._setDetail("%s %s" % (info[0], info[1]))
finally:
del info
resp = buildSOAP(e, encoding = self.server.encoding,
config = self.server.config)
status = 500
except Exception, e:
# internal error, report as HTTP server error
if self.server.config.dumpFaultInfo:
s = 'Internal exception %s' % e
import traceback
debugHeader(s)
info = sys.exc_info()
try:
traceback.print_exception(info[0], info[1], info[2])
finally:
del info
debugFooter(s)
self.send_response(500)
self.end_headers()
if self.server.config.dumpHeadersOut and \
self.request_version != 'HTTP/0.9':
s = 'Outgoing HTTP headers'
debugHeader(s)
if self.responses.has_key(status):
s = ' ' + self.responses[status][0]
else:
s = ''
print "%s %d%s" % (self.protocol_version, 500, s)
print "Server:", self.version_string()
print "Date:", self.__last_date_time_string
debugFooter(s)
else:
# got a valid SOAP response
self.send_response(status)
t = 'text/xml';
if self.server.encoding != None:
t += '; charset="%s"' % self.server.encoding
self.send_header("Content-type", t)
self.send_header("Content-length", str(len(resp)))
self.end_headers()
if self.server.config.dumpHeadersOut and \
self.request_version != 'HTTP/0.9':
s = 'Outgoing HTTP headers'
debugHeader(s)
if self.responses.has_key(status):
s = ' ' + self.responses[status][0]
else:
s = ''
print "%s %d%s" % (self.protocol_version, status, s)
print "Server:", self.version_string()
print "Date:", self.__last_date_time_string
print "Content-type:", t
print "Content-length:", len(resp)
debugFooter(s)
if self.server.config.dumpSOAPOut:
s = 'Outgoing SOAP'
debugHeader(s)
print resp,
if resp[-1] != '\n':
print
debugFooter(s)
self.wfile.write(resp)
self.wfile.flush()
# We should be able to shut down both a regular and an SSL
# connection, but under Python 2.1, calling shutdown on an
# SSL connections drops the output, so this work-around.
# This should be investigated more someday.
if self.server.config.SSLserver and \
isinstance(self.connection, SSL.Connection):
self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
SSL.SSL_RECEIVED_SHUTDOWN)
else:
self.connection.shutdown(1)
def do_GET(self):
#print 'command ', self.command
#print 'path ', self.path
#print 'request_version', self.request_version
#print 'headers'
#print ' type ', self.headers.type
#print ' maintype', self.headers.maintype
#print ' subtype ', self.headers.subtype
#print ' params ', self.headers.plist
path = self.path.lower()
if path.endswith('wsdl'):
method = 'wsdl'
function = namespace = None
if self.server.funcmap.has_key(namespace) \
and self.server.funcmap[namespace].has_key(method):
function = self.server.funcmap[namespace][method]
else:
if namespace in self.server.objmap.keys():
function = self.server.objmap[namespace]
l = method.split(".")
for i in l:
function = getattr(function, i)
if function:
self.send_response(200)
self.send_header("Content-type", 'text/plain')
self.end_headers()
response = apply(function, ())
self.wfile.write(str(response))
return
# return error
self.send_response(200)
self.send_header("Content-type", 'text/html')
self.end_headers()
self.wfile.write('''\
<title>
<head>Error!</head>
</title>
<body>
<h1>Oops!</h1>
<p>
This server supports HTTP GET requests only for the the purpose of
obtaining Web Services Description Language (WSDL) for a specific
service.
Either you requested an URL that does not end in "wsdl" or this
server does not implement a wsdl method.
</p>
</body>''')
    def log_message(self, format, *args):
        """Log an HTTP-level message, but only when the server was
        created with logging enabled (the ``log`` constructor flag)."""
        if self.server.log:
            BaseHTTPServer.BaseHTTPRequestHandler.\
                log_message (self, format, *args)
class SOAPServer(SOAPServerBase, SocketServer.TCPServer):
    """Single-threaded SOAP server: handles one request at a time."""

    def __init__(self, addr = ('localhost', 8000),
        RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
        config = Config, namespace = None, ssl_context = None):
        """Create the server.

        addr           -- (host, port) pair to listen on
        RequestHandler -- handler class for incoming SOAP requests
        log            -- when true, log each request via the handler
        encoding       -- character encoding for responses (None disables)
        config         -- SOAPpy configuration object
        namespace      -- default namespace for registered methods
        ssl_context    -- SSL context; requires an SSL-capable build
        """
        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        if ssl_context != None and not config.SSLserver:
            raise AttributeError, \
                "SSL server not supported by this Python installation"

        self.namespace = namespace
        self.objmap = {}
        self.funcmap = {}
        self.ssl_context = ssl_context
        self.encoding = encoding
        self.config = config
        self.log = log

        # Allow quick restarts without waiting for TIME_WAIT to expire.
        self.allow_reuse_address= 1

        SocketServer.TCPServer.__init__(self, addr, RequestHandler)
class ThreadingSOAPServer(SOAPServerBase, SocketServer.ThreadingTCPServer):
    """SOAP server that handles each request in its own thread."""

    def __init__(self, addr = ('localhost', 8000),
        RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
        config = Config, namespace = None, ssl_context = None):
        """Create the server; parameters as for SOAPServer."""
        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        if ssl_context != None and not config.SSLserver:
            raise AttributeError, \
                "SSL server not supported by this Python installation"

        self.namespace = namespace
        self.objmap = {}
        self.funcmap = {}
        self.ssl_context = ssl_context
        self.encoding = encoding
        self.config = config
        self.log = log

        # Allow quick restarts without waiting for TIME_WAIT to expire.
        self.allow_reuse_address= 1

        SocketServer.ThreadingTCPServer.__init__(self, addr, RequestHandler)
# only define class if Unix domain sockets are available
if hasattr(socket, "AF_UNIX"):

    class SOAPUnixSocketServer(SOAPServerBase, SocketServer.UnixStreamServer):
        """SOAP server listening on a Unix domain socket.

        Only defined on platforms that support AF_UNIX.
        """

        def __init__(self, addr = 8000,
            RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
            config = Config, namespace = None, ssl_context = None):
            """Create the server; addr is the socket path (coerced to str)."""
            # Test the encoding, raising an exception if it's not known
            if encoding != None:
                ''.encode(encoding)

            if ssl_context != None and not config.SSLserver:
                raise AttributeError, \
                    "SSL server not supported by this Python installation"

            self.namespace = namespace
            self.objmap = {}
            self.funcmap = {}
            self.ssl_context = ssl_context
            self.encoding = encoding
            self.config = config
            self.log = log

            # Allow rebinding the socket path on restart.
            self.allow_reuse_address= 1

            SocketServer.UnixStreamServer.__init__(self, str(addr), RequestHandler)
|
unindented/streamcode | refs/heads/master | client/static/jsrepl/extern/python/unclosured/lib/python2.7/distutils/command/clean.py | 251 | """distutils.command.clean
Implements the Distutils 'clean' command."""
# contributed by Bastian Kleineidam <calvin@cs.uni-sb.de>, added 2000-03-18
__revision__ = "$Id$"
import os
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils import log
class clean(Command):
    """Implementation of the Distutils 'clean' command.

    Removes the byproducts of a previous 'build' command: the temporary
    build directory always, and (with --all) the build/lib, bdist and
    build/scripts directories as well.
    """

    description = "clean up temporary files from 'build' command"
    user_options = [
        ('build-base=', 'b',
         "base build directory (default: 'build.build-base')"),
        ('build-lib=', None,
         "build directory for all modules (default: 'build.build-lib')"),
        ('build-temp=', 't',
         "temporary build directory (default: 'build.build-temp')"),
        ('build-scripts=', None,
         "build directory for scripts (default: 'build.build-scripts')"),
        ('bdist-base=', None,
         "temporary directory for built distributions"),
        ('all', 'a',
         "remove all build output, not just temporary by-products")
    ]

    boolean_options = ['all']

    def initialize_options(self):
        """Set every option to None; real values come from 'build'/'bdist'."""
        self.build_base = None
        self.build_lib = None
        self.build_temp = None
        self.build_scripts = None
        self.bdist_base = None
        self.all = None

    def finalize_options(self):
        """Inherit unset directory options from the 'build'/'bdist' commands."""
        self.set_undefined_options('build',
                                   ('build_base', 'build_base'),
                                   ('build_lib', 'build_lib'),
                                   ('build_scripts', 'build_scripts'),
                                   ('build_temp', 'build_temp'))
        self.set_undefined_options('bdist',
                                   ('bdist_base', 'bdist_base'))

    def run(self):
        """Delete the build byproducts, honoring --dry-run and --all."""
        # remove the build/temp.<plat> directory (unless it's already
        # gone)
        if os.path.exists(self.build_temp):
            remove_tree(self.build_temp, dry_run=self.dry_run)
        else:
            log.debug("'%s' does not exist -- can't clean it",
                      self.build_temp)

        if self.all:
            # remove build directories
            for directory in (self.build_lib,
                              self.bdist_base,
                              self.build_scripts):
                if os.path.exists(directory):
                    remove_tree(directory, dry_run=self.dry_run)
                else:
                    log.warn("'%s' does not exist -- can't clean it",
                             directory)

        # just for the heck of it, try to remove the base build directory:
        # we might have emptied it right now, but if not we don't care
        if not self.dry_run:
            try:
                os.rmdir(self.build_base)
                log.info("removing '%s'", self.build_base)
            except OSError:
                pass

# class clean
|
YPlan/gargoyle | refs/heads/master | tests/testapp/test_conditions.py | 3 | from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import pytest
from django.core.validators import ValidationError
from django.test import TestCase
from gargoyle.conditions import AbstractDate, BeforeDate, ConditionSet, OnOrAfterDate, Percent, Range
from gargoyle.manager import SwitchManager
from gargoyle.models import SELECTIVE, Switch
class RangeTests(TestCase):
    """Range condition: is_active() bounds checks and clean() validation."""

    def test_is_active(self):
        # Bounds are inclusive on both ends.
        condition = Range()
        assert not condition.is_active('1-2', 0)
        assert condition.is_active('1-2', 1)
        assert condition.is_active('1-2', 2)
        assert not condition.is_active('1-2', 3)

    def test_is_active_should_allow_floats(self):
        condition = Range()
        assert not condition.is_active('1-2', 0.0)
        assert not condition.is_active('1-2', 0.9)
        assert condition.is_active('1-2', 1.0)
        assert condition.is_active('1-2', 1.5)
        assert condition.is_active('1-2', 2.0)
        assert not condition.is_active('1-2', 2.1)
        assert not condition.is_active('1-2', 2.01)
        assert not condition.is_active('1-2', 2.001)
        assert not condition.is_active('1-2', 3.0)
        assert not condition.is_active('1-2', 9e9)

    def test_is_active_shouldnt_allow_strings(self):
        # Numeric strings are not coerced: never active.
        condition = Range()
        assert not condition.is_active('1-2', '0')
        assert not condition.is_active('1-2', '1')
        assert not condition.is_active('1-2', '2')
        assert not condition.is_active('1-2', '3')

    def test_clean_success(self):
        condition = Range()
        assert condition.clean('1-2') == '1-2'

    def test_clean_fail_empty(self):
        condition = Range()
        with pytest.raises(ValidationError):
            condition.clean('')

    def test_clean_fail_no_dash(self):
        condition = Range()
        with pytest.raises(ValidationError):
            condition.clean('1')

    def test_clean_fail_no_second_number(self):
        condition = Range()
        with pytest.raises(ValidationError):
            condition.clean('1-')

    def test_clean_fail_no_first_number(self):
        condition = Range()
        with pytest.raises(ValidationError):
            condition.clean('-2')

    def test_clean_fail_no_numbers(self):
        condition = Range()
        with pytest.raises(ValidationError):
            condition.clean('-')

    def test_clean_fail_too_many_numbers(self):
        condition = Range()
        with pytest.raises(ValidationError):
            condition.clean('1-2-3')
class PercentTests(TestCase):
    """Percent condition: clean() validation (0-100 range, low <= high)."""

    def test_clean_success(self):
        condition = Percent()
        assert condition.clean('0-50') == '0-50'

    def test_clean_fail_no_first_number(self):
        condition = Percent()
        with pytest.raises(ValidationError):
            condition.clean('-50')

    def test_clean_fail_no_second_number(self):
        condition = Percent()
        with pytest.raises(ValidationError):
            condition.clean('10-')

    def test_clean_fail_no_numbers(self):
        condition = Percent()
        with pytest.raises(ValidationError):
            condition.clean('-')

    def test_clean_fail_empty(self):
        condition = Percent()
        with pytest.raises(ValidationError):
            condition.clean('')

    def test_clean_fail_out_of_range(self):
        # Percentages above 100 are rejected.
        condition = Percent()
        with pytest.raises(ValidationError):
            condition.clean('10-160')

    def test_clean_first_greater_than_second(self):
        condition = Percent()
        with pytest.raises(ValidationError):
            condition.clean('80-20')
class AbstractDateTests(TestCase):
    """clean() validation shared by the date-based conditions:
    only ISO 'YYYY-MM-DD' strings are accepted."""

    def test_clean_success(self):
        condition = AbstractDate()
        assert condition.clean('2016-01-01') == '2016-01-01'

    def test_clean_failed(self):
        condition = AbstractDate()
        with pytest.raises(ValidationError):
            condition.clean("20160101")
class BeforeDateTests(TestCase):
    """is_active() for BeforeDate: true only for dates strictly before
    the configured bound."""

    def test_is_active_date_less(self):
        cond = BeforeDate()
        assert cond.is_active("2016-08-05", datetime.date(2016, 8, 2))

    def test_is_active_date_equal(self):
        cond = BeforeDate()
        assert not cond.is_active("2016-08-05", datetime.date(2016, 8, 5))

    def test_is_active_date_greater(self):
        cond = BeforeDate()
        assert not cond.is_active("2016-08-05", datetime.date(2016, 8, 10))
class OnOrAfterDateTests(TestCase):
    """is_active() for OnOrAfterDate: true for dates on or after the
    configured bound."""

    def test_is_active_date_less(self):
        cond = OnOrAfterDate()
        assert not cond.is_active("2016-08-05", datetime.date(2016, 8, 2))

    def test_is_active_date_equal(self):
        cond = OnOrAfterDate()
        assert cond.is_active("2016-08-05", datetime.date(2016, 8, 5))

    def test_is_active_date_greater(self):
        cond = OnOrAfterDate()
        assert cond.is_active("2016-08-05", datetime.date(2016, 8, 10))
class NumberConditionSet(ConditionSet):
    """Condition set whose "instance" is a plain number.

    The number itself serves as the value of the 'in_range' field.
    """

    in_range = Range()

    def get_field_value(self, instance, field_name):
        # The instance *is* the value; any other field name yields None
        # (implicit return).
        if field_name == 'in_range':
            return instance
class NumberConditionSetTests(TestCase):
    """End-to-end check of NumberConditionSet through a SwitchManager."""

    # Dotted path used when attaching conditions to the switch.
    condition_set = __name__ + '.' + NumberConditionSet.__name__

    def setUp(self):
        super(NumberConditionSetTests, self).setUp()
        self.gargoyle = SwitchManager(Switch, key='key', value='value', instances=True, auto_create=True)
        self.gargoyle.register(NumberConditionSet())

        # A SELECTIVE switch is active only when a condition matches.
        Switch.objects.create(key='test', status=SELECTIVE)
        self.switch = self.gargoyle['test']

    def test_range(self):
        self.switch.add_condition(
            condition_set=self.condition_set,
            field_name='in_range',
            condition='1-3',
        )

        # Inclusive bounds, per the Range condition.
        assert not self.gargoyle.is_active('test', 0)
        assert self.gargoyle.is_active('test', 1)
        assert self.gargoyle.is_active('test', 2)
        assert self.gargoyle.is_active('test', 3)
        assert not self.gargoyle.is_active('test', 4)
|
silly-wacky-3-town-toon/SOURCE-COD | refs/heads/master | Panda3D-1.10.0/python/Lib/pipes.py | 179 | """Conversion pipeline templates.
The problem:
------------
Suppose you have some data that you want to convert to another format,
such as from GIF image format to PPM image format. Maybe the
conversion involves several steps (e.g. piping it through compress or
uuencode). Some of the conversion steps may require that their input
is a disk file, others may be able to read standard input; similar for
their output. The input to the entire conversion may also be read
from a disk file or from an open file, and similar for its output.
The module lets you construct a pipeline template by sticking one or
more conversion steps together. It will take care of creating and
removing temporary files if they are necessary to hold intermediate
data. You can then use the template to do conversions from many
different sources to many different destinations. The temporary
file names used are different each time the template is used.
The templates are objects so you can create templates for many
different conversion steps and store them in a dictionary, for
instance.
Directions:
-----------
To create a template:
t = Template()
To add a conversion step to a template:
t.append(command, kind)
where kind is a string of two characters: the first is '-' if the
command reads its standard input or 'f' if it requires a file; the
second likewise for the output. The command must be valid /bin/sh
syntax. If input or output files are required, they are passed as
$IN and $OUT; otherwise, it must be possible to use the command in
a pipeline.
To add a conversion step at the beginning:
t.prepend(command, kind)
To convert a file to another file using a template:
sts = t.copy(infile, outfile)
If infile or outfile are the empty string, standard input is read or
standard output is written, respectively. The return value is the
exit status of the conversion pipeline.
To open a file for reading or writing through a conversion pipeline:
fp = t.open(file, mode)
where mode is 'r' to read the file, or 'w' to write it -- just like
for the built-in function open() or for os.popen().
To create a new template object initialized to a given one:
t2 = t.clone()
""" # '
import re
import os
import tempfile
import string
__all__ = ["Template"]
# Conversion step kinds
FILEIN_FILEOUT = 'ff' # Must read & write real files
STDIN_FILEOUT = '-f' # Must write a real file
FILEIN_STDOUT = 'f-' # Must read a real file
STDIN_STDOUT = '--' # Normal pipeline element
SOURCE = '.-' # Must be first, writes stdout
SINK = '-.' # Must be last, reads stdin
stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
SOURCE, SINK]
class Template:
    """Class representing a pipeline template."""

    def __init__(self):
        """Template() returns a fresh pipeline template."""
        self.debugging = 0
        self.reset()

    def __repr__(self):
        """t.__repr__() implements repr(t)."""
        return '<Template instance, steps=%r>' % (self.steps,)

    def reset(self):
        """t.reset() restores a pipeline template to its initial state."""
        self.steps = []

    def clone(self):
        """t.clone() returns a new pipeline template with identical
        initial state as the current one."""
        t = Template()
        t.steps = self.steps[:]
        t.debugging = self.debugging
        return t

    def debug(self, flag):
        """t.debug(flag) turns debugging on or off."""
        self.debugging = flag

    def append(self, cmd, kind):
        """t.append(cmd, kind) adds a new step at the end."""
        if type(cmd) is not type(''):
            raise TypeError, \
                  'Template.append: cmd must be a string'
        if kind not in stepkinds:
            raise ValueError, \
                  'Template.append: bad kind %r' % (kind,)
        # A SOURCE must be the first step; SINK must not already be last.
        if kind == SOURCE:
            raise ValueError, \
                  'Template.append: SOURCE can only be prepended'
        if self.steps and self.steps[-1][1] == SINK:
            raise ValueError, \
                  'Template.append: already ends with SINK'
        # File-kind steps must reference the $IN/$OUT placeholders.
        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
            raise ValueError, \
                  'Template.append: missing $IN in cmd'
        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
            raise ValueError, \
                  'Template.append: missing $OUT in cmd'
        self.steps.append((cmd, kind))

    def prepend(self, cmd, kind):
        """t.prepend(cmd, kind) adds a new step at the front."""
        if type(cmd) is not type(''):
            raise TypeError, \
                  'Template.prepend: cmd must be a string'
        if kind not in stepkinds:
            raise ValueError, \
                  'Template.prepend: bad kind %r' % (kind,)
        # A SINK must be the last step; SOURCE must not already be first.
        if kind == SINK:
            raise ValueError, \
                  'Template.prepend: SINK can only be appended'
        if self.steps and self.steps[0][1] == SOURCE:
            raise ValueError, \
                  'Template.prepend: already begins with SOURCE'
        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
            raise ValueError, \
                  'Template.prepend: missing $IN in cmd'
        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
            raise ValueError, \
                  'Template.prepend: missing $OUT in cmd'
        self.steps.insert(0, (cmd, kind))

    def open(self, file, rw):
        """t.open(file, rw) returns a pipe or file object open for
        reading or writing; the file is the other end of the pipeline."""
        if rw == 'r':
            return self.open_r(file)
        if rw == 'w':
            return self.open_w(file)
        raise ValueError, \
              'Template.open: rw must be \'r\' or \'w\', not %r' % (rw,)

    def open_r(self, file):
        """t.open_r(file) and t.open_w(file) implement
        t.open(file, 'r') and t.open(file, 'w') respectively."""
        if not self.steps:
            return open(file, 'r')
        if self.steps[-1][1] == SINK:
            raise ValueError, \
                  'Template.open_r: pipeline ends width SINK'
        cmd = self.makepipeline(file, '')
        return os.popen(cmd, 'r')

    def open_w(self, file):
        # The 'w' half of t.open(); see open_r's docstring.
        if not self.steps:
            return open(file, 'w')
        if self.steps[0][1] == SOURCE:
            raise ValueError, \
                  'Template.open_w: pipeline begins with SOURCE'
        cmd = self.makepipeline('', file)
        return os.popen(cmd, 'w')

    def copy(self, infile, outfile):
        """t.copy(infile, outfile) runs the pipeline from infile to
        outfile and returns the shell's exit status."""
        return os.system(self.makepipeline(infile, outfile))

    def makepipeline(self, infile, outfile):
        """Build the /bin/sh command for this template; with debugging
        on, prefix 'set -x' so each command is echoed as it runs."""
        cmd = makepipeline(infile, self.steps, outfile)
        if self.debugging:
            print cmd
            cmd = 'set -x; ' + cmd
        return cmd
def makepipeline(infile, steps, outfile):
    """Build the /bin/sh command line for a pipeline.

    infile/outfile are file names, or '' for stdin/stdout; steps is the
    template's list of (command, kind) pairs.  Returns a shell script
    string that wires the steps together with pipes and, where a step
    needs a real file, with temporary files that are cleaned up by a
    trap on exit.
    """
    # Build a list with for each command:
    # [input filename or '', command string, kind, output filename or '']
    list = []
    for cmd, kind in steps:
        list.append(['', cmd, kind, ''])
    #
    # Make sure there is at least one step
    #
    if not list:
        list.append(['', 'cat', '--', ''])
    #
    # Take care of the input and output ends
    #
    [cmd, kind] = list[0][1:3]
    if kind[0] == 'f' and not infile:
        # First step wants a file but we're reading stdin: buffer via cat.
        list.insert(0, ['', 'cat', '--', ''])
    list[0][0] = infile
    #
    [cmd, kind] = list[-1][1:3]
    if kind[1] == 'f' and not outfile:
        # Last step writes a file but we want stdout: buffer via cat.
        list.append(['', 'cat', '--', ''])
    list[-1][-1] = outfile
    #
    # Invent temporary files to connect stages that need files
    #
    garbage = []
    for i in range(1, len(list)):
        lkind = list[i-1][2]
        rkind = list[i][2]
        if lkind[1] == 'f' or rkind[0] == 'f':
            (fd, temp) = tempfile.mkstemp()
            os.close(fd)
            garbage.append(temp)
            list[i-1][-1] = list[i][0] = temp
    #
    for item in list:
        [inf, cmd, kind, outf] = item
        # Expose file endpoints to the command as $IN/$OUT; redirect
        # stdin/stdout where the step reads/writes a stream.
        if kind[1] == 'f':
            cmd = 'OUT=' + quote(outf) + '; ' + cmd
        if kind[0] == 'f':
            cmd = 'IN=' + quote(inf) + '; ' + cmd
        if kind[0] == '-' and inf:
            cmd = cmd + ' <' + quote(inf)
        if kind[1] == '-' and outf:
            cmd = cmd + ' >' + quote(outf)
        item[1] = cmd
    #
    # Join the steps: pipe into stream-reading steps, newline-sequence
    # the file-connected ones (grouping $IN/$OUT assignments with { }).
    cmdlist = list[0][1]
    for item in list[1:]:
        [cmd, kind] = item[1:3]
        if item[0] == '':
            if 'f' in kind:
                cmd = '{ ' + cmd + '; }'
            cmdlist = cmdlist + ' |\n' + cmd
        else:
            cmdlist = cmdlist + '\n' + cmd
    #
    if garbage:
        # Remove the temporary files on normal exit and on HUP/INT/QUIT/
        # PIPE/ALRM/TERM.
        rmcmd = 'rm -f'
        for file in garbage:
            rmcmd = rmcmd + ' ' + quote(file)
        trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
        cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
    #
    return cmdlist
# Reliably quote a string as a single argument for /bin/sh

# Characters that never need quoting.
_safechars = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./')


def quote(file):
    """Return a shell-escaped version of the file string."""
    if not file:
        return "''"
    if all(c in _safechars for c in file):
        return file
    # Wrap in single quotes; each embedded single quote closes the
    # quoted run, is emitted inside double quotes, and the run is
    # reopened -- e.g. $'b becomes '$'"'"'b'
    escaped = file.replace("'", "'\"'\"'")
    return "'" + escaped + "'"
|
AngelTerrones/Algol | refs/heads/master | Simulation/modules/__init__.py | 12133432 | |
Pkthunder/geoq | refs/heads/develop | geoq/recolor/__init__.py | 12133432 | |
Osmose/kitsune | refs/heads/master | kitsune/upload/__init__.py | 12133432 | |
cyanna/edx-platform | refs/heads/master | common/djangoapps/student/tests/__init__.py | 12133432 | |
wangjild/python-jsonpath-rw | refs/heads/master | tests/bin/__init__.py | 6 | # Use modern python
from __future__ import absolute_import, print_function, unicode_literals
|
axbaretto/beam | refs/heads/master | sdks/python/.tox/lint/lib/python2.7/site-packages/google/protobuf/internal/any_test_pb2.py | 29 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/any_test.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/any_test.proto',
package='google.protobuf.internal',
syntax='proto2',
serialized_pb=_b('\n\'google/protobuf/internal/any_test.proto\x12\x18google.protobuf.internal\x1a\x19google/protobuf/any.proto\"K\n\x07TestAny\x12#\n\x05value\x18\x01 \x01(\x0b\x32\x14.google.protobuf.Any\x12\x11\n\tint_value\x18\x02 \x01(\x05*\x08\x08\n\x10\x80\x80\x80\x80\x02\"\x85\x01\n\x11TestAnyExtension1\x12\t\n\x01i\x18\x0f \x01(\x05\x32\x65\n\nextension1\x12!.google.protobuf.internal.TestAny\x18\xab\xff\xf6. \x01(\x0b\x32+.google.protobuf.internal.TestAnyExtension1')
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TESTANY = _descriptor.Descriptor(
name='TestAny',
full_name='google.protobuf.internal.TestAny',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.internal.TestAny.value', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='int_value', full_name='google.protobuf.internal.TestAny.int_value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(10, 536870912), ],
oneofs=[
],
serialized_start=96,
serialized_end=171,
)
_TESTANYEXTENSION1 = _descriptor.Descriptor(
name='TestAnyExtension1',
full_name='google.protobuf.internal.TestAnyExtension1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='i', full_name='google.protobuf.internal.TestAnyExtension1.i', index=0,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='extension1', full_name='google.protobuf.internal.TestAnyExtension1.extension1', index=0,
number=98418603, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=174,
serialized_end=307,
)
_TESTANY.fields_by_name['value'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['TestAny'] = _TESTANY
DESCRIPTOR.message_types_by_name['TestAnyExtension1'] = _TESTANYEXTENSION1
TestAny = _reflection.GeneratedProtocolMessageType('TestAny', (_message.Message,), dict(
DESCRIPTOR = _TESTANY,
__module__ = 'google.protobuf.internal.any_test_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestAny)
))
_sym_db.RegisterMessage(TestAny)
TestAnyExtension1 = _reflection.GeneratedProtocolMessageType('TestAnyExtension1', (_message.Message,), dict(
DESCRIPTOR = _TESTANYEXTENSION1,
__module__ = 'google.protobuf.internal.any_test_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestAnyExtension1)
))
_sym_db.RegisterMessage(TestAnyExtension1)
_TESTANYEXTENSION1.extensions_by_name['extension1'].message_type = _TESTANYEXTENSION1
TestAny.RegisterExtension(_TESTANYEXTENSION1.extensions_by_name['extension1'])
# @@protoc_insertion_point(module_scope)
|
kevin-coder/tensorflow-fork | refs/heads/master | tensorflow/compiler/tf2tensorrt/python/ops/trt_ops.py | 4 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Exposes the Python wrapper of TRTEngineOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import platform
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import is_tensorrt_enabled
from tensorflow.python.framework import errors
_tf_trt_so = None
_module_lock = threading.Lock()
def load_trt_ops():
  """Load the TF-TRT op library if it hasn't been loaded already.

  No-op when TensorRT support is not compiled in; raises RuntimeError on
  Windows.  The load is guarded by a module-level lock so concurrent
  callers are safe.
  """
  global _tf_trt_so

  if not is_tensorrt_enabled():
    return

  if platform.system() == "Windows":
    raise RuntimeError("Windows platforms are not supported")

  with _module_lock:
    if _tf_trt_so:
      # Another caller finished loading the library first.
      return

    try:
      # pylint: disable=g-import-not-at-top,unused-variable
      # This will call register_op_list() in
      # tensorflow/python/framework/op_def_registry.py, but it doesn't register
      # the op or the op kernel in C++ runtime.
      from tensorflow.compiler.tf2tensorrt.ops.gen_trt_ops import trt_engine_op
      # pylint: enable=g-import-not-at-top,unused-variable
    except ImportError as e:
      print("**** Failed to import TF-TRT ops. This is because the binary was "
            "not built with CUDA or TensorRT enabled. ****")
      raise e

    try:
      # pylint: disable=g-import-not-at-top
      from tensorflow.python.framework import load_library
      from tensorflow.python.platform import resource_loader
      # pylint: enable=g-import-not-at-top

      # Loading the shared object will cause registration of the op and the op
      # kernel if we link TF-TRT dynamically.
      _tf_trt_so = load_library.load_op_library(
          resource_loader.get_path_to_datafile("libtftrt.so"))
    except errors.NotFoundError as e:
      no_trt_message = (
          "**** Failed to initialize TensorRT. This is either because the "
          "TensorRT installation path is not in LD_LIBRARY_PATH, or because "
          "you do not have it installed. If not installed, please go to "
          "https://developer.nvidia.com/tensorrt to download and install "
          "TensorRT ****")
      print(no_trt_message)
      raise e
|
mapr/hue | refs/heads/hue-3.9.0-mapr | desktop/core/ext-py/pysqlite/doc/includes/sqlite3/adapter_datetime.py | 49 | from pysqlite2 import dbapi2 as sqlite3
import datetime, time
def adapt_datetime(ts):
    """Adapt a datetime to a POSIX timestamp (seconds since the epoch).

    Note: sub-second precision is discarded, since timetuple() only
    carries whole seconds.
    """
    broken_down = ts.timetuple()
    return time.mktime(broken_down)
sqlite3.register_adapter(datetime.datetime, adapt_datetime)
con = sqlite3.connect(":memory:")
cur = con.cursor()
now = datetime.datetime.now()
cur.execute("select ?", (now,))
print cur.fetchone()[0]
|
ex1usive-m4d/TemplateDocx | refs/heads/master | controllers/phpdocx/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/sqlite3/dbapi2.py | 161 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/dbapi2.py: the DB-API 2.0 interface
#
# Copyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import time
from _sqlite3 import *
paramstyle = "qmark"
threadsafety = 1
apilevel = "2.0"
# DB-API 2.0 (PEP 249) type aliases.
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime


def DateFromTicks(ticks):
    """DB-API 2.0 constructor: a Date from a POSIX timestamp (local time)."""
    year, month, day = time.localtime(ticks)[:3]
    return Date(year, month, day)


def TimeFromTicks(ticks):
    """DB-API 2.0 constructor: a Time from a POSIX timestamp (local time)."""
    hour, minute, second = time.localtime(ticks)[3:6]
    return Time(hour, minute, second)


def TimestampFromTicks(ticks):
    """DB-API 2.0 constructor: a Timestamp from a POSIX timestamp (local time)."""
    year, month, day, hour, minute, second = time.localtime(ticks)[:6]
    return Timestamp(year, month, day, hour, minute, second)
version_info = tuple([int(x) for x in version.split(".")])
sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")])
Binary = buffer
def register_adapters_and_converters():
    """Register default adapters/converters for date and datetime.

    Adapters serialize datetime.date/datetime.datetime to ISO-8601 text;
    converters parse the "date" and "timestamp" declared column types
    back into the corresponding objects.
    """
    def adapt_date(val):
        # date -> "YYYY-MM-DD"
        return val.isoformat()

    def adapt_datetime(val):
        # datetime -> "YYYY-MM-DD HH:MM:SS[.ffffff]" (space separator)
        return val.isoformat(" ")

    def convert_date(val):
        return datetime.date(*map(int, val.split("-")))

    def convert_timestamp(val):
        datepart, timepart = val.split(" ")
        year, month, day = map(int, datepart.split("-"))
        timepart_full = timepart.split(".")
        hours, minutes, seconds = map(int, timepart_full[0].split(":"))
        if len(timepart_full) == 2:
            # Fractional-seconds part present: microseconds.
            microseconds = int(timepart_full[1])
        else:
            microseconds = 0

        val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds)
        return val

    register_adapter(datetime.date, adapt_date)
    register_adapter(datetime.datetime, adapt_datetime)
    register_converter("date", convert_date)
    register_converter("timestamp", convert_timestamp)
# Install the default adapters/converters once at import time, then drop the
# helper so it does not linger in the module namespace.
register_adapters_and_converters()
# Clean up namespace
del(register_adapters_and_converters)
|
ShipJ/Code | refs/heads/master | Projects/SpringAutumnFair/src/analysis/__init__.py | 12133432 | |
helenst/django | refs/heads/master | tests/deprecation/__init__.py | 12133432 | |
gcblue/gcblue | refs/heads/master | scenarios/EditorSaved/__init__.py | 12133432 | |
lovexiaov/SandwichApp | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/certs.py | 1218 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
certs.py
~~~~~~~~
This module returns the preferred default CA certificate bundle.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
import os.path
try:
    # Prefer the certifi package's bundle when it is installed.
    from certifi import where
except ImportError:
    def where():
        """Return the preferred certificate bundle."""
        # Fall back to the CA bundle vendored alongside this module.
        here = os.path.dirname(__file__)
        return os.path.join(here, 'cacert.pem')

if __name__ == '__main__':
    print(where())
|
mobo95/pyload | refs/heads/stable | module/plugins/hooks/RapideoPl.py | 3 | # -*- coding: utf-8 -*-
from module.common.json_layer import json_loads
from module.plugins.internal.MultiHook import MultiHook
class RapideoPl(MultiHook):
    """Rapideo.pl multi-hoster hook: exposes the hosters the account supports."""
    __name__    = "RapideoPl"
    __type__    = "hook"
    __version__ = "0.03"

    __config__ = [("pluginmode"    , "all;listed;unlisted", "Use for plugins"                     , "all"),
                  ("pluginlist"    , "str"                , "Plugin list (comma separated)"       , ""   ),
                  ("revertfailed"  , "bool"               , "Revert to standard download if fails", True ),
                  ("retry"         , "int"                , "Number of retries before revert"     , 10   ),
                  ("retryinterval" , "int"                , "Retry interval in minutes"           , 1    ),
                  ("reload"        , "bool"               , "Reload plugin list"                  , True ),
                  ("reloadinterval", "int"                , "Reload interval in hours"            , 12   )]

    __description__ = """Rapideo.pl hook plugin"""
    __license__     = "GPLv3"
    __authors__     = [("goddie", "dev@rapideo.pl")]

    def getHosters(self):
        """Fetch and return the list of supported hoster domains."""
        raw = self.getURL("https://www.rapideo.pl/clipboard.php?json=3")
        hostings = json_loads(raw.strip())
        domains = []
        for row in hostings:
            # keep only rows with sdownload == "0" -- presumably the hosters
            # available for direct download through the service
            if row["sdownload"] == "0":
                domains.extend(row["domains"])
        self.logDebug(domains)
        return domains
|
istio/tools | refs/heads/master | perf/load/pilot/load_test.py | 1 | #!/usr/bin/env python3
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# this program checks the config push latency for the pilot.
import check_metrics
from prometheus import Query, Alarm, Prometheus
import sys
import os
import time
import typing
import subprocess
import argparse
cwd = os.getcwd()
# Make ../../../metrics importable (it provides the check_metrics module).
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../metrics'))
sys.path.insert(0, path)
# NOTE(review): `import check_metrics` above runs before this sys.path insert;
# presumably it only resolves because the script is launched from a directory
# where the module is already importable -- confirm the intended import order.
# count(envoy_cluster_upstream_cx_total{cluster_name="outbound|890||svc-0.pilot-load.svc.cluster.local"})
# envoy_cluster_manager_cds_version is not reliable due to region/zone is not consistently populated.
def config_push_converge_query(prom: Prometheus, svc: str = "svc-0", namespace: str = 'pilot-load'):
    """Return per-cluster Envoy connection-series counts for the pilot-load namespace.

    Queries Prometheus for the number of envoy_cluster_upstream_cx_total
    series grouped by cluster_name and returns a list of
    (metric_labels, value) tuples; an empty list when the query fails or
    yields nothing.

    `svc` and `namespace` are kept for interface compatibility; the query
    itself matches every cluster whose name contains "pilot-load".
    (The previous version built an unused cluster_name string from them;
    that dead code has been removed.)
    """
    result = prom.fetch_by_query(
        'count(envoy_cluster_upstream_cx_total{cluster_name=~".*pilot-load.*"}) by (cluster_name)')
    if not result:
        return []
    return [(point['metric'], point['value'][1])
            for point in result['data']['result']]
def setup_pilot_loadtest(instance, svc_entry: int):
    """Deploy the pilot load test with the given instance / service-entry counts.

    Runs ./setup.sh with HELM_FLAGS set in its environment and blocks
    until the script exits.
    """
    helm = 'serviceEntries=%d,instances=%d' % (svc_entry, instance)
    print('setup the loads, %s' % helm)
    # Copy the environment instead of aliasing os.environ: the original code
    # mutated os.environ in place, leaking HELM_FLAGS into this whole process.
    env = os.environ.copy()
    env['HELM_FLAGS'] = helm
    p = subprocess.Popen([
        './setup.sh',
    ], env=env)
    p.wait()
def wait_till_converge(prom: Prometheus):
    '''Confirm all the Envoy config has been converged to a single version.'''
    # NOTE(review): this loop has no exit condition -- it polls and prints the
    # per-cluster counts every 5 seconds forever, and `occurrence` is never
    # updated.  Presumably a convergence check (break once the counts are
    # complete/stable) was intended; confirm before relying on this function.
    occurrence = 0  # never incremented below
    start = time.time()
    while True:
        count = config_push_converge_query(prom)
        print('[Query] ', int(time.time() - start), 'seconds, ', count)
        time.sleep(5)
def testall(svc: int, se: int):
    """Run one load-test round: deploy the load, wait for Envoy config
    convergence, and report how long convergence took."""
    prom = check_metrics.setup_promethus()
    print('finished promethus setup', prom.url)
    setup_pilot_loadtest(svc, se)
    began = time.time()
    # ensure version is converged.
    wait_till_converge(prom)
    print('version converged in %s seconds ' % (time.time() - began))
def init_parser():
    """Build the command-line interface and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(description='Program for load test.')
    arg_parser.add_argument(
        '-s', '--start',
        nargs=2,
        type=int,
        default=[1000, 0],
        help='initial number of the services and service entries')
    return arg_parser.parse_args()
if __name__ == '__main__':
    # CLI entry point: parse -s/--start and run a single load-test round.
    result = init_parser()
    testall(result.start[0], result.start[1])
|
sumihai-tekindo/account_sicepat | refs/heads/master | account_asset_analytic/models/__init__.py | 1 | from . import asset |
freakboy3742/django | refs/heads/main | tests/empty/no_models/__init__.py | 12133432 | |
py-geek/City-Air | refs/heads/master | venv/lib/python2.7/site-packages/allauth/socialaccount/providers/mailru/__init__.py | 12133432 | |
juanalfonsopr/odoo | refs/heads/8.0 | addons/mrp_repair/mrp_repair.py | 148 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from datetime import datetime
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class mrp_repair(osv.osv):
    # Repair order model: tracks a product repair from quotation through
    # confirmation, repair work, invoicing and the final stock moves.
    _name = 'mrp.repair'
    _inherit = 'mail.thread'
    _description = 'Repair Order'
def _amount_untaxed(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates untaxed amount.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param field_name: Name of field.
@param arg: Argument
@param context: A standard dictionary for contextual values
@return: Dictionary of values.
"""
res = {}
cur_obj = self.pool.get('res.currency')
for repair in self.browse(cr, uid, ids, context=context):
res[repair.id] = 0.0
for line in repair.operations:
res[repair.id] += line.price_subtotal
for line in repair.fees_lines:
res[repair.id] += line.price_subtotal
cur = repair.pricelist_id.currency_id
res[repair.id] = cur_obj.round(cr, uid, cur, res[repair.id])
return res
def _amount_tax(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates taxed amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
#return {}.fromkeys(ids, 0)
cur_obj = self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for repair in self.browse(cr, uid, ids, context=context):
val = 0.0
cur = repair.pricelist_id.currency_id
for line in repair.operations:
#manage prices with tax included use compute_all instead of compute
if line.to_invoice:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, repair.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
for line in repair.fees_lines:
if line.to_invoice:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, repair.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
res[repair.id] = cur_obj.round(cr, uid, cur, val)
return res
def _amount_total(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates total amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
untax = self._amount_untaxed(cr, uid, ids, field_name, arg, context=context)
tax = self._amount_tax(cr, uid, ids, field_name, arg, context=context)
cur_obj = self.pool.get('res.currency')
for id in ids:
repair = self.browse(cr, uid, id, context=context)
cur = repair.pricelist_id.currency_id
res[id] = cur_obj.round(cr, uid, cur, untax.get(id, 0.0) + tax.get(id, 0.0))
return res
def _get_default_address(self, cr, uid, ids, field_name, arg, context=None):
res = {}
partner_obj = self.pool.get('res.partner')
for data in self.browse(cr, uid, ids, context=context):
adr_id = False
if data.partner_id:
adr_id = partner_obj.address_get(cr, uid, [data.partner_id.id], ['default'])['default']
res[data.id] = adr_id
return res
def _get_lines(self, cr, uid, ids, context=None):
return self.pool['mrp.repair'].search(cr, uid, [('operations', 'in', ids)], context=context)
def _get_fee_lines(self, cr, uid, ids, context=None):
return self.pool['mrp.repair'].search(cr, uid, [('fees_lines', 'in', ids)], context=context)
_columns = {
'name': fields.char('Repair Reference', required=True, states={'confirmed': [('readonly', True)]}, copy=False),
'product_id': fields.many2one('product.product', string='Product to Repair', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'partner_id': fields.many2one('res.partner', 'Partner', select=True, help='Choose partner for whom the order will be invoiced and delivered.', states={'confirmed': [('readonly', True)]}),
'address_id': fields.many2one('res.partner', 'Delivery Address', domain="[('parent_id','=',partner_id)]", states={'confirmed': [('readonly', True)]}),
'default_address_id': fields.function(_get_default_address, type="many2one", relation="res.partner"),
'state': fields.selection([
('draft', 'Quotation'),
('cancel', 'Cancelled'),
('confirmed', 'Confirmed'),
('under_repair', 'Under Repair'),
('ready', 'Ready to Repair'),
('2binvoiced', 'To be Invoiced'),
('invoice_except', 'Invoice Exception'),
('done', 'Repaired')
], 'Status', readonly=True, track_visibility='onchange', copy=False,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed repair order. \
\n* The \'Confirmed\' status is used when a user confirms the repair order. \
\n* The \'Ready to Repair\' status is used to start to repairing, user can start repairing only after repair order is confirmed. \
\n* The \'To be Invoiced\' status is used to generate the invoice before or after repairing done. \
\n* The \'Done\' status is set when repairing is completed.\
\n* The \'Cancelled\' status is used when user cancel repair order.'),
'location_id': fields.many2one('stock.location', 'Current Location', select=True, required=True, readonly=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}),
'location_dest_id': fields.many2one('stock.location', 'Delivery Location', readonly=True, required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}),
'lot_id': fields.many2one('stock.production.lot', 'Repaired Lot', domain="[('product_id','=', product_id)]", help="Products repaired are all belonging to this lot", oldname="prodlot_id"),
'guarantee_limit': fields.date('Warranty Expiration', states={'confirmed': [('readonly', True)]}),
'operations': fields.one2many('mrp.repair.line', 'repair_id', 'Operation Lines', readonly=True, states={'draft': [('readonly', False)]}, copy=True),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', help='Pricelist of the selected partner.'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoicing Address'),
'invoice_method': fields.selection([
("none", "No Invoice"),
("b4repair", "Before Repair"),
("after_repair", "After Repair")
], "Invoice Method",
select=True, required=True, states={'draft': [('readonly', False)]}, readonly=True, help='Selecting \'Before Repair\' or \'After Repair\' will allow you to generate invoice before or after the repair is done respectively. \'No invoice\' means you don\'t want to generate invoice for this repair order.'),
'invoice_id': fields.many2one('account.invoice', 'Invoice', readonly=True, track_visibility="onchange", copy=False),
'move_id': fields.many2one('stock.move', 'Move', readonly=True, help="Move created by the repair order", track_visibility="onchange", copy=False),
'fees_lines': fields.one2many('mrp.repair.fee', 'repair_id', 'Fees', readonly=True, states={'draft': [('readonly', False)]}, copy=True),
'internal_notes': fields.text('Internal Notes'),
'quotation_notes': fields.text('Quotation Notes'),
'company_id': fields.many2one('res.company', 'Company'),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'repaired': fields.boolean('Repaired', readonly=True, copy=False),
'amount_untaxed': fields.function(_amount_untaxed, string='Untaxed Amount',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_tax': fields.function(_amount_tax, string='Taxes',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_total': fields.function(_amount_total, string='Total',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
}
def _default_stock_location(self, cr, uid, context=None):
try:
warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
return warehouse.lot_stock_id.id
except:
return False
_defaults = {
'state': lambda *a: 'draft',
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'mrp.repair'),
'invoice_method': lambda *a: 'none',
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.repair', context=context),
'pricelist_id': lambda self, cr, uid, context: self.pool.get('product.pricelist').search(cr, uid, [('type', '=', 'sale')])[0],
'product_qty': 1.0,
'location_id': _default_stock_location,
}
_sql_constraints = [
('name', 'unique (name)', 'The name of the Repair Order must be unique!'),
]
def onchange_product_id(self, cr, uid, ids, product_id=None):
""" On change of product sets some values.
@param product_id: Changed product
@return: Dictionary of values.
"""
product = False
if product_id:
product = self.pool.get("product.product").browse(cr, uid, product_id)
return {'value': {
'guarantee_limit': False,
'lot_id': False,
'product_uom': product and product.uom_id.id or False,
}
}
def onchange_product_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value': {}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
def onchange_location_id(self, cr, uid, ids, location_id=None):
""" On change of location
"""
return {'value': {'location_dest_id': location_id}}
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_partner_id(self, cr, uid, ids, part, address_id):
""" On change of partner sets the values of partner address,
partner invoice address and pricelist.
@param part: Changed id of partner.
@param address_id: Address id from current record.
@return: Dictionary of values.
"""
part_obj = self.pool.get('res.partner')
pricelist_obj = self.pool.get('product.pricelist')
if not part:
return {'value': {
'address_id': False,
'partner_invoice_id': False,
'pricelist_id': pricelist_obj.search(cr, uid, [('type', '=', 'sale')])[0]
}
}
addr = part_obj.address_get(cr, uid, [part], ['delivery', 'invoice', 'default'])
partner = part_obj.browse(cr, uid, part)
pricelist = partner.property_product_pricelist and partner.property_product_pricelist.id or False
return {'value': {
'address_id': addr['delivery'] or addr['default'],
'partner_invoice_id': addr['invoice'],
'pricelist_id': pricelist
}
}
def action_cancel_draft(self, cr, uid, ids, *args):
""" Cancels repair order when it is in 'Draft' state.
@param *arg: Arguments
@return: True
"""
if not len(ids):
return False
mrp_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids):
mrp_line_obj.write(cr, uid, [l.id for l in repair.operations], {'state': 'draft'})
self.write(cr, uid, ids, {'state': 'draft'})
return self.create_workflow(cr, uid, ids)
def action_confirm(self, cr, uid, ids, *args):
""" Repair order state is set to 'To be invoiced' when invoice method
is 'Before repair' else state becomes 'Confirmed'.
@param *arg: Arguments
@return: True
"""
mrp_line_obj = self.pool.get('mrp.repair.line')
for o in self.browse(cr, uid, ids):
if (o.invoice_method == 'b4repair'):
self.write(cr, uid, [o.id], {'state': '2binvoiced'})
else:
self.write(cr, uid, [o.id], {'state': 'confirmed'})
for line in o.operations:
if line.product_id.track_production and not line.lot_id:
raise osv.except_osv(_('Warning!'), _("Serial number is required for operation line with product '%s'") % (line.product_id.name))
mrp_line_obj.write(cr, uid, [l.id for l in o.operations], {'state': 'confirmed'})
return True
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels repair order.
@return: True
"""
mrp_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
if not repair.invoiced:
mrp_line_obj.write(cr, uid, [l.id for l in repair.operations], {'state': 'cancel'}, context=context)
else:
raise osv.except_osv(_('Warning!'), _('Repair order is already invoiced.'))
return self.write(cr, uid, ids, {'state': 'cancel'})
def wkf_invoice_create(self, cr, uid, ids, *args):
self.action_invoice_create(cr, uid, ids)
return True
    def action_invoice_create(self, cr, uid, ids, group=False, context=None):
        """ Creates invoice(s) for repair order.
        @param group: It is set to true when group invoice is to be generated.
        @return: Invoice Ids (dict mapping repair id -> invoice id or False).
        """
        res = {}
        invoices_group = {}
        inv_line_obj = self.pool.get('account.invoice.line')
        inv_obj = self.pool.get('account.invoice')
        repair_line_obj = self.pool.get('mrp.repair.line')
        repair_fee_obj = self.pool.get('mrp.repair.fee')
        for repair in self.browse(cr, uid, ids, context=context):
            res[repair.id] = False
            # skip orders that are not invoiceable (yet) or already invoiced
            if repair.state in ('draft', 'cancel') or repair.invoice_id:
                continue
            if not (repair.partner_id.id and repair.partner_invoice_id.id):
                raise osv.except_osv(_('No partner!'), _('You have to select a Partner Invoice Address in the repair form!'))
            comment = repair.quotation_notes
            if (repair.invoice_method != 'none'):
                # With group=True, repairs sharing an invoice address are
                # appended onto one invoice per address (names/origins merged).
                if group and repair.partner_invoice_id.id in invoices_group:
                    inv_id = invoices_group[repair.partner_invoice_id.id]
                    invoice = inv_obj.browse(cr, uid, inv_id)
                    invoice_vals = {
                        'name': invoice.name + ', ' + repair.name,
                        'origin': invoice.origin + ', ' + repair.name,
                        'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''),
                    }
                    inv_obj.write(cr, uid, [inv_id], invoice_vals, context=context)
                else:
                    if not repair.partner_id.property_account_receivable:
                        raise osv.except_osv(_('Error!'), _('No account defined for partner "%s".') % repair.partner_id.name)
                    account_id = repair.partner_id.property_account_receivable.id
                    inv = {
                        'name': repair.name,
                        'origin': repair.name,
                        'type': 'out_invoice',
                        'account_id': account_id,
                        'partner_id': repair.partner_invoice_id.id or repair.partner_id.id,
                        'currency_id': repair.pricelist_id.currency_id.id,
                        'comment': repair.quotation_notes,
                        'fiscal_position': repair.partner_id.property_account_position.id
                    }
                    inv_id = inv_obj.create(cr, uid, inv)
                    invoices_group[repair.partner_invoice_id.id] = inv_id
                self.write(cr, uid, repair.id, {'invoiced': True, 'invoice_id': inv_id})
                # One invoice line per operation flagged to_invoice; income
                # account comes from the product, then its category.
                for operation in repair.operations:
                    if operation.to_invoice:
                        if group:
                            name = repair.name + '-' + operation.name
                        else:
                            name = operation.name
                        if operation.product_id.property_account_income:
                            account_id = operation.product_id.property_account_income.id
                        elif operation.product_id.categ_id.property_account_income_categ:
                            account_id = operation.product_id.categ_id.property_account_income_categ.id
                        else:
                            raise osv.except_osv(_('Error!'), _('No account defined for product "%s".') % operation.product_id.name)
                        invoice_line_id = inv_line_obj.create(cr, uid, {
                            'invoice_id': inv_id,
                            'name': name,
                            'origin': repair.name,
                            'account_id': account_id,
                            'quantity': operation.product_uom_qty,
                            'invoice_line_tax_id': [(6, 0, [x.id for x in operation.tax_id])],
                            'uos_id': operation.product_uom.id,
                            'price_unit': operation.price_unit,
                            'price_subtotal': operation.product_uom_qty * operation.price_unit,
                            'product_id': operation.product_id and operation.product_id.id or False
                        })
                        repair_line_obj.write(cr, uid, [operation.id], {'invoiced': True, 'invoice_line_id': invoice_line_id})
                # Same for fee lines flagged to_invoice.
                for fee in repair.fees_lines:
                    if fee.to_invoice:
                        if group:
                            name = repair.name + '-' + fee.name
                        else:
                            name = fee.name
                        if not fee.product_id:
                            raise osv.except_osv(_('Warning!'), _('No product defined on Fees!'))
                        if fee.product_id.property_account_income:
                            account_id = fee.product_id.property_account_income.id
                        elif fee.product_id.categ_id.property_account_income_categ:
                            account_id = fee.product_id.categ_id.property_account_income_categ.id
                        else:
                            raise osv.except_osv(_('Error!'), _('No account defined for product "%s".') % fee.product_id.name)
                        invoice_fee_id = inv_line_obj.create(cr, uid, {
                            'invoice_id': inv_id,
                            'name': name,
                            'origin': repair.name,
                            'account_id': account_id,
                            'quantity': fee.product_uom_qty,
                            'invoice_line_tax_id': [(6, 0, [x.id for x in fee.tax_id])],
                            'uos_id': fee.product_uom.id,
                            'product_id': fee.product_id and fee.product_id.id or False,
                            'price_unit': fee.price_unit,
                            'price_subtotal': fee.product_uom_qty * fee.price_unit
                        })
                        repair_fee_obj.write(cr, uid, [fee.id], {'invoiced': True, 'invoice_line_id': invoice_fee_id})
                # recompute the invoice taxes after all lines are in place
                inv_obj.button_reset_taxes(cr, uid, inv_id, context=context)
                res[repair.id] = inv_id
        return res
def action_repair_ready(self, cr, uid, ids, context=None):
""" Writes repair order state to 'Ready'
@return: True
"""
for repair in self.browse(cr, uid, ids, context=context):
self.pool.get('mrp.repair.line').write(cr, uid, [l.id for
l in repair.operations], {'state': 'confirmed'}, context=context)
self.write(cr, uid, [repair.id], {'state': 'ready'})
return True
def action_repair_start(self, cr, uid, ids, context=None):
""" Writes repair order state to 'Under Repair'
@return: True
"""
repair_line = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
repair_line.write(cr, uid, [l.id for
l in repair.operations], {'state': 'confirmed'}, context=context)
repair.write({'state': 'under_repair'})
return True
def action_repair_end(self, cr, uid, ids, context=None):
""" Writes repair order state to 'To be invoiced' if invoice method is
After repair else state is set to 'Ready'.
@return: True
"""
for order in self.browse(cr, uid, ids, context=context):
val = {}
val['repaired'] = True
if (not order.invoiced and order.invoice_method == 'after_repair'):
val['state'] = '2binvoiced'
elif (not order.invoiced and order.invoice_method == 'b4repair'):
val['state'] = 'ready'
else:
pass
self.write(cr, uid, [order.id], val)
return True
def wkf_repair_done(self, cr, uid, ids, *args):
self.action_repair_done(cr, uid, ids)
return True
    def action_repair_done(self, cr, uid, ids, context=None):
        """ Creates stock move for operation and stock move for final product of repair order.
        @return: Move ids of final products (dict mapping repair id -> move id)
        """
        res = {}
        move_obj = self.pool.get('stock.move')
        repair_line_obj = self.pool.get('mrp.repair.line')
        for repair in self.browse(cr, uid, ids, context=context):
            move_ids = []
            # one stock move per operation line (parts added/removed)
            for move in repair.operations:
                move_id = move_obj.create(cr, uid, {
                    'name': move.name,
                    'product_id': move.product_id.id,
                    'restrict_lot_id': move.lot_id.id,
                    'product_uom_qty': move.product_uom_qty,
                    'product_uom': move.product_uom.id,
                    'partner_id': repair.address_id and repair.address_id.id or False,
                    'location_id': move.location_id.id,
                    'location_dest_id': move.location_dest_id.id,
                })
                move_ids.append(move_id)
                repair_line_obj.write(cr, uid, [move.id], {'move_id': move_id, 'state': 'done'}, context=context)
            # final move of the repaired product itself
            move_id = move_obj.create(cr, uid, {
                'name': repair.name,
                'product_id': repair.product_id.id,
                'product_uom': repair.product_uom.id or repair.product_id.uom_id.id,
                'product_uom_qty': repair.product_qty,
                'partner_id': repair.address_id and repair.address_id.id or False,
                'location_id': repair.location_id.id,
                'location_dest_id': repair.location_dest_id.id,
                'restrict_lot_id': repair.lot_id.id,
            })
            move_ids.append(move_id)
            # validate every created move, then close the repair order
            move_obj.action_done(cr, uid, move_ids, context=context)
            self.write(cr, uid, [repair.id], {'state': 'done', 'move_id': move_id}, context=context)
            res[repair.id] = move_id
        return res
class ProductChangeMixin(object):
    # Shared product-onchange logic for repair operation lines and fee lines.
    def product_id_change(self, cr, uid, ids, pricelist, product, uom=False,
                          product_uom_qty=0, partner_id=False, guarantee_limit=False, context=None):
        """ On change of product it sets product quantity, tax account, name,
        uom of product, unit price and price subtotal.
        @param pricelist: Pricelist of current record.
        @param product: Changed id of product.
        @param uom: UoM of current record.
        @param product_uom_qty: Quantity of current record.
        @param partner_id: Partner of current record.
        @param guarantee_limit: Guarantee limit of current record.
        @return: Dictionary of values and warning message.
        """
        result = {}
        warning = {}
        ctx = context and context.copy() or {}
        ctx['uom'] = uom
        # default the quantity to 1 so price_get has something to work with
        if not product_uom_qty:
            product_uom_qty = 1
        result['product_uom_qty'] = product_uom_qty
        if product:
            product_obj = self.pool.get('product.product').browse(cr, uid, product, context=ctx)
            if partner_id:
                # map the product taxes through the partner's fiscal position
                partner = self.pool.get('res.partner').browse(cr, uid, partner_id)
                result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, partner.property_account_position, product_obj.taxes_id, context=ctx)
            result['name'] = product_obj.display_name
            result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id or False
            if not pricelist:
                warning = {
                    'title': _('No Pricelist!'),
                    'message':
                        _('You have to select a pricelist in the Repair form !\n'
                          'Please set one before choosing a product.')
                }
            else:
                price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
                                                                     product, product_uom_qty, partner_id, context=ctx)[pricelist]
                if price is False:
                    warning = {
                        'title': _('No valid pricelist line found !'),
                        'message':
                            _("Couldn't find a pricelist line matching this product and quantity.\n"
                              "You have to change either the product, the quantity or the pricelist.")
                    }
                else:
                    result.update({'price_unit': price, 'price_subtotal': price * product_uom_qty})
        return {'value': result, 'warning': warning}
class mrp_repair_line(osv.osv, ProductChangeMixin):
    # Operation line of a repair order: a part added to or removed from the
    # repaired product.
    _name = 'mrp.repair.line'
    _description = 'Repair Line'
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids, context=context):
if line.to_invoice:
taxes = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, line.repair_id.partner_id)
cur = line.repair_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
else:
res[line.id] = 0
return res
_columns = {
'name': fields.char('Description', required=True),
'repair_id': fields.many2one('mrp.repair', 'Repair Order Reference', ondelete='cascade', select=True),
'type': fields.selection([('add', 'Add'), ('remove', 'Remove')], 'Type', required=True),
'to_invoice': fields.boolean('To Invoice'),
'product_id': fields.many2one('product.product', 'Product', required=True),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'price_unit': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Account')),
'tax_id': fields.many2many('account.tax', 'repair_operation_line_tax', 'repair_operation_line_id', 'tax_id', 'Taxes'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'invoice_line_id': fields.many2one('account.invoice.line', 'Invoice Line', readonly=True, copy=False),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True),
'location_dest_id': fields.many2one('stock.location', 'Dest. Location', required=True, select=True),
'move_id': fields.many2one('stock.move', 'Inventory Move', readonly=True, copy=False),
'lot_id': fields.many2one('stock.production.lot', 'Lot'),
'state': fields.selection([
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('done', 'Done'),
('cancel', 'Cancelled')], 'Status', required=True, readonly=True, copy=False,
help=' * The \'Draft\' status is set automatically as draft when repair order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when repair order in confirm status. \
\n* The \'Done\' status is set automatically when repair order is completed.\
\n* The \'Cancelled\' status is set automatically when user cancel repair order.'),
}
_defaults = {
'state': lambda *a: 'draft',
'product_uom_qty': lambda *a: 1,
}
def onchange_operation_type(self, cr, uid, ids, type, guarantee_limit, company_id=False, context=None):
    """ On change of operation type it sets source location, destination location
    and to invoice field.
    @param type: Changed operation type ('add' or 'remove').
    @param guarantee_limit: Guarantee limit date ('%Y-%m-%d' string) of current record.
    @param company_id: Optional company used to restrict the warehouse lookup.
    @return: Dictionary of values.
    """
    # No operation type selected yet: clear both locations.
    if not type:
        return {'value': {
            'location_id': False,
            'location_dest_id': False
        }}
    location_obj = self.pool.get('stock.location')
    warehouse_obj = self.pool.get('stock.warehouse')
    # First production-type location found serves as the counterpart
    # location (source for removals, destination for additions).
    location_id = location_obj.search(cr, uid, [('usage', '=', 'production')], context=context)
    location_id = location_id and location_id[0] or False
    if type == 'add':
        # TOCHECK: Find stock location for user's company warehouse or
        # repair order's company's warehouse (company_id field is added in fix of lp:831583)
        args = company_id and [('company_id', '=', company_id)] or []
        warehouse_ids = warehouse_obj.search(cr, uid, args, context=context)
        stock_id = False
        if warehouse_ids:
            stock_id = warehouse_obj.browse(cr, uid, warehouse_ids[0], context=context).lot_stock_id.id
        # An added part is invoiced only when the guarantee has expired.
        to_invoice = (guarantee_limit and datetime.strptime(guarantee_limit, '%Y-%m-%d') < datetime.now())
        return {'value': {
            'to_invoice': to_invoice,
            'location_id': stock_id,
            'location_dest_id': location_id
        }}
    # type == 'remove': parts leave production for the first scrap location,
    # and are never invoiced.
    scrap_location_ids = location_obj.search(cr, uid, [('scrap_location', '=', True)], context=context)
    return {'value': {
        'to_invoice': False,
        'location_id': location_id,
        'location_dest_id': scrap_location_ids and scrap_location_ids[0] or False,
    }}
class mrp_repair_fee(osv.osv, ProductChangeMixin):
    """Extra (service) fee line attached to a repair order."""
    _name = 'mrp.repair.fee'
    _description = 'Repair Fees Line'

    def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
        """ Calculates amount.
        @param field_name: Name of field.
        @param arg: Argument
        @return: Dictionary of values keyed by line id.
        """
        res = {}
        tax_obj = self.pool.get('account.tax')
        cur_obj = self.pool.get('res.currency')
        for line in self.browse(cr, uid, ids, context=context):
            if line.to_invoice:
                # Tax-inclusive total of the line, rounded in the currency
                # of the parent repair order's pricelist.
                taxes = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, line.repair_id.partner_id)
                cur = line.repair_id.pricelist_id.currency_id
                res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
            else:
                # Lines not flagged for invoicing contribute nothing.
                res[line.id] = 0
        return res

    _columns = {
        'repair_id': fields.many2one('mrp.repair', 'Repair Order Reference', required=True, ondelete='cascade', select=True),
        'name': fields.char('Description', select=True, required=True),
        'product_id': fields.many2one('product.product', 'Product'),
        'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'price_unit': fields.float('Unit Price', required=True),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Account')),
        'tax_id': fields.many2many('account.tax', 'repair_fee_line_tax', 'repair_fee_line_id', 'tax_id', 'Taxes'),
        'invoice_line_id': fields.many2one('account.invoice.line', 'Invoice Line', readonly=True, copy=False),
        'to_invoice': fields.boolean('To Invoice'),
        'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
    }
    _defaults = {
        # Fee lines are billable by default.
        'to_invoice': lambda *a: True,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
anryko/ansible | refs/heads/devel | test/units/modules/network/edgeswitch/__init__.py | 12133432 | |
gtest-org/test15 | refs/heads/master | tests/yamlparser/__init__.py | 12133432 | |
OSSOS/MOP | refs/heads/master | src/ossos/core/ossos/tools/__init__.py | 12133432 | |
xme1226/horizon | refs/heads/master | openstack_dashboard/management/__init__.py | 12133432 | |
JeyZeta/Dangerous | refs/heads/master | Dangerous/Golismero/misc/old_tests/plugin_tests/__init__.py | 12133432 | |
whs/django | refs/heads/master | tests/servers/__init__.py | 12133432 | |
jreback/pandas | refs/heads/master | pandas/tests/tslibs/test_ccalendar.py | 3 | from datetime import date, datetime
from hypothesis import given, strategies as st
import numpy as np
import pytest
from pandas._libs.tslibs import ccalendar
import pandas as pd
@pytest.mark.parametrize(
    "date_tuple,expected",
    [
        ((2001, 3, 1), 60),
        ((2004, 3, 1), 61),
        ((1907, 12, 31), 365),  # End-of-year, non-leap year.
        ((2004, 12, 31), 366),  # End-of-year, leap year.
    ],
)
def test_get_day_of_year_numeric(date_tuple, expected):
    """get_day_of_year returns the 1-based ordinal day for (year, month, day)."""
    result = ccalendar.get_day_of_year(*date_tuple)
    assert result == expected
def test_get_day_of_year_dt():
    """get_day_of_year agrees with datetime arithmetic for a random date."""
    ordinal = 1 + np.random.randint(365 * 4000)
    dt = datetime.fromordinal(ordinal)
    day_of_year = ccalendar.get_day_of_year(dt.year, dt.month, dt.day)
    jan_first = dt.replace(month=1, day=1)
    assert day_of_year == (dt - jan_first).days + 1
@pytest.mark.parametrize(
    "input_date_tuple, expected_iso_tuple",
    [
        [(2020, 1, 1), (2020, 1, 3)],
        [(2019, 12, 31), (2020, 1, 2)],
        [(2019, 12, 30), (2020, 1, 1)],
        [(2009, 12, 31), (2009, 53, 4)],
        [(2010, 1, 1), (2009, 53, 5)],
        [(2010, 1, 3), (2009, 53, 7)],
        [(2010, 1, 4), (2010, 1, 1)],
        [(2006, 1, 1), (2005, 52, 7)],
        [(2005, 12, 31), (2005, 52, 6)],
        [(2008, 12, 28), (2008, 52, 7)],
        [(2008, 12, 29), (2009, 1, 1)],
    ],
)
def test_dt_correct_iso_8601_year_week_and_day(input_date_tuple, expected_iso_tuple):
    """get_iso_calendar matches both date.isocalendar() and the hand-computed
    ISO-8601 (year, week, weekday) triple for tricky year-boundary dates."""
    iso = ccalendar.get_iso_calendar(*input_date_tuple)
    assert iso == date(*input_date_tuple).isocalendar()
    assert iso == expected_iso_tuple
@given(
    st.datetimes(
        min_value=pd.Timestamp.min.to_pydatetime(warn=False),
        max_value=pd.Timestamp.max.to_pydatetime(warn=False),
    )
)
def test_isocalendar(dt):
    """Property test: get_iso_calendar matches datetime.isocalendar across
    the full representable Timestamp range."""
    assert ccalendar.get_iso_calendar(dt.year, dt.month, dt.day) == dt.isocalendar()
|
ainstushar/qBittorrent | refs/heads/master | src/searchengine/nova/engines/demonoid.py | 20 | #VERSION: 1.1
#AUTHORS: Douman (custparasite@gmx.se)
#CONTRIBUTORS: Diego de las Heras (diegodelasheras@gmail.com)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from HTMLParser import HTMLParser
from httplib import HTTPSConnection as https
from re import compile as re_compile
from re import DOTALL
from itertools import islice
#qBt
from novaprinter import prettyPrinter
from helpers import download_file
class demonoid(object):
    """ Search engine class """
    url = "https://www.demonoid.pw"
    name = "Demonoid"
    # Site category ids keyed by qBittorrent's category names.
    supported_categories = {'all': '0',
                            'music': '2',
                            'movies': '1',
                            'games': '4',
                            'software': '5',
                            'books': '11',
                            'anime': '9',
                            'tv': '3'}

    def download_torrent(self, info):
        """ Downloader """
        # Print the "path url" pair expected by qBittorrent's nova engine protocol.
        print(download_file(info))

    class MyHtmlParseWithBlackJack(HTMLParser):
        """ Parser class """
        def __init__(self, url):
            HTMLParser.__init__(self)
            self.url = url
            # Dict for the torrent row currently being assembled; None between rows.
            self.current_item = None
            # Key under which the next text node should be stored, or None.
            self.save_data = None
            # True while inside the table cell holding seeds/leechers counts.
            self.seeds_leech = False
            # Strips thousands separators from the size field.
            self.size_repl = re_compile(",")

        def handle_starttag(self, tag, attrs):
            """ Parser's start tag handler """
            if tag == "a":
                params = dict(attrs)
                if "href" in params:
                    link = params["href"]
                    if link.startswith("/files/details"):
                        # A details link starts a new result row.
                        self.current_item = dict()
                        self.current_item["desc_link"] = "".join((self.url, link))
                        self.current_item["engine_url"] = self.url
                        self.save_data = "name"
                    elif link.startswith("/files/download"):
                        self.current_item["link"] = "".join((self.url, link))
            elif self.current_item:
                if tag == "td":
                    params = dict(attrs)
                    if "class" in params and "align" in params:
                        if params["class"].startswith("tone"):
                            # Right-aligned cell holds the size; centered
                            # cell holds the seeds/leechers counts.
                            if params["align"] == "right":
                                self.save_data = "size"
                            elif params["align"] == "center":
                                self.seeds_leech = True
                elif self.seeds_leech and tag == "font":
                    # Seeds are rendered green, leechers red.
                    for attr in attrs:
                        if "class" in attr:
                            if attr[1] == "green":
                                self.save_data = "seeds"
                            elif attr[1] == "red":
                                self.save_data = "leech"
                    self.seeds_leech = False

        def handle_data(self, data):
            """ Parser's data handler """
            if self.save_data:
                if self.save_data == "name":
                    # names with special characters like '&' are splitted in several pieces
                    if 'name' not in self.current_item:
                        self.current_item['name'] = ''
                    self.current_item['name'] += data
                else:
                    self.current_item[self.save_data] = data
                    self.save_data = None
                    # A row is complete once all 7 fields are collected;
                    # print it and reset for the next row.
                    if self.current_item.__len__() == 7:
                        self.current_item["size"] = self.size_repl.sub("", self.current_item["size"])
                        prettyPrinter(self.current_item)
                        self.current_item = None

        def handle_endtag(self, tag):
            """ Parser's end tag handler """
            # Closing the anchor ends name accumulation.
            if self.save_data == "name":
                self.save_data = None

    def search(self, what, cat='all'):
        """ Performs search """
        connection = https("www.demonoid.pw")

        #prepare query
        cat = self.supported_categories[cat.lower()]
        query = "".join(("/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&to=1&uid=0&sort=S"))

        connection.request("GET", query)
        response = connection.getresponse()
        if response.status != 200:
            return

        data = response.read().decode("utf-8")
        # Pagination links and the torrent-table extent within the page.
        add_res_list = re_compile("/files.*page=[0-9]+")
        torrent_list = re_compile("start torrent list -->(.*)<!-- end torrent", DOTALL)
        data = torrent_list.search(data).group(0)
        list_results = add_res_list.findall(data)
        parser = self.MyHtmlParseWithBlackJack(self.url)
        parser.feed(data)
        del data

        if list_results:
            # Fetch up to five additional result pages.
            # NOTE(review): list_results[1] assumes at least two pagination
            # links are present — looks like an IndexError is possible when
            # only one exists; verify against real result pages.
            for search_query in islice((add_res_list.search(result).group(0) for result in list_results[1].split(" | ")), 0, 5):
                connection.request("GET", search_query)
                response = connection.getresponse()
                parser.feed(torrent_list.search(response.read().decode('utf-8')).group(0))
            parser.close()

        connection.close()
        return
|
aisipos/django | refs/heads/master | django/db/backends/oracle/operations.py | 8 | from __future__ import unicode_literals
import datetime
import re
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.utils import truncate_name
from django.utils import six, timezone
from django.utils.encoding import force_bytes, force_text
from .base import Database
from .utils import InsertIdVar, Oracle_datetime, convert_unicode
class DatabaseOperations(BaseDatabaseOperations):
    """Oracle-specific implementations of the SQL-generation and value
    adaptation/conversion hooks used by the Django ORM."""
    compiler_module = "django.db.backends.oracle.compiler"

    # Oracle uses NUMBER(11) and NUMBER(19) for integer fields.
    integer_field_ranges = {
        'SmallIntegerField': (-99999999999, 99999999999),
        'IntegerField': (-99999999999, 99999999999),
        'BigIntegerField': (-9999999999999999999, 9999999999999999999),
        'PositiveSmallIntegerField': (0, 99999999999),
        'PositiveIntegerField': (0, 99999999999),
    }

    # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
    # PL/SQL block that advances a sequence until it catches up with the
    # current MAX() of the column it feeds.
    _sequence_reset_sql = """
DECLARE
    table_value integer;
    seq_value integer;
BEGIN
    SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
    SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
           WHERE sequence_name = '%(sequence)s';
    WHILE table_value > seq_value LOOP
        SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
    END LOOP;
END;
/"""

    def autoinc_sql(self, table, column):
        # To simulate auto-incrementing primary keys in Oracle, we have to
        # create a sequence and a trigger.
        args = {
            'sq_name': self._get_sequence_name(table),
            'tr_name': self._get_trigger_name(table),
            'tbl_name': self.quote_name(table),
            'col_name': self.quote_name(column),
        }
        # Create the sequence only if it does not exist yet.
        sequence_sql = """
DECLARE
    i INTEGER;
BEGIN
    SELECT COUNT(*) INTO i FROM USER_CATALOG
        WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
    IF i = 0 THEN
        EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
    END IF;
END;
/""" % args
        # Fill the PK column from the sequence whenever a row is inserted
        # without an explicit value.
        trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
    BEGIN
        SELECT "%(sq_name)s".nextval
        INTO :new.%(col_name)s FROM dual;
    END;
/""" % args
        return sequence_sql, trigger_sql

    def cache_key_culling_sql(self):
        # Oracle (pre-12c) has no LIMIT/OFFSET; emulate with rank().
        return """
            SELECT cache_key
              FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s)
             WHERE rank = %%s + 1
        """

    def date_extract_sql(self, lookup_type, field_name):
        if lookup_type == 'week_day':
            # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
            return "TO_CHAR(%s, 'D')" % field_name
        else:
            # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)

    def date_interval_sql(self, timedelta):
        """
        Implements the interval functionality for expressions
        format for Oracle:
        INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6)
        """
        minutes, seconds = divmod(timedelta.seconds, 60)
        hours, minutes = divmod(minutes, 60)
        days = str(timedelta.days)
        # DAY precision must cover the number of digits in the day count.
        day_precision = len(days)
        fmt = "INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6)"
        return fmt % (days, hours, minutes, seconds, timedelta.microseconds,
                day_precision), []

    def date_trunc_sql(self, lookup_type, field_name):
        # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
        if lookup_type in ('year', 'month'):
            return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
        else:
            # Bare TRUNC strips the time part (truncates to day).
            return "TRUNC(%s)" % field_name

    # Oracle crashes with "ORA-03113: end-of-file on communication channel"
    # if the time zone name is passed in parameter. Use interpolation instead.
    # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
    # This regexp matches all time zone names from the zoneinfo database.
    _tzname_re = re.compile(r'^[\w/:+-]+$')

    def _convert_field_to_tz(self, field_name, tzname):
        """Return SQL converting a UTC-stored datetime column to `tzname`."""
        if settings.USE_TZ:
            if not self._tzname_re.match(tzname):
                # tzname is interpolated, not bound; validate it to avoid
                # SQL injection through the time zone name.
                raise ValueError("Invalid time zone name: %s" % tzname)
            # Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE.
            field_name = "(FROM_TZ(%s, '0:00') AT TIME ZONE '%s')" % (field_name, tzname)
            # Extracting from a TIMESTAMP WITH TIME ZONE ignore the time zone.
            # Convert to a DATETIME, which is called DATE by Oracle. There's no
            # built-in function to do that; the easiest is to go through a string.
            field_name = "TO_CHAR(%s, 'YYYY-MM-DD HH24:MI:SS')" % field_name
            field_name = "TO_DATE(%s, 'YYYY-MM-DD HH24:MI:SS')" % field_name
        # Re-convert to a TIMESTAMP because EXTRACT only handles the date part
        # on DATE values, even though they actually store the time part.
        return "CAST(%s AS TIMESTAMP)" % field_name

    def datetime_cast_date_sql(self, field_name, tzname):
        """SQL casting a datetime column to its (timezone-adjusted) date."""
        field_name = self._convert_field_to_tz(field_name, tzname)
        sql = 'TRUNC(%s)' % field_name
        return sql, []

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """SQL extracting a date part from a timezone-adjusted datetime column."""
        field_name = self._convert_field_to_tz(field_name, tzname)
        sql = self.date_extract_sql(lookup_type, field_name)
        return sql, []

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """SQL truncating a timezone-adjusted datetime column to `lookup_type`."""
        field_name = self._convert_field_to_tz(field_name, tzname)
        # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
        if lookup_type in ('year', 'month'):
            sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
        elif lookup_type == 'day':
            sql = "TRUNC(%s)" % field_name
        elif lookup_type == 'hour':
            sql = "TRUNC(%s, 'HH24')" % field_name
        elif lookup_type == 'minute':
            sql = "TRUNC(%s, 'MI')" % field_name
        else:
            sql = field_name  # Cast to DATE removes sub-second precision.
        return sql, []

    def get_db_converters(self, expression):
        """Pick the converters needed to map cx_Oracle values back to the
        Python types Django expects for this expression's output field."""
        converters = super(DatabaseOperations, self).get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        if internal_type == 'TextField':
            converters.append(self.convert_textfield_value)
        elif internal_type == 'BinaryField':
            converters.append(self.convert_binaryfield_value)
        elif internal_type in ['BooleanField', 'NullBooleanField']:
            converters.append(self.convert_booleanfield_value)
        elif internal_type == 'DateTimeField':
            converters.append(self.convert_datetimefield_value)
        elif internal_type == 'DateField':
            converters.append(self.convert_datefield_value)
        elif internal_type == 'TimeField':
            converters.append(self.convert_timefield_value)
        elif internal_type == 'UUIDField':
            converters.append(self.convert_uuidfield_value)
        converters.append(self.convert_empty_values)
        return converters

    def convert_textfield_value(self, value, expression, connection, context):
        # CLOBs come back as LOB handles; read them into text.
        if isinstance(value, Database.LOB):
            value = force_text(value.read())
        return value

    def convert_binaryfield_value(self, value, expression, connection, context):
        # BLOBs come back as LOB handles; read them into bytes.
        if isinstance(value, Database.LOB):
            value = force_bytes(value.read())
        return value

    def convert_booleanfield_value(self, value, expression, connection, context):
        # Booleans are stored as NUMBER(1); map 0/1 back to False/True.
        if value in (0, 1):
            value = bool(value)
        return value

    # cx_Oracle always returns datetime.datetime objects for
    # DATE and TIMESTAMP columns, but Django wants to see a
    # python datetime.date, .time, or .datetime.

    def convert_datetimefield_value(self, value, expression, connection, context):
        if value is not None:
            if settings.USE_TZ:
                value = timezone.make_aware(value, self.connection.timezone)
        return value

    def convert_datefield_value(self, value, expression, connection, context):
        if isinstance(value, Database.Timestamp):
            value = value.date()
        return value

    def convert_timefield_value(self, value, expression, connection, context):
        if isinstance(value, Database.Timestamp):
            value = value.time()
        return value

    def convert_uuidfield_value(self, value, expression, connection, context):
        # UUIDs are stored as text; rebuild the uuid.UUID object.
        if value is not None:
            value = uuid.UUID(value)
        return value

    def convert_empty_values(self, value, expression, connection, context):
        # Oracle stores empty strings as null. We need to undo this in
        # order to adhere to the Django convention of using the empty
        # string instead of null, but only if the field accepts the
        # empty string.
        field = expression.output_field
        if value is None and field.empty_strings_allowed:
            value = ''
            if field.get_internal_type() == 'BinaryField':
                value = b''
        return value

    def deferrable_sql(self):
        return " DEFERRABLE INITIALLY DEFERRED"

    def drop_sequence_sql(self, table):
        return "DROP SEQUENCE %s;" % self.quote_name(self._get_sequence_name(table))

    def fetch_returned_insert_id(self, cursor):
        # The id was captured via a RETURNING ... INTO bind variable
        # (see return_insert_id()).
        return int(cursor._insert_id_var.getvalue())

    def field_cast_sql(self, db_type, internal_type):
        # LOB columns can't be compared directly; go through a substring.
        if db_type and db_type.endswith('LOB'):
            return "DBMS_LOB.SUBSTR(%s)"
        else:
            return "%s"

    def last_executed_query(self, cursor, sql, params):
        # http://cx-oracle.sourceforge.net/html/cursor.html#Cursor.statement
        # The DB API definition does not define this attribute.
        statement = cursor.statement
        if statement and six.PY2 and not isinstance(statement, unicode):  # NOQA: unicode undefined on PY3
            statement = statement.decode('utf-8')
        # Unlike Psycopg's `query` and MySQLdb`'s `_last_executed`, CxOracle's
        # `statement` doesn't contain the query parameters. refs #20010.
        return super(DatabaseOperations, self).last_executed_query(cursor, statement, params)

    def last_insert_id(self, cursor, table_name, pk_name):
        sq_name = self._get_sequence_name(table_name)
        cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
        return cursor.fetchone()[0]

    def lookup_cast(self, lookup_type, internal_type=None):
        # Case-insensitive lookups compare uppercased values on both sides.
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            return "UPPER(%s)"
        return "%s"

    def max_in_list_size(self):
        # Oracle limits IN (...) lists to 1000 items (ORA-01795).
        return 1000

    def max_name_length(self):
        # Oracle identifier limit (pre-12.2) is 30 bytes.
        return 30

    def pk_default_value(self):
        # NULL triggers the BEFORE INSERT trigger that pulls from the sequence.
        return "NULL"

    def prep_for_iexact_query(self, x):
        # No preprocessing needed; lookup_cast() handles case folding in SQL.
        return x

    def process_clob(self, value):
        """Read a CLOB handle into text; None maps to the empty string."""
        if value is None:
            return ''
        return force_text(value.read())

    def quote_name(self, name):
        # SQL92 requires delimited (quoted) names to be case-sensitive.  When
        # not quoted, Oracle has case-insensitive behavior for identifiers, but
        # always defaults to uppercase.
        # We simplify things by making Oracle identifiers always uppercase.
        if not name.startswith('"') and not name.endswith('"'):
            name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
        # Oracle puts the query text into a (query % args) construct, so % signs
        # in names need to be escaped. The '%%' will be collapsed back to '%' at
        # that stage so we aren't really making the name longer here.
        name = name.replace('%', '%%')
        return name.upper()

    def random_function_sql(self):
        return "DBMS_RANDOM.RANDOM"

    def regex_lookup(self, lookup_type):
        # 'c' = case-sensitive match, 'i' = case-insensitive.
        if lookup_type == 'regex':
            match_option = "'c'"
        else:
            match_option = "'i'"
        return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option

    def return_insert_id(self):
        # Bind an output variable to capture the trigger-generated id.
        return "RETURNING %s INTO %%s", (InsertIdVar(),)

    def savepoint_create_sql(self, sid):
        return convert_unicode("SAVEPOINT " + self.quote_name(sid))

    def savepoint_rollback_sql(self, sid):
        return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        # Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
        # 'TRUNCATE z;'... style SQL statements
        if tables:
            # Oracle does support TRUNCATE, but it seems to get us into
            # FK referential trouble, whereas DELETE FROM table works.
            sql = ['%s %s %s;' % (
                style.SQL_KEYWORD('DELETE'),
                style.SQL_KEYWORD('FROM'),
                style.SQL_FIELD(self.quote_name(table))
            ) for table in tables]
            # Since we've just deleted all the rows, running our sequence
            # ALTER code will reset the sequence to 0.
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []

    def sequence_reset_by_name_sql(self, style, sequences):
        """Build one _sequence_reset_sql PL/SQL block per sequence dict."""
        sql = []
        for sequence_info in sequences:
            sequence_name = self._get_sequence_name(sequence_info['table'])
            table_name = self.quote_name(sequence_info['table'])
            column_name = self.quote_name(sequence_info['column'] or 'id')
            query = self._sequence_reset_sql % {
                'sequence': sequence_name,
                'table': table_name,
                'column': column_name,
            }
            sql.append(query)
        return sql

    def sequence_reset_sql(self, style, model_list):
        """Build sequence-reset statements for the AutoField and implicit
        m2m id columns of every model in `model_list`."""
        from django.db import models
        output = []
        query = self._sequence_reset_sql
        for model in model_list:
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    table_name = self.quote_name(model._meta.db_table)
                    sequence_name = self._get_sequence_name(model._meta.db_table)
                    column_name = self.quote_name(f.column)
                    output.append(query % {'sequence': sequence_name,
                                           'table': table_name,
                                           'column': column_name})
                    # Only one AutoField is allowed per model, so don't
                    # continue to loop
                    break
            for f in model._meta.many_to_many:
                if not f.remote_field.through:
                    # Auto-created m2m through tables have an implicit 'id'.
                    table_name = self.quote_name(f.m2m_db_table())
                    sequence_name = self._get_sequence_name(f.m2m_db_table())
                    column_name = self.quote_name('id')
                    output.append(query % {'sequence': sequence_name,
                                           'table': table_name,
                                           'column': column_name})
        return output

    def start_transaction_sql(self):
        # Oracle starts transactions implicitly; no BEGIN needed.
        return ''

    def tablespace_sql(self, tablespace, inline=False):
        if inline:
            return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
        else:
            return "TABLESPACE %s" % self.quote_name(tablespace)

    def adapt_datefield_value(self, value):
        """
        Transform a date value to an object compatible with what is expected
        by the backend driver for date columns.
        The default implementation transforms the date to text, but that is not
        necessary for Oracle.
        """
        return value

    def adapt_datetimefield_value(self, value):
        """
        Transform a datetime value to an object compatible with what is expected
        by the backend driver for datetime columns.

        If naive datetime is passed assumes that is in UTC. Normally Django
        models.DateTimeField makes sure that if USE_TZ is True passed datetime
        is timezone aware.
        """
        if value is None:
            return None

        # cx_Oracle doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = timezone.make_naive(value, self.connection.timezone)
            else:
                raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")

        return Oracle_datetime.from_datetime(value)

    def adapt_timefield_value(self, value):
        """Transform a time value into the Oracle_datetime the driver needs
        (Oracle has no bare TIME type; a dummy 1900-01-01 date is used)."""
        if value is None:
            return None

        if isinstance(value, six.string_types):
            return datetime.datetime.strptime(value, '%H:%M:%S')

        # Oracle doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("Oracle backend does not support timezone-aware times.")

        return Oracle_datetime(1900, 1, 1, value.hour, value.minute,
                               value.second, value.microsecond)

    def combine_expression(self, connector, sub_expressions):
        "Oracle requires special cases for %% and & operators in query expressions"
        if connector == '%%':
            return 'MOD(%s)' % ','.join(sub_expressions)
        elif connector == '&':
            return 'BITAND(%s)' % ','.join(sub_expressions)
        elif connector == '|':
            raise NotImplementedError("Bit-wise or is not supported in Oracle.")
        elif connector == '^':
            return 'POWER(%s)' % ','.join(sub_expressions)
        return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)

    def _get_sequence_name(self, table):
        # Reserve 3 characters for the '_SQ' suffix within the name limit.
        name_length = self.max_name_length() - 3
        return '%s_SQ' % truncate_name(table, name_length).upper()

    def _get_trigger_name(self, table):
        # Reserve 3 characters for the '_TR' suffix within the name limit.
        name_length = self.max_name_length() - 3
        return '%s_TR' % truncate_name(table, name_length).upper()

    def bulk_insert_sql(self, fields, placeholder_rows):
        # Oracle has no multi-row VALUES; emulate with SELECT ... FROM DUAL
        # joined by UNION ALL.
        return " UNION ALL ".join(
            "SELECT %s FROM DUAL" % ", ".join(row)
            for row in placeholder_rows
        )

    def subtract_temporals(self, internal_type, lhs, rhs):
        if internal_type == 'DateField':
            lhs_sql, lhs_params = lhs
            rhs_sql, rhs_params = rhs
            # DATE - DATE yields a day count; wrap it into an interval.
            return "NUMTODSINTERVAL(%s - %s, 'DAY')" % (lhs_sql, rhs_sql), lhs_params + rhs_params
        return super(DatabaseOperations, self).subtract_temporals(internal_type, lhs, rhs)
|
brinbois/Sick-Beard | refs/heads/development | lib/hachoir_core/profiler.py | 186 | from hotshot import Profile
from hotshot.stats import load as loadStats
from os import unlink
def runProfiler(func, args=tuple(), kw={}, verbose=True, nb_func=25, sort_by=('cumulative', 'calls')):
    """Run func(*args, **kw) under the hotshot profiler, print the top
    nb_func entries sorted by sort_by, and return func's result.

    The profile data file is always removed afterwards, even on error.
    Python 2 only (hotshot and print-statement syntax).
    """
    # NOTE(review): kw={} is a mutable default argument; harmless here since
    # it is only unpacked, never mutated, but worth confirming.
    profile_filename = "/tmp/profiler"
    prof = Profile(profile_filename)
    try:
        if verbose:
            print "[+] Run profiler"
        result = prof.runcall(func, *args, **kw)
        prof.close()
        if verbose:
            print "[+] Stop profiler"
            print "[+] Process data..."
        stat = loadStats(profile_filename)
        if verbose:
            print "[+] Strip..."
        stat.strip_dirs()
        if verbose:
            print "[+] Sort data..."
        stat.sort_stats(*sort_by)
        if verbose:
            print
            print "[+] Display statistics"
            print
        stat.print_stats(nb_func)
        return result
    finally:
        # Remove the on-disk profile data in every case.
        unlink(profile_filename)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.