| code (string, lengths 2 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 FactorLibre (http://www.factorlibre.com)
# Hugo Santos <hugo.santos@factorlibre.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class DeliveryCarrier(models.Model):
_inherit = 'delivery.carrier'
@api.model
def _get_carrier_type_selection(self):
""" Add UPS carrier type """
res = super(DeliveryCarrier, self)._get_carrier_type_selection()
res.append(('ups', 'UPS'))
return res
ups_config_id = fields.Many2one('ups.config', string='UPS Config')
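# Illustrative note (not part of the original module): other carrier addons extend the same
# selection hook in the same way. A hypothetical module for a carrier called "acme" might look like:
#
#     class DeliveryCarrierAcme(models.Model):
#         _inherit = 'delivery.carrier'
#
#         @api.model
#         def _get_carrier_type_selection(self):
#             res = super(DeliveryCarrierAcme, self)._get_carrier_type_selection()
#             res.append(('acme', 'ACME'))
#             return res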
| factorlibre/carrier-delivery | delivery_carrier_ups/model/delivery.py | Python | agpl-3.0 | 1,429 |
# Copyright 2017-2018 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
{
'name': 'Directory Files Download',
'summary': 'Download all files of a directory on server',
'author': 'Onestein',
'website': 'http://www.onestein.eu',
'category': 'Tools',
'version': '11.0.1.0.0',
'license': 'AGPL-3',
'depends': [
'base_setup',
],
'data': [
'security/groups.xml',
'security/ir.model.access.csv',
'views/ir_filesystem_directory.xml',
],
'installable': True,
}
| onesteinbv/addons-onestein | base_directory_file_download/__manifest__.py | Python | agpl-3.0 | 584 |
# coding=utf-8
# the encoding declaration above only takes effect on the first or second line of the file
################# (1) Module imports
# import the cherrypy module; to use cherrypy on the OpenShift platform it must be installed via setup.py
import cherrypy
# import Python's built-in os module; since os is built in, it needs no setup.py installation
import os
# import the random module
import random
import math
from cherrypy.lib.static import serve_file
# import the gear module
#import gear
################# (2) Global variables
# determine the directory the program file is in; on Windows this ends with a backslash
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# set the data storage directories for cloud and local execution
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# the program is running in the cloud
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# the program is running locally
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
def downloadlist_access_list(files, starti, endi):
# links are generated according to each file's extension:
# images, video and STL files open in a popup window for viewing; other files are downloaded directly
# files holds all the entries to list, from starti to endi
# the file size is appended to each entry
outstring = ""
for index in range(int(starti)-1, int(endi)):
fileName, fileExtension = os.path.splitext(files[index])
fileExtension = fileExtension.lower()
fileSize = sizeof_fmt(os.path.getsize(download_root_dir+"downloads/"+files[index]))
# image files
if fileExtension == ".png" or fileExtension == ".jpg" or fileExtension == ".gif":
outstring += '<input type="checkbox" name="filename" value="'+files[index]+'"><a href="javascript:;" onClick="window.open(\'/downloads/'+ \
files[index]+'\',\'images\', \'catalogmode\',\'scrollbars\')">'+files[index]+'</a> ('+str(fileSize)+')<br />'
# stl files
elif fileExtension == ".stl":
outstring += '<input type="checkbox" name="filename" value="'+files[index]+'"><a href="javascript:;" onClick="window.open(\'/static/viewstl.html?src=/downloads/'+ \
files[index]+'\',\'images\', \'catalogmode\',\'scrollbars\')">'+files[index]+'</a> ('+str(fileSize)+')<br />'
# flv files
elif fileExtension == ".flv":
outstring += '<input type="checkbox" name="filename" value="'+files[index]+'"><a href="javascript:;" onClick="window.open(\'/flvplayer?filepath=/downloads/'+ \
files[index]+'\',\'images\', \'catalogmode\',\'scrollbars\')">'+files[index]+'</a> ('+str(fileSize)+')<br />'
# direct download files
else:
outstring += "<input type='checkbox' name='filename' value='"+files[index]+"'><a href='/download/?filepath="+download_root_dir.replace('\\', '/')+ \
"downloads/"+files[index]+"'>"+files[index]+"</a> ("+str(fileSize)+")<br />"
return outstring
def sizeof_fmt(num):
for x in ['bytes','KB','MB','GB']:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
################# (3) Class definitions
# the CherryPy web framework structure is used below
# the Midterm class below inherits from object, so it gets all of object's methods and attributes
class Midterm(object):
# startup configuration for the Midterm class
_cp_config = {
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
#'tools.sessions.locking' : 'explicit',
# sessions are stored as files in the tmp directory under data_dir
'tools.sessions.storage_path' : data_dir+'/tmp',
# session lifetime is set to 60 minutes
'tools.sessions.timeout' : 60
}
# cherrypy.expose (the line starting with @) is a decorator marking the following method as directly reachable via a URL
@cherrypy.expose
# index is the default method of a CherryPy class; when the URL names no method, index is executed
# methods taking self are instance methods; Python passes the object between them through self
def index(self):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
</head>
<body>
<a href="spur">spur</a><br />
<a href="drawspur">drawspur</a><br />
<a href="spur1">spur1</a><br />
<a href="drawspur1">drawspur1</a><br />
<a href="fileuploadform">上傳檔案</a><br />
<a href="download_list">列出上傳檔案</a><br />
</body>
</html>
'''
return outstring
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def spur(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<form method=POST action=spuraction>
Teeth: <input type=text name=N value='''+str(N)+'''><br />
Module: <input type=text name=M value = '''+str(M)+'''><br />
Pressure angle: <input type=text name=P value = '''+str(P)+'''><br />
<input type=submit value=send>
</form>
<br /><a href="index">index</a><br />
</body>
</html>
'''
return outstring
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def spuraction(self, N=20, M=5, P=15):
output = '''
<!doctype html><html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>2015CD Midterm</title>
</head>
<body>
'''
output += "齒數為"+str(N)+"<br />"
output += "模數為"+str(M)+"<br />"
output += "壓力角為"+str(P)+"<br />"
output +='''<br /><a href="/spur">spur</a>(按下後再輸入)<br />'''
output +='''<br /><a href="index">index</a><br />
</body>
</html>
'''
return output
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def drawspur(self, N=20, O=20, I=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
</head>
<body>
<form method=POST action=drawspuraction>
Teeth: <input type=text name=N value='''+str(N)+'''><br />
Module: <input type=text name=M value = '''+str(M)+'''><br />
Pressure angle: <input type=text name=P value = '''+str(P)+'''><br />
<input type=submit value="Draw spur gear profile">
</form>
<br /><a href="index">index</a><br />
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script>
window.onload=function(){
brython();
}
</script>
</body>
</html>
'''
return outstring
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def drawspuraction(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
</head>
<body>
<a href="index">index</a><br />
<!-- canvas drawing code follows -->
<script type="text/python">
# import document from browser
from browser import document
from math import *
# note: this imports spur.py, located under the Lib/site-packages directory
import spur
# prepare to draw on the canvas with id="plotarea"
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# draw using spur.py
# N is the number of teeth
N = '''+str(N)+'''
# M is the module
M = '''+str(M)+'''
# pressure angle P is in degrees
P = '''+str(P)+'''
# compute the gear's pitch radius
rp = N*M/2
spur.Spur(ctx).Gear(600, 600, rp, N, P, "blue")
</script>
<canvas id="plotarea" width="1200" height="1200"></canvas>
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script>
window.onload=function(){
brython();
}
</script>
</body>
</html>
'''
return outstring
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def spur1(self, N1=20, N2=30, N3=40, N4=50, N5=60, N6=70, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<form method=POST action=spuraction1>
Teeth 1: <input type=text name=N1 value='''+str(N1)+'''><br />
Teeth 2: <input type=text name=N2 value='''+str(N2)+'''><br />
Teeth 3: <input type=text name=N3 value='''+str(N3)+'''><br />
Teeth 4: <input type=text name=N4 value='''+str(N4)+'''><br />
Teeth 5: <input type=text name=N5 value='''+str(N5)+'''><br />
Teeth 6: <input type=text name=N6 value='''+str(N6)+'''><br />
Module: <input type=text name=M value = '''+str(M)+'''><br />
Pressure angle: <input type=text name=P value = '''+str(P)+'''><br />
<input type=submit value=send>
</form>
<br /><a href="index">index</a><br />
</body>
</html>
'''
return outstring
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def spuraction1(self, N1=20, N2=30, N3=40, N4=50, N5=60, N6=70, M=5, P=15):
output = '''
<!doctype html><html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>2015CD Midterm</title>
</head>
<body>
'''
output += "齒數1為"+str(N1)+"<br />"
output += "齒數2為"+str(N2)+"<br />"
output += "齒數3為"+str(N3)+"<br />"
output += "齒數4為"+str(N4)+"<br />"
output += "齒數5為"+str(N5)+"<br />"
output += "齒數6為"+str(N6)+"<br />"
output += "模數為"+str(M)+"<br />"
output += "壓力角為"+str(P)+"<br />"
output +='''<br /><a href="/spur1">spur1</a>(按下後再輸入)<br />'''
output +='''<br /><a href="index">index</a><br />
</body>
</html>
'''
return output
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def drawspur1(self, N1=20, N2=30, N3=40, N4=50, N5=60, N6=70, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
</head>
<body>
<form method=POST action=drawspuraction1>
Teeth 1: <input type=text name=N1 value='''+str(N1)+'''><br />
Teeth 2: <input type=text name=N2 value='''+str(N2)+'''><br />
Teeth 3: <input type=text name=N3 value='''+str(N3)+'''><br />
Teeth 4: <input type=text name=N4 value='''+str(N4)+'''><br />
Teeth 5: <input type=text name=N5 value='''+str(N5)+'''><br />
Teeth 6: <input type=text name=N6 value='''+str(N6)+'''><br />
Module: <input type=text name=M value = '''+str(M)+'''><br />
Pressure angle: <input type=text name=P value = '''+str(P)+'''><br />
<input type=submit value="Draw spur gear profile">
</form>
<br /><a href="index">index</a><br />
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script>
window.onload=function(){
brython();
}
</script>
</body>
</html>
'''
return outstring
@cherrypy.expose
# N is the number of teeth, M the module, P the pressure angle
def drawspuraction1(self, N1=20, N2=30, N3=40, N4=50, N5=60, N6=70, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<a href="index">index</a><br />
<!-- canvas drawing code follows -->
<script type="text/python">
# import document from browser
from browser import document
from math import *
# note: this imports spur.py, located under the Lib/site-packages directory
import spur
# prepare to draw on the canvas with id="plotarea"
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# draw with spur.py; the collaborative-design computation that follows must match the user's requirements
# the work of building part-drawing modules similar to spur.py is assigned to the other team members
# midx, midy are the gear center coordinates, rp the pitch radius, n the number of teeth, pa the pressure angle, color the line color
# Gear(midx, midy, rp, n=20, pa=20, color="black"):
# the module determines tooth size; meshing gears must share the same module and pressure angle
# M is the module
M = '''+str(M)+'''
# pressure angle pa is in degrees
pa = '''+str(P)+'''
# numbers of teeth of the gears
n_g1 = '''+str(N1)+'''
n_g2 = '''+str(N2)+'''
n_g3 = '''+str(N3)+'''
n_g4 = '''+str(N4)+'''
n_g5 = '''+str(N5)+'''
n_g6 = '''+str(N6)+'''
# compute the pitch radius of each gear
rp_g1 = M*n_g1/2
rp_g2 = M*n_g2/2
rp_g3 = M*n_g3/2
rp_g4 = M*n_g4/2
rp_g5= M*n_g5/2
rp_g6= M*n_g6/2
# center coordinates of gear 1
x_g1 = 400
y_g1 = 400
# center of gear 2; the gears are laid out horizontally, so every center shares the same y coordinate
x_g2 = x_g1 + rp_g1 + rp_g2
y_g2 = y_g1
# center of gear 3
x_g3 = x_g1 + rp_g1 + 2*rp_g2 + rp_g3
y_g3 = y_g1
# center of gear 4
x_g4 = x_g1 + rp_g1 + 2*rp_g2 + 2* rp_g3 + rp_g4
y_g4 = y_g1
# center of gear 5
x_g5= x_g1 + rp_g1 + 2*rp_g2 + 2* rp_g3 +2* rp_g4+ rp_g5
y_g5 = y_g1
# center of gear 6
x_g6= x_g1 + rp_g1 + 2*rp_g2 + 2* rp_g3 +2* rp_g4+2* rp_g5+rp_g6
y_g6= y_g1
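# Note (added for clarity): each center above is offset from the previous one by the sum of the two
# adjacent pitch radii, so consecutive pitch circles are tangent, e.g. x_g2 - x_g1 = rp_g1 + rp_g2
# and x_g3 - x_g2 = rp_g2 + rp_g3.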
# rotate gear 1 clockwise by 90 degrees
# use ctx.save() and ctx.restore() so each gear is rotated and drawn in its own local coordinates
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g1, y_g1)
# rotate to engage
ctx.rotate(pi/2)
# put it back
ctx.translate(-x_g1, -y_g1)
spur.Spur(ctx).Gear(x_g1, y_g1, rp_g1, n_g1, pa, "blue")
ctx.restore()
# rotate gear 2 counterclockwise by 90 degrees, then by one extra tooth, so it meshes with gear 1
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g2, y_g2)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g2)
# put it back
ctx.translate(-x_g2, -y_g2)
spur.Spur(ctx).Gear(x_g2, y_g2, rp_g2, n_g2, pa, "black")
ctx.restore()
# rotate gear 3 counterclockwise by 90 degrees, add back the positioning angle driven by gear 2, then one extra tooth counterclockwise so it meshes with gear 2
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g3, y_g3)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g3+(pi+pi/n_g2)*n_g2/n_g3)
# put it back
ctx.translate(-x_g3, -y_g3)
spur.Spur(ctx).Gear(x_g3, y_g3, rp_g3, n_g3, pa, "red")
ctx.restore()
# gear 4
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g4, y_g4)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g4+(pi+pi/n_g3)*n_g3/n_g4-(pi+pi/n_g2)*n_g2/n_g4)
# put it back
ctx.translate(-x_g4, -y_g4)
spur.Spur(ctx).Gear(x_g4, y_g4, rp_g4, n_g4, pa, "green")
ctx.restore()
# gear 5
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g5, y_g5)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g5+(pi+pi/n_g4)*n_g4/n_g5-(pi+pi/n_g3)*n_g3/n_g5+(pi+pi/n_g2)*n_g2/n_g5)
# put it back
ctx.translate(-x_g5, -y_g5)
spur.Spur(ctx).Gear(x_g5, y_g5, rp_g5, n_g5, pa, "purple")
ctx.restore()
# gear 6
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g6, y_g6)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g6+(pi+pi/n_g5)*n_g5/n_g6-
(pi+pi/n_g4)*n_g4/n_g6+(pi+pi/n_g3)*n_g3/n_g6-
(pi+pi/n_g2)*n_g2/n_g6)
# put it back
ctx.translate(-x_g6, -y_g6)
spur.Spur(ctx).Gear(x_g6, y_g6, rp_g6, n_g6, pa, "blue")
ctx.restore()
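# Summary of the meshing rotations above (added for clarity): each gear k is first turned by
# -pi/2 - pi/n_gk (a quarter turn plus half a tooth pitch), and the alternating terms of the form
# (pi + pi/n_gj)*n_gj/n_gk carry the accumulated rotation of the preceding gears down the train,
# scaled by the tooth ratios, so that each tooth lands in a gap of the gear it meshes with.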
</script>
<canvas id="plotarea" width="3000" height="3000"></canvas>
</body>
</html>
'''
return outstring
@cherrypy.expose
# W is the edge length of the cube
def cube(self, W=10):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
</head>
<body>
<!-- parameters from the form are handled by the cubeaction method -->
<form method=POST action=cubeaction>
Cube edge length: <input type=text name=W value='''+str(W)+'''><br />
<input type=submit value="Submit">
</form>
<br /><a href="index">index</a><br />
</body>
</html>
'''
return outstring
def __init__(self):
# hope to create downloads and images directories
if not os.path.isdir(download_root_dir+"downloads"):
try:
os.makedirs(download_root_dir+"downloads")
except:
print("mkdir error")
if not os.path.isdir(download_root_dir+"images"):
try:
os.makedirs(download_root_dir+"images")
except:
print("mkdir error")
if not os.path.isdir(download_root_dir+"tmp"):
try:
os.makedirs(download_root_dir+"tmp")
except:
print("mkdir error")
@cherrypy.expose
# W is the cube edge length; the default is 10
def cubeaction(self, W=10):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load pfcUtils.js and wl_header.js first -->
<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
document.writeln ("Error loading Pro/Web.Link header!");
</script>
<script>
window.onload=function(){
brython();
}
</script>
</head>
<!-- start brython() at window level instead of from the body onload -->
<body onload="">
<h1>Creo parametric part</h1>
<a href="index">index</a><br />
<!-- the Creo Pro/Web.Link program below was converted from JavaScript to Brython -->
<script type="text/python">
from browser import document, window
from math import *
# this region is Brython code, so comments must use Python syntax
# pfcIsWindows() is a native JavaScript function, so in Brython it must be called through the window object
if not window.pfcIsWindows(): window.netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect")
# if the third argument is False, the model is only loaded into the session without being displayed
# ret is the return value of the model open call
ret = document.pwl.pwlMdlOpen("cube.prt", "v:/tmp", False)
if not ret.Status:
window.alert("pwlMdlOpen failed (" + ret.ErrorCode + ")")
# assign the Pro/E runtime session to the variable session
session = window.pfcGetProESession()
# open the part file in a window and display it
pro_window = session.OpenFile(window.pfcCreate("pfcModelDescriptor").CreateFromFileName("cube.prt"))
solid = session.GetModel("cube.prt", window.pfcCreate("pfcModelType").MDL_PART)
# Brython follows Python syntax, so variables only need initial values and no declarations
# length, width, myf, myn, i, j, volume, count, d1Value, d2Value
# bind the model's length parameter to the local variable length
length = solid.GetParam("a1")
# bind the model's width parameter to the local variable width
width = solid.GetParam("a2")
# change the part dimensions
# myf=20
# myn=20
volume = 0
count = 0
try:
# the corresponding variables could instead be taken from the URL
# createParametersFromArguments ();
# here the part parameters are changed directly in the script
for i in range(5):
myf ='''+str(W)+'''
myn ='''+str(W)+''' + i*2.0
# set the values, using ModelItem's CreateDoubleParamValue to convert them into the floating-point values Pro/Web.Link expects
d1Value = window.pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf)
d2Value = window.pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn)
# assign the prepared values to the corresponding part parameters
length.Value = d1Value
width.Value = d2Value
# after the part dimensions are reset, call Regenerate to update the model
# what is null in JavaScript is None in Brython
solid.Regenerate(None)
# use GetMassProperty to obtain the model's mass-property object
properties = solid.GetMassProperty(None)
# volume = volume + properties.Volume
volume = properties.Volume
count = count + 1
window.alert("執行第"+count+"次,零件總體積:"+volume)
# 將零件存為新檔案
newfile = document.pwl.pwlMdlSaveAs("cube.prt", "v:/tmp", "cube"+count+".prt")
if (!newfile.Status):
window.alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")")
# window.alert("共執行:"+count+"次,零件總體積:"+volume)
# window.alert("零件體積:"+properties.Volume)
# window.alert("零件體積取整數:"+Math.round(properties.Volume));
except Exception as err:
window.alert("Exception occurred: " + window.pfcGetExceptionType(err))
</script>
'''
return outstring
@cherrypy.expose
def fileuploadform(self):
return '''<h1>file upload</h1>
<script src="/static/jquery.js" type="text/javascript"></script>
<script src="/static/axuploader.js" type="text/javascript"></script>
<script>
$(document).ready(function(){
$('.prova').axuploader({url:'fileaxupload', allowExt:['jpg','png','gif','7z','pdf','zip','flv','stl','swf'],
finish:function(x,files)
{
alert('All files have been uploaded: '+files);
},
enable:true,
remotePath:function(){
return 'downloads/';
}
});
});
</script>
<div class="prova"></div>
<input type="button" onclick="$('.prova').axuploader('disable')" value="asd" />
<input type="button" onclick="$('.prova').axuploader('enable')" value="ok" />
</section></body></html>
'''
@cherrypy.expose
def fileaxupload(self, *args, **kwargs):
filename = kwargs["ax-file-name"]
flag = kwargs["start"]
if flag == "0":
file = open(download_root_dir+"downloads/"+filename, "wb")
else:
file = open(download_root_dir+"downloads/"+filename, "ab")
file.write(cherrypy.request.body.read())
file.close()
return "files uploaded!"
@cherrypy.expose
def download_list(self, item_per_page=5, page=1, keyword=None, *args, **kwargs):
files = os.listdir(download_root_dir+"downloads/")
total_rows = len(files)
totalpage = int(math.ceil(total_rows / float(item_per_page)))
starti = int(item_per_page) * (int(page) - 1) + 1
endi = starti + int(item_per_page) - 1
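# Worked example (added note): with item_per_page=5 and page=3, starti = 5*(3-1)+1 = 11 and
# endi = 11+5-1 = 15, i.e. entries 11 through 15 are shown.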
outstring = "<form method='post' action='delete_file'>"
notlast = False
if total_rows > 0:
outstring += "<br />"
if (int(page) * int(item_per_page)) < total_rows:
notlast = True
if int(page) > 1:
outstring += "<a href='"
outstring += "download_list?&page=1&item_per_page="+str(item_per_page)+"&keyword="+str(cherrypy.session.get('download_keyword'))
outstring += "'><<</a> "
page_num = int(page) - 1
outstring += "<a href='"
outstring += "download_list?&page="+str(page_num)+"&item_per_page="+str(item_per_page)+"&keyword="+str(cherrypy.session.get('download_keyword'))
outstring += "'>Previous</a> "
span = 10
for index in range(int(page)-span, int(page)+span):
if index>= 0 and index< totalpage:
page_now = index + 1
if page_now == int(page):
outstring += "<font size='+1' color='red'>"+str(page)+" </font>"
else:
outstring += "<a href='"
outstring += "download_list?&page="+str(page_now)+"&item_per_page="+str(item_per_page)+"&keyword="+str(cherrypy.session.get('download_keyword'))
outstring += "'>"+str(page_now)+"</a> "
if notlast == True:
nextpage = int(page) + 1
outstring += " <a href='"
outstring += "download_list?&page="+str(nextpage)+"&item_per_page="+str(item_per_page)+"&keyword="+str(cherrypy.session.get('download_keyword'))
outstring += "'>Next</a>"
outstring += " <a href='"
outstring += "download_list?&page="+str(totalpage)+"&item_per_page="+str(item_per_page)+"&keyword="+str(cherrypy.session.get('download_keyword'))
outstring += "'>>></a><br /><br />"
if (int(page) * int(item_per_page)) < total_rows:
notlast = True
outstring += downloadlist_access_list(files, starti, endi)+"<br />"
else:
outstring += "<br /><br />"
outstring += downloadlist_access_list(files, starti, total_rows)+"<br />"
if int(page) > 1:
outstring += "<a href='"
outstring += "download_list?&page=1&item_per_page="+str(item_per_page)+"&keyword="+str(cherrypy.session.get('download_keyword'))
outstring += "'><<</a> "
page_num = int(page) - 1
outstring += "<a href='"
outstring += "download_list?&page="+str(page_num)+"&item_per_page="+str(item_per_page)+"&keyword="+str(cherrypy.session.get('download_keyword'))
outstring += "'>Previous</a> "
span = 10
for index in range(int(page)-span, int(page)+span):
#for ($j=$page-$range;$j<$page+$range;$j++)
if index >=0 and index < totalpage:
page_now = index + 1
if page_now == int(page):
outstring += "<font size='+1' color='red'>"+str(page)+" </font>"
else:
outstring += "<a href='"
outstring += "download_list?&page="+str(page_now)+"&item_per_page="+str(item_per_page)+"&keyword="+str(cherrypy.session.get('download_keyword'))
outstring += "'>"+str(page_now)+"</a> "
if notlast == True:
nextpage = int(page) + 1
outstring += " <a href='"
outstring += "download_list?&page="+str(nextpage)+"&item_per_page="+str(item_per_page)+"&keyword="+str(cherrypy.session.get('download_keyword'))
outstring += "'>Next</a>"
outstring += " <a href='"
outstring += "download_list?&page="+str(totalpage)+"&item_per_page="+str(item_per_page)+"&keyword="+str(cherrypy.session.get('download_keyword'))
outstring += "'>>></a>"
else:
outstring += "no data!"
outstring += "<br /><br /><input type='submit' value='delete'><input type='reset' value='reset'></form>"
return "<div class='container'><nav>"+ \
"</nav><section><h1>Download List</h1>"+outstring+"<br/><br /></body></html>"
class Download:
@cherrypy.expose
def index(self, filepath):
return serve_file(filepath, "application/x-download", "attachment")
################# (4) Application startup
# set up static directories/files relative to the program's directory
application_conf = {'/static':{
'tools.staticdir.on': True,
# a static directory must be created manually under the program's working directory
'tools.staticdir.dir': _curdir+"/static"},
'/downloads':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/downloads"},
'/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/images"}
}
root = Midterm()
root.download = Download()
#root.gear = gear.Gear()
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# running on OpenShift
application = cherrypy.Application(root, config=application_conf)
else:
# running locally
cherrypy.quickstart(root, config=application_conf)
| 40023255/2015cd_0505 | wsgi.py | Python | agpl-3.0 | 29,993 |
from __future__ import unicode_literals
import calendar
import json
import logging
import os
import pycountry
import pytz
import random
import regex
import stripe
import traceback
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from decimal import Decimal
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse
from django.db import models, transaction, connection
from django.db.models import Sum, F, Q
from django.utils import timezone
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.utils.text import slugify
from django.contrib.auth.models import User, Group
from enum import Enum
from redis_cache import get_redis_connection
from smartmin.models import SmartModel
from temba.locations.models import AdminBoundary, BoundaryAlias
from temba.nexmo import NexmoClient
from temba.utils import analytics, str_to_datetime, get_datetime_format, datetime_to_str, random_string
from temba.utils import timezone_to_country_code, languages
from temba.utils.cache import get_cacheable_result, get_cacheable_attr, incrby_existing
from temba.utils.email import send_template_email
from temba.utils.currencies import currency_for_country
from twilio.rest import TwilioRestClient
from urlparse import urlparse
from uuid import uuid4
from .bundles import BUNDLE_MAP, WELCOME_TOPUP_SIZE
UNREAD_INBOX_MSGS = 'unread_inbox_msgs'
UNREAD_FLOW_MSGS = 'unread_flow_msgs'
CURRENT_EXPORT_VERSION = 10
EARLIEST_IMPORT_VERSION = 3
MT_SMS_EVENTS = 1 << 0
MO_SMS_EVENTS = 1 << 1
MT_CALL_EVENTS = 1 << 2
MO_CALL_EVENTS = 1 << 3
ALARM_EVENTS = 1 << 4
ALL_EVENTS = MT_SMS_EVENTS | MO_SMS_EVENTS | MT_CALL_EVENTS | MO_CALL_EVENTS | ALARM_EVENTS
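# Illustrative note (added): webhook_events stores these flags as a bitmask, e.g.
# MT_SMS_EVENTS | MO_CALL_EVENTS == 0b1001 == 9; the is_notified_of_* helpers on Org below
# test membership with a bitwise AND.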
FREE_PLAN = 'FREE'
TRIAL_PLAN = 'TRIAL'
TIER1_PLAN = 'TIER1'
TIER2_PLAN = 'TIER2'
TIER3_PLAN = 'TIER3'
TIER_39_PLAN = 'TIER_39'
TIER_249_PLAN = 'TIER_249'
TIER_449_PLAN = 'TIER_449'
DAYFIRST = 'D'
MONTHFIRST = 'M'
PLANS = ((FREE_PLAN, _("Free Plan")),
(TRIAL_PLAN, _("Trial")),
(TIER_39_PLAN, _("Bronze")),
(TIER1_PLAN, _("Silver")),
(TIER2_PLAN, _("Gold (Legacy)")),
(TIER3_PLAN, _("Platinum (Legacy)")),
(TIER_249_PLAN, _("Gold")),
(TIER_449_PLAN, _("Platinum")))
DATE_PARSING = ((DAYFIRST, "DD-MM-YYYY"),
(MONTHFIRST, "MM-DD-YYYY"))
APPLICATION_SID = 'APPLICATION_SID'
ACCOUNT_SID = 'ACCOUNT_SID'
ACCOUNT_TOKEN = 'ACCOUNT_TOKEN'
NEXMO_KEY = 'NEXMO_KEY'
NEXMO_SECRET = 'NEXMO_SECRET'
NEXMO_UUID = 'NEXMO_UUID'
TRANSFERTO_ACCOUNT_LOGIN = 'TRANSFERTO_ACCOUNT_LOGIN'
TRANSFERTO_AIRTIME_API_TOKEN = 'TRANSFERTO_AIRTIME_API_TOKEN'
ORG_STATUS = 'STATUS'
SUSPENDED = 'suspended'
RESTORED = 'restored'
WHITELISTED = 'whitelisted'
ORG_LOW_CREDIT_THRESHOLD = 500
ORG_CREDIT_OVER = 'O'
ORG_CREDIT_LOW = 'L'
ORG_CREDIT_EXPIRING = 'E'
# cache keys and TTLs
ORG_LOCK_KEY = 'org:%d:lock:%s'
ORG_CREDITS_TOTAL_CACHE_KEY = 'org:%d:cache:credits_total'
ORG_CREDITS_PURCHASED_CACHE_KEY = 'org:%d:cache:credits_purchased'
ORG_CREDITS_USED_CACHE_KEY = 'org:%d:cache:credits_used'
ORG_ACTIVE_TOPUP_KEY = 'org:%d:cache:active_topup'
ORG_ACTIVE_TOPUP_REMAINING = 'org:%d:cache:credits_remaining:%d'
ORG_CREDIT_EXPIRING_CACHE_KEY = 'org:%d:cache:credits_expiring_soon'
ORG_LOW_CREDIT_THRESHOLD_CACHE_KEY = 'org:%d:cache:low_credits_threshold'
ORG_LOCK_TTL = 60 # 1 minute
ORG_CREDITS_CACHE_TTL = 7 * 24 * 60 * 60 # 1 week
class OrgEvent(Enum):
"""
Represents an internal org event
"""
topup_new = 16
topup_updated = 17
class OrgLock(Enum):
"""
Org-level lock types
"""
contacts = 1
channels = 2
credits = 3
field = 4
class OrgCache(Enum):
"""
Org-level cache types
"""
display = 1
credits = 2
class Org(SmartModel):
"""
An Org can have several users and is the main component that holds all Flows, Messages, Contacts, etc. Orgs
know their country so they can deal with locally formatted numbers (numbers provided without a country code). As such,
each org can only add phone channels from one country.
Users will create new Orgs for Flows that should be kept separate (say, for distinct projects), or for
each country where they are deploying messaging applications.
"""
name = models.CharField(verbose_name=_("Name"), max_length=128)
plan = models.CharField(verbose_name=_("Plan"), max_length=16, choices=PLANS, default=FREE_PLAN,
help_text=_("What plan your organization is on"))
plan_start = models.DateTimeField(verbose_name=_("Plan Start"), auto_now_add=True,
help_text=_("When the user switched to this plan"))
stripe_customer = models.CharField(verbose_name=_("Stripe Customer"), max_length=32, null=True, blank=True,
help_text=_("Our Stripe customer id for your organization"))
administrators = models.ManyToManyField(User, verbose_name=_("Administrators"), related_name="org_admins",
help_text=_("The administrators in your organization"))
viewers = models.ManyToManyField(User, verbose_name=_("Viewers"), related_name="org_viewers",
help_text=_("The viewers in your organization"))
editors = models.ManyToManyField(User, verbose_name=_("Editors"), related_name="org_editors",
help_text=_("The editors in your organization"))
surveyors = models.ManyToManyField(User, verbose_name=_("Surveyors"), related_name="org_surveyors",
help_text=_("The users can login via Android for your organization"))
language = models.CharField(verbose_name=_("Language"), max_length=64, null=True, blank=True,
choices=settings.LANGUAGES, help_text=_("The main language used by this organization"))
timezone = models.CharField(verbose_name=_("Timezone"), max_length=64)
date_format = models.CharField(verbose_name=_("Date Format"), max_length=1, choices=DATE_PARSING, default=DAYFIRST,
help_text=_("Whether day comes first or month comes first in dates"))
webhook = models.TextField(null=True, verbose_name=_("Webhook"),
help_text=_("Webhook endpoint and configuration"))
webhook_events = models.IntegerField(default=0, verbose_name=_("Webhook Events"),
help_text=_("Which type of actions will trigger webhook events."))
country = models.ForeignKey('locations.AdminBoundary', null=True, blank=True, on_delete=models.SET_NULL,
help_text="The country this organization should map results for.")
msg_last_viewed = models.DateTimeField(verbose_name=_("Message Last Viewed"), auto_now_add=True)
flows_last_viewed = models.DateTimeField(verbose_name=_("Flows Last Viewed"), auto_now_add=True)
config = models.TextField(null=True, verbose_name=_("Configuration"),
help_text=_("More Organization specific configuration"))
slug = models.SlugField(verbose_name=_("Slug"), max_length=255, null=True, blank=True, unique=True,
error_messages=dict(unique=_("This slug is not available")))
is_anon = models.BooleanField(default=False,
help_text=_("Whether this organization anonymizes the phone numbers of contacts within it"))
primary_language = models.ForeignKey('orgs.Language', null=True, blank=True, related_name='orgs',
help_text=_('The primary language will be used for contacts with no language preference.'),
on_delete=models.SET_NULL)
brand = models.CharField(max_length=128, default=settings.DEFAULT_BRAND, verbose_name=_("Brand"),
help_text=_("The brand used in emails"))
surveyor_password = models.CharField(null=True, max_length=128, default=None,
help_text=_('A password that allows users to register as surveyors'))
parent = models.ForeignKey('orgs.Org', null=True, blank=True, help_text=_('The parent org that manages this org'))
multi_org = models.BooleanField(default=False, help_text=_('Put this org on the multi org level'))
@classmethod
def get_unique_slug(cls, name):
slug = slugify(name)
unique_slug = slug
if unique_slug:
existing = Org.objects.filter(slug=unique_slug).exists()
count = 2
while existing:
unique_slug = "%s-%d" % (slug, count)
existing = Org.objects.filter(slug=unique_slug).exists()
count += 1
return unique_slug
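# Illustrative example (added note): get_unique_slug("My Org") slugifies to "my-org"; if that slug
# is already taken it tries "my-org-2", "my-org-3", ... until a free one is found.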
def create_sub_org(self, name, timezone=None, created_by=None):
if self.is_multi_org_level() and not self.parent:
if not timezone:
timezone = self.timezone
if not created_by:
created_by = self.created_by
# generate a unique slug
slug = Org.get_unique_slug(name)
org = Org.objects.create(name=name, timezone=timezone, brand=self.brand, parent=self, slug=slug,
created_by=created_by, modified_by=created_by)
org.administrators.add(created_by)
# initialize our org, but without any credits
org.initialize(brand=org.get_branding(), topup_size=0)
return org
def get_branding(self):
from temba.middleware import BrandingMiddleware
return BrandingMiddleware.get_branding_for_host(self.brand)
def lock_on(self, lock, qualifier=None):
"""
Creates the requested type of org-level lock
"""
r = get_redis_connection()
lock_key = ORG_LOCK_KEY % (self.pk, lock.name)
if qualifier:
lock_key += (":%s" % qualifier)
return r.lock(lock_key, ORG_LOCK_TTL)
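# Illustrative usage (added note, not from the original source):
#
#     with org.lock_on(OrgLock.credits):
#         ...  # code that must not run concurrently for this org's credits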
def has_contacts(self):
"""
Gets whether this org has any contacts
"""
from temba.contacts.models import ContactGroup
counts = ContactGroup.get_system_group_counts(self, (ContactGroup.TYPE_ALL, ContactGroup.TYPE_BLOCKED))
return (counts[ContactGroup.TYPE_ALL] + counts[ContactGroup.TYPE_BLOCKED]) > 0
def has_messages(self):
"""
Gets whether this org has any messages (or calls)
"""
from temba.msgs.models import SystemLabel
msg_counts = SystemLabel.get_counts(self, (SystemLabel.TYPE_INBOX,
SystemLabel.TYPE_OUTBOX,
SystemLabel.TYPE_CALLS))
return (msg_counts[SystemLabel.TYPE_INBOX] +
msg_counts[SystemLabel.TYPE_OUTBOX] +
msg_counts[SystemLabel.TYPE_CALLS]) > 0
def update_caches(self, event, entity):
"""
Update org-level caches in response to an event
"""
r = get_redis_connection()
if event in [OrgEvent.topup_new, OrgEvent.topup_updated]:
r.delete(ORG_CREDITS_TOTAL_CACHE_KEY % self.pk)
r.delete(ORG_CREDITS_PURCHASED_CACHE_KEY % self.pk)
r.delete(ORG_ACTIVE_TOPUP_KEY % self.pk)
r.delete(ORG_CREDIT_EXPIRING_CACHE_KEY % self.pk)
r.delete(ORG_LOW_CREDIT_THRESHOLD_CACHE_KEY % self.pk)
for topup in self.topups.all():
r.delete(ORG_ACTIVE_TOPUP_REMAINING % (self.pk, topup.pk))
def clear_caches(self, caches):
"""
Clears the given cache types (currently just credits) for this org. Returns number of keys actually deleted
"""
if OrgCache.credits in caches:
r = get_redis_connection()
active_topup_keys = [ORG_ACTIVE_TOPUP_REMAINING % (self.pk, topup.pk) for topup in self.topups.all()]
return r.delete(ORG_CREDITS_TOTAL_CACHE_KEY % self.pk,
ORG_CREDITS_USED_CACHE_KEY % self.pk,
ORG_CREDITS_PURCHASED_CACHE_KEY % self.pk,
ORG_ACTIVE_TOPUP_KEY % self.pk,
*active_topup_keys)
else:
return 0
def set_status(self, status):
config = self.config_json()
config[ORG_STATUS] = status
self.config = json.dumps(config)
self.save(update_fields=['config'])
def set_suspended(self):
self.set_status(SUSPENDED)
def set_whitelisted(self):
self.set_status(WHITELISTED)
def set_restored(self):
self.set_status(RESTORED)
def is_suspended(self):
return self.config_json().get(ORG_STATUS, None) == SUSPENDED
def is_whitelisted(self):
return self.config_json().get(ORG_STATUS, None) == WHITELISTED
@transaction.atomic
def import_app(self, data, user, site=None):
from temba.flows.models import Flow
from temba.campaigns.models import Campaign
from temba.triggers.models import Trigger
# determine if this app is being imported from the same site
data_site = data.get('site', None)
same_site = False
# compare the hosts of the sites to see if they are the same
if data_site and site:
same_site = urlparse(data_site).netloc == urlparse(site).netloc
# see if our export needs to be updated
export_version = data.get('version', 0)
from temba.orgs.models import EARLIEST_IMPORT_VERSION, CURRENT_EXPORT_VERSION
if export_version < EARLIEST_IMPORT_VERSION:
raise ValueError(_("Unknown version (%s)" % data.get('version', 0)))
if export_version < CURRENT_EXPORT_VERSION:
from temba.flows.models import FlowRevision
data = FlowRevision.migrate_export(self, data, same_site, export_version)
# we need to import flows first, they will resolve to
# the appropriate ids and update our definition accordingly
Flow.import_flows(data, self, user, same_site)
Campaign.import_campaigns(data, self, user, same_site)
Trigger.import_triggers(data, self, user, same_site)
@classmethod
def export_definitions(cls, site_link, flows=[], campaigns=[], triggers=[]):
# remove any triggers that aren't included in our flows
flow_uuids = set([f.uuid for f in flows])
filtered_triggers = []
for trigger in triggers:
if trigger.flow.uuid in flow_uuids:
filtered_triggers.append(trigger)
triggers = filtered_triggers
exported_flows = []
for flow in flows:
# only export current versions
flow.ensure_current_version()
exported_flows.append(flow.as_json(expand_contacts=True))
exported_campaigns = []
for campaign in campaigns:
for flow in campaign.get_flows():
flows.add(flow)
exported_campaigns.append(campaign.as_json())
exported_triggers = []
for trigger in triggers:
exported_triggers.append(trigger.as_json())
return dict(version=CURRENT_EXPORT_VERSION,
site=site_link,
flows=exported_flows,
campaigns=exported_campaigns,
triggers=exported_triggers)
def config_json(self):
if self.config:
return json.loads(self.config)
else:
return dict()
def can_add_sender(self):
"""
If an org's telephone send channel is an Android device, let them add a bulk sender
"""
from temba.contacts.models import TEL_SCHEME
from temba.channels.models import ANDROID
send_channel = self.get_send_channel(TEL_SCHEME)
return send_channel and send_channel.channel_type == ANDROID
def can_add_caller(self):
return not self.supports_ivr() and self.is_connected_to_twilio()
def supports_ivr(self):
return self.get_call_channel() or self.get_answer_channel()
def get_channel(self, scheme, country_code, role):
"""
Gets a channel for this org which supports the given scheme and role
"""
from temba.channels.models import SEND, CALL
channel = self.channels.filter(is_active=True, scheme=scheme, role__contains=role).order_by('-pk')
if country_code:
channel = channel.filter(country=country_code)
channel = channel.first()
# no channel? try without country
if not channel and country_code:
channel = self.channels.filter(is_active=True, scheme=scheme,
role__contains=role).order_by('-pk').first()
if channel and (role == SEND or role == CALL):
return channel.get_delegate(role)
else:
return channel
def get_channel_for_role(self, role, scheme=None, contact_urn=None, country_code=None):
from temba.contacts.models import TEL_SCHEME
from temba.channels.models import SEND
from temba.contacts.models import ContactURN
if not scheme and not contact_urn:
raise ValueError("Must specify scheme or contact URN")
if contact_urn:
scheme = contact_urn.scheme
# if URN has a previously used channel that is still active, use that
if contact_urn.channel and contact_urn.channel.is_active and role == SEND:
previous_sender = self.get_channel_delegate(contact_urn.channel, role)
if previous_sender:
return previous_sender
if scheme == TEL_SCHEME:
path = contact_urn.path
# we don't have a channel for this contact yet, let's try to pick one from the same carrier
# we need at least one digit to overlap to infer a channel
contact_number = path.strip('+')
prefix = 1
channel = None
# try to use only a channel in the same country
if not country_code:
country_code = ContactURN.derive_country_from_tel(path)
channels = []
if country_code:
for c in self.channels.all():
if c.country == country_code:
channels.append(c)
# no country specific channel, try to find any channel at all
if not channels:
channels = [c for c in self.channels.all()]
# filter based on role and activity (we do this in python as channels can be prefetched so it is quicker in those cases)
senders = []
for c in channels:
if c.is_active and c.address and role in c.role and not c.parent_id:
senders.append(c)
senders.sort(key=lambda chan: chan.id)
# if we have more than one match, find the one with the highest overlap
if len(senders) > 1:
for sender in senders:
channel_number = sender.address.strip('+')
for idx in range(prefix, len(channel_number)):
if idx >= prefix and channel_number[0:idx] == contact_number[0:idx]:
prefix = idx
channel = sender
else:
break
elif senders:
channel = senders[0]
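# Illustrative example (added note): for a contact number +250788123123, a sender with address
# +250788000000 shares the prefix "250788" and is preferred over one with address +250722000000,
# which only shares "2507".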
if channel:
if role == SEND:
return self.get_channel_delegate(channel, SEND)
else:
return channel
# get any send channel without any country or URN hints
return self.get_channel(scheme, country_code, role)
def get_send_channel(self, scheme=None, contact_urn=None, country_code=None):
from temba.channels.models import SEND
return self.get_channel_for_role(SEND, scheme=scheme, contact_urn=contact_urn, country_code=country_code)
def get_receive_channel(self, scheme, contact_urn=None, country_code=None):
from temba.channels.models import RECEIVE
return self.get_channel_for_role(RECEIVE, scheme=scheme, contact_urn=contact_urn, country_code=country_code)
def get_call_channel(self, contact_urn=None, country_code=None):
from temba.contacts.models import TEL_SCHEME
from temba.channels.models import CALL
return self.get_channel_for_role(CALL, scheme=TEL_SCHEME, contact_urn=contact_urn, country_code=country_code)
def get_answer_channel(self, contact_urn=None, country_code=None):
from temba.contacts.models import TEL_SCHEME
from temba.channels.models import ANSWER
return self.get_channel_for_role(ANSWER, scheme=TEL_SCHEME, contact_urn=contact_urn, country_code=country_code)
def get_channel_delegate(self, channel, role):
"""
Gets a channel's delegate for the given role with caching on the org object
"""
cache_attr = '__%d__delegate_%s' % (channel.id, role)
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
delegate = channel.get_delegate(role)
setattr(self, cache_attr, delegate)
return delegate
def get_schemes(self, role):
"""
Gets all URN schemes for which this org has channels configured
"""
cache_attr = '__schemes__%s' % role
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
schemes = set()
for channel in self.channels.filter(is_active=True, role__contains=role):
schemes.add(channel.scheme)
setattr(self, cache_attr, schemes)
return schemes
def normalize_contact_tels(self):
"""
Attempts to normalize any contacts which don't have full e164 phone numbers
"""
from temba.contacts.models import ContactURN, TEL_SCHEME
# do we have an org-level country code? if so, try to normalize any numbers not starting with +
country_code = self.get_country_code()
if country_code:
urns = ContactURN.objects.filter(org=self, scheme=TEL_SCHEME).exclude(path__startswith="+")
for urn in urns:
urn.ensure_number_normalization(country_code)
def get_resthooks(self):
"""
Returns the resthooks configured on this Org
"""
return self.resthooks.filter(is_active=True).order_by('slug')
def get_webhook_url(self):
"""
Returns a string with webhook url.
"""
return json.loads(self.webhook).get('url') if self.webhook else None
def get_webhook_headers(self):
"""
Returns a dictionary of any webhook headers, e.g.:
{'Authorization': 'Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==',
'X-My-Special-Header': 'woo'}
"""
return json.loads(self.webhook).get('headers', dict()) if self.webhook else dict()
def get_channel_countries(self):
channel_countries = []
if not self.is_connected_to_transferto():
return channel_countries
channel_country_codes = self.channels.filter(is_active=True).exclude(country=None)
channel_country_codes = channel_country_codes.values_list('country', flat=True).distinct()
for country_code in channel_country_codes:
country_obj = pycountry.countries.get(alpha2=country_code)
country_name = country_obj.name
currency = currency_for_country(country_code)
channel_countries.append(dict(code=country_code, name=country_name, currency_code=currency.letter,
currency_name=currency.name))
return sorted(channel_countries, key=lambda k: k['name'])
@classmethod
def get_possible_countries(cls):
return AdminBoundary.objects.filter(level=0).order_by('name')
def trigger_send(self, msgs=None):
"""
Triggers either our Android channels to sync, or for all our pending messages to be queued
to send.
"""
from temba.msgs.models import Msg
from temba.channels.models import Channel, ANDROID
# if we have msgs, then send just those
if msgs:
ids = [m.id for m in msgs]
# trigger syncs for our android channels
for channel in self.channels.filter(is_active=True, channel_type=ANDROID, msgs__id__in=ids):
channel.trigger_sync()
# and send those messages
Msg.send_messages(msgs)
# otherwise, sync all pending messages and channels
else:
for channel in self.channels.filter(is_active=True, channel_type=ANDROID):
channel.trigger_sync()
# otherwise, send any pending messages on our channels
r = get_redis_connection()
key = 'trigger_send_%d' % self.pk
# only try to send all pending messages if nobody is doing so already
if not r.get(key):
with r.lock(key, timeout=900):
pending = Channel.get_pending_messages(self)
Msg.send_messages(pending)
def has_airtime_transfers(self):
from temba.airtime.models import AirtimeTransfer
return AirtimeTransfer.objects.filter(org=self).exists()
def connect_transferto(self, account_login, airtime_api_token, user):
transferto_config = {TRANSFERTO_ACCOUNT_LOGIN: account_login.strip(),
TRANSFERTO_AIRTIME_API_TOKEN: airtime_api_token.strip()}
config = self.config_json()
config.update(transferto_config)
self.config = json.dumps(config)
self.modified_by = user
self.save()
def is_connected_to_transferto(self):
if self.config:
config = self.config_json()
transferto_account_login = config.get(TRANSFERTO_ACCOUNT_LOGIN, None)
transferto_airtime_api_token = config.get(TRANSFERTO_AIRTIME_API_TOKEN, None)
return transferto_account_login and transferto_airtime_api_token
else:
return False
def remove_transferto_account(self, user):
if self.config:
config = self.config_json()
config[TRANSFERTO_ACCOUNT_LOGIN] = ''
config[TRANSFERTO_AIRTIME_API_TOKEN] = ''
self.config = json.dumps(config)
self.modified_by = user
self.save()
def connect_nexmo(self, api_key, api_secret, user):
nexmo_uuid = str(uuid4())
nexmo_config = {NEXMO_KEY: api_key.strip(), NEXMO_SECRET: api_secret.strip(), NEXMO_UUID: nexmo_uuid}
config = self.config_json()
config.update(nexmo_config)
self.config = json.dumps(config)
self.modified_by = user
self.save()
# clear all our channel configurations
self.clear_channel_caches()
def nexmo_uuid(self):
config = self.config_json()
return config.get(NEXMO_UUID, None)
def connect_twilio(self, account_sid, account_token, user):
client = TwilioRestClient(account_sid, account_token)
app_name = "%s/%d" % (settings.TEMBA_HOST.lower(), self.pk)
apps = client.applications.list(friendly_name=app_name)
if apps:
temba_app = apps[0]
else:
app_url = "https://" + settings.TEMBA_HOST + "%s" % reverse('handlers.twilio_handler')
# the TwiML to run when the voice app fails
fallback_url = "https://" + settings.AWS_BUCKET_DOMAIN + "/voice_unavailable.xml"
temba_app = client.applications.create(friendly_name=app_name,
voice_url=app_url,
voice_fallback_url=fallback_url,
voice_fallback_method='GET',
sms_url=app_url,
sms_method="POST")
application_sid = temba_app.sid
twilio_config = {ACCOUNT_SID: account_sid, ACCOUNT_TOKEN: account_token, APPLICATION_SID: application_sid}
config = self.config_json()
config.update(twilio_config)
self.config = json.dumps(config)
self.modified_by = user
self.save()
# clear all our channel configurations
self.clear_channel_caches()
def is_connected_to_nexmo(self):
if self.config:
config = self.config_json()
nexmo_key = config.get(NEXMO_KEY, None)
nexmo_secret = config.get(NEXMO_SECRET, None)
nexmo_uuid = config.get(NEXMO_UUID, None)
return nexmo_key and nexmo_secret and nexmo_uuid
else:
return False
def is_connected_to_twilio(self):
if self.config:
config = self.config_json()
account_sid = config.get(ACCOUNT_SID, None)
account_token = config.get(ACCOUNT_TOKEN, None)
application_sid = config.get(APPLICATION_SID, None)
if account_sid and account_token and application_sid:
return True
return False
def remove_nexmo_account(self, user):
if self.config:
config = self.config_json()
config[NEXMO_KEY] = ''
config[NEXMO_SECRET] = ''
self.config = json.dumps(config)
self.modified_by = user
self.save()
# release any nexmo channels
from temba.channels.models import NEXMO
channels = self.channels.filter(is_active=True, channel_type=NEXMO)
for channel in channels:
channel.release()
# clear all our channel configurations
self.clear_channel_caches()
def remove_twilio_account(self, user):
if self.config:
config = self.config_json()
config[ACCOUNT_SID] = ''
config[ACCOUNT_TOKEN] = ''
config[APPLICATION_SID] = ''
self.config = json.dumps(config)
self.modified_by = user
self.save()
# release any twilio channels
from temba.channels.models import TWILIO
channels = self.channels.filter(is_active=True, channel_type=TWILIO)
for channel in channels:
channel.release()
# clear all our channel configurations
self.clear_channel_caches()
def get_verboice_client(self):
from temba.ivr.clients import VerboiceClient
channel = self.get_call_channel()
from temba.channels.models import VERBOICE
if channel.channel_type == VERBOICE:
return VerboiceClient(channel)
return None
def get_twilio_client(self):
config = self.config_json()
from temba.ivr.clients import TwilioClient
if config:
account_sid = config.get(ACCOUNT_SID, None)
auth_token = config.get(ACCOUNT_TOKEN, None)
if account_sid and auth_token:
return TwilioClient(account_sid, auth_token, org=self)
return None
def get_nexmo_client(self):
config = self.config_json()
if config:
api_key = config.get(NEXMO_KEY, None)
api_secret = config.get(NEXMO_SECRET, None)
if api_key and api_secret:
return NexmoClient(api_key, api_secret)
return None
def clear_channel_caches(self):
"""
Clears any cached configurations we have for any of our channels.
"""
from temba.channels.models import Channel
for channel in self.channels.exclude(channel_type='A'):
Channel.clear_cached_channel(channel.pk)
def get_country_code(self):
"""
Gets the 2-digit country code, e.g. RW, US
"""
# first try the actual country field
if self.country:
try:
country = pycountry.countries.get(name=self.country.name)
if country:
return country.alpha2
except KeyError:
# pycountry blows up if we pass it a country name it doesn't know
pass
# if that isn't set and we only have one country set for our channels, use that
countries = self.channels.filter(is_active=True).exclude(country=None).order_by('country')
countries = countries.distinct('country').values_list('country', flat=True)
if len(countries) == 1:
return countries[0]
return None
def get_language_codes(self):
return get_cacheable_attr(self, '_language_codes', lambda: {l.iso_code for l in self.languages.all()})
def set_languages(self, user, iso_codes, primary):
"""
Sets languages for this org, creating and deleting language objects as necessary
"""
for iso_code in iso_codes:
name = languages.get_language_name(iso_code)
language = self.languages.filter(iso_code=iso_code).first()
# if it's valid and doesn't exist yet, create it
if name and not language:
language = self.languages.create(iso_code=iso_code, name=name, created_by=user, modified_by=user)
if iso_code == primary:
self.primary_language = language
self.save(update_fields=('primary_language',))
# unset the primary language if not in the new list of codes
if self.primary_language and self.primary_language.iso_code not in iso_codes:
self.primary_language = None
self.save(update_fields=('primary_language',))
# remove any languages that are not in the new list
self.languages.exclude(iso_code__in=iso_codes).delete()
if hasattr(self, '_language_codes'): # invalidate language cache if set
delattr(self, '_language_codes')
def get_dayfirst(self):
return self.date_format == DAYFIRST
def get_tzinfo(self):
return pytz.timezone(self.timezone)
def format_date(self, datetime, show_time=True):
"""
Formats a datetime with or without time using this org's date format
"""
formats = get_datetime_format(self.get_dayfirst())
format = formats[1] if show_time else formats[0]
return datetime_to_str(datetime, format, False, self.get_tzinfo())
def parse_date(self, date_string):
if isinstance(date_string, datetime):
return date_string
return str_to_datetime(date_string, self.get_tzinfo(), self.get_dayfirst())
def parse_decimal(self, decimal_string):
parsed = None
try:
parsed = Decimal(decimal_string)
if not parsed.is_finite() or parsed > Decimal('999999999999999999999999'):
parsed = None
except Exception:
pass
return parsed
def generate_location_query(self, name, level, is_alias=False):
if is_alias:
query = dict(name__iexact=name, boundary__level=level)
query['__'.join(['boundary'] + ['parent'] * level)] = self.country
else:
query = dict(name__iexact=name, level=level)
query['__'.join(['parent'] * level)] = self.country
return query
def find_boundary_by_name(self, name, level, parent):
"""
Finds the boundary with the passed in name or alias on this organization at the stated level.
@returns Iterable of matching boundaries
"""
# first check if we have a direct name match
if parent:
boundary = parent.children.filter(name__iexact=name, level=level)
else:
query = self.generate_location_query(name, level)
boundary = AdminBoundary.objects.filter(**query)
# not found by name, try looking up by alias
if not boundary:
if parent:
alias = BoundaryAlias.objects.filter(name__iexact=name, boundary__level=level,
boundary__parent=parent).first()
else:
query = self.generate_location_query(name, level, True)
alias = BoundaryAlias.objects.filter(**query).first()
if alias:
boundary = [alias.boundary]
return boundary
def parse_location(self, location_string, level, parent=None):
"""
Attempts to parse the passed in location string at the passed in level. This does various tokenizing
of the string to try to find the best possible match.
@returns Iterable of matching boundaries
"""
# no country? bail
if not self.country or not isinstance(location_string, basestring):
return []
# now look up the boundary by full name
boundary = self.find_boundary_by_name(location_string, level, parent)
if not boundary:
# try removing punctuation and try that
bare_name = regex.sub(r"\W+", " ", location_string, flags=regex.UNICODE | regex.V0).strip()
boundary = self.find_boundary_by_name(bare_name, level, parent)
# if we didn't find it, tokenize it
if not boundary:
words = regex.split(r"\W+", location_string.lower(), flags=regex.UNICODE | regex.V0)
if len(words) > 1:
for word in words:
boundary = self.find_boundary_by_name(word, level, parent)
if not boundary:
break
if not boundary:
# still no boundary? try n-gram of 2
for i in range(0, len(words) - 1):
bigram = " ".join(words[i:i + 2])
boundary = self.find_boundary_by_name(bigram, level, parent)
if boundary:
break
return boundary
def get_org_admins(self):
return self.administrators.all()
def get_org_editors(self):
return self.editors.all()
def get_org_viewers(self):
return self.viewers.all()
def get_org_surveyors(self):
return self.surveyors.all()
def get_org_users(self):
org_users = self.get_org_admins() | self.get_org_editors() | self.get_org_viewers() | self.get_org_surveyors()
return org_users.distinct().order_by('email')
def latest_admin(self):
admin = self.get_org_admins().last()
# no admins? try editors
if not admin:
admin = self.get_org_editors().last()
# no editors? try viewers
if not admin:
admin = self.get_org_viewers().last()
return admin
def is_free_plan(self):
return self.plan == FREE_PLAN or self.plan == TRIAL_PLAN
def is_multi_user_level(self):
return self.get_purchased_credits() >= settings.MULTI_USER_THRESHOLD
def is_multi_org_level(self):
return not self.parent and (self.multi_org or self.get_purchased_credits() >= settings.MULTI_ORG_THRESHOLD)
def has_added_credits(self):
return self.get_credits_total() > WELCOME_TOPUP_SIZE
def get_user_org_group(self, user):
if user in self.get_org_admins():
user._org_group = Group.objects.get(name="Administrators")
elif user in self.get_org_editors():
user._org_group = Group.objects.get(name="Editors")
elif user in self.get_org_viewers():
user._org_group = Group.objects.get(name="Viewers")
elif user in self.get_org_surveyors():
user._org_group = Group.objects.get(name="Surveyors")
elif user.is_staff:
user._org_group = Group.objects.get(name="Administrators")
else:
user._org_group = None
return getattr(user, '_org_group', None)
def has_twilio_number(self):
from temba.channels.models import TWILIO
return self.channels.filter(channel_type=TWILIO)
def has_nexmo_number(self):
from temba.channels.models import NEXMO
return self.channels.filter(channel_type=NEXMO)
def create_welcome_topup(self, topup_size=WELCOME_TOPUP_SIZE):
if topup_size:
return TopUp.create(self.created_by, price=0, credits=topup_size, org=self)
return None
def create_system_labels_and_groups(self):
"""
Creates our system labels and groups for this organization so that we can keep track of counts etc.
"""
from temba.contacts.models import ContactGroup
from temba.msgs.models import SystemLabel
SystemLabel.create_all(self)
self.all_groups.create(name='All Contacts', group_type=ContactGroup.TYPE_ALL,
created_by=self.created_by, modified_by=self.modified_by)
self.all_groups.create(name='Blocked Contacts', group_type=ContactGroup.TYPE_BLOCKED,
created_by=self.created_by, modified_by=self.modified_by)
self.all_groups.create(name='Failed Contacts', group_type=ContactGroup.TYPE_STOPPED,
created_by=self.created_by, modified_by=self.modified_by)
def create_sample_flows(self, api_url):
import json
# get our sample dir
filename = os.path.join(settings.STATICFILES_DIRS[0], 'examples', 'sample_flows.json')
# for each of our samples
with open(filename, 'r') as example_file:
example = example_file.read()
user = self.get_user()
if user:
# do some substitutions
org_example = example.replace("{{EMAIL}}", user.username)
org_example = org_example.replace("{{API_URL}}", api_url)
try:
self.import_app(json.loads(org_example), user)
except Exception:
import traceback
logger = logging.getLogger(__name__)
msg = 'Failed creating sample flows'
logger.error(msg, exc_info=True, extra=dict(definition=json.loads(org_example)))
traceback.print_exc()
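    # webhook_events is a bitmask; the is_notified_of_* helpers below test the individual event flags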
def is_notified_of_mt_sms(self):
return self.webhook_events & MT_SMS_EVENTS > 0
def is_notified_of_mo_sms(self):
return self.webhook_events & MO_SMS_EVENTS > 0
def is_notified_of_mt_call(self):
return self.webhook_events & MT_CALL_EVENTS > 0
def is_notified_of_mo_call(self):
return self.webhook_events & MO_CALL_EVENTS > 0
def is_notified_of_alarms(self):
return self.webhook_events & ALARM_EVENTS > 0
def get_user(self):
return self.administrators.filter(is_active=True).first()
def get_credits_expiring_soon(self):
"""
Get the number of credits expiring in less than a month.
"""
return get_cacheable_result(ORG_CREDIT_EXPIRING_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL,
self._calculate_credits_expiring_soon)
def _calculate_credits_expiring_soon(self):
now = timezone.now()
one_month_period = now + timedelta(days=30)
expiring_topups_qs = self.topups.filter(is_active=True,
expires_on__lte=one_month_period).exclude(expires_on__lte=now)
used_credits = TopUpCredits.objects.filter(topup__in=expiring_topups_qs).aggregate(Sum('used')).get('used__sum')
expiring_topups_credits = expiring_topups_qs.aggregate(Sum('credits')).get('credits__sum')
more_valid_credits_qs = self.topups.filter(is_active=True, expires_on__gt=one_month_period)
more_valid_credits = more_valid_credits_qs.aggregate(Sum('credits')).get('credits__sum')
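        # only report credits as expiring soon when there are no later-expiring credits to fall back on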
if more_valid_credits or not expiring_topups_credits:
return 0
return expiring_topups_credits - used_credits
def has_low_credits(self):
return self.get_credits_remaining() <= self.get_low_credits_threshold()
def get_low_credits_threshold(self):
"""
Get the number of credits that counts as the low-credit threshold for this org
"""
return get_cacheable_result(ORG_LOW_CREDIT_THRESHOLD_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL,
self._calculate_low_credits_threshold)
def _calculate_low_credits_threshold(self):
now = timezone.now()
last_topup_credits = self.topups.filter(is_active=True, expires_on__gte=now).aggregate(Sum('credits')).get('credits__sum')
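        # the low-credit threshold is 15% of the currently valid (non-expired) credits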
return int(last_topup_credits * 0.15) if last_topup_credits else 0
def get_credits_total(self, force_dirty=False):
"""
Gets the total number of credits purchased or assigned to this org
"""
return get_cacheable_result(ORG_CREDITS_TOTAL_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL,
self._calculate_credits_total, force_dirty=force_dirty)
def get_purchased_credits(self):
"""
Returns the total number of credits purchased
:return:
"""
return get_cacheable_result(ORG_CREDITS_PURCHASED_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL,
self._calculate_purchased_credits)
def _calculate_purchased_credits(self):
purchased_credits = self.topups.filter(is_active=True, price__gt=0).aggregate(Sum('credits')).get('credits__sum')
return purchased_credits if purchased_credits else 0
def _calculate_credits_total(self):
active_credits = self.topups.filter(is_active=True, expires_on__gte=timezone.now()).aggregate(Sum('credits')).get('credits__sum')
active_credits = active_credits if active_credits else 0
# these are the credits that have been used in expired topups
expired_credits = TopUpCredits.objects.filter(
topup__org=self, topup__is_active=True, topup__expires_on__lte=timezone.now()
).aggregate(Sum('used')).get('used__sum')
expired_credits = expired_credits if expired_credits else 0
return active_credits + expired_credits
def get_credits_used(self):
"""
Gets the number of credits used by this org
"""
return get_cacheable_result(ORG_CREDITS_USED_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL,
self._calculate_credits_used)
def _calculate_credits_used(self):
used_credits_sum = TopUpCredits.objects.filter(topup__org=self, topup__is_active=True)
used_credits_sum = used_credits_sum.aggregate(Sum('used')).get('used__sum')
used_credits_sum = used_credits_sum if used_credits_sum else 0
unassigned_sum = self.msgs.filter(contact__is_test=False, topup=None, purged=False).count()
return used_credits_sum + unassigned_sum
def _calculate_credit_caches(self):
"""
Calculates both our total as well as our active topup
"""
get_cacheable_result(ORG_CREDITS_TOTAL_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL,
self._calculate_credits_total, force_dirty=True)
get_cacheable_result(ORG_CREDITS_USED_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL,
self._calculate_credits_used, force_dirty=True)
def get_credits_remaining(self):
"""
Gets the number of credits remaining for this org
"""
return self.get_credits_total() - self.get_credits_used()
def allocate_credits(self, user, org, amount):
"""
Allocates credits to a sub org of the current org, but only if it
belongs to us and we have enough credits to do so.
"""
if org.parent == self or self.parent == org.parent or self.parent == org:
if self.get_credits_remaining() >= amount:
with self.lock_on(OrgLock.credits):
# now debit our account
debited = None
while amount or debited == 0:
# remove the credits from ourselves
(topup_id, debited) = self.decrement_credit(amount)
if topup_id:
topup = TopUp.objects.get(id=topup_id)
# create the topup for our child, expiring on the same date
new_topup = TopUp.create(user, credits=debited, org=org, expires_on=topup.expires_on, price=None)
# create a debit for transaction history
Debit.objects.create(topup_id=topup_id, amount=debited, beneficiary=new_topup,
debit_type=Debit.TYPE_ALLOCATION, created_by=user, modified_by=user)
# decrease the amount of credits we need
amount -= debited
else:
break
# recalculate our caches
self._calculate_credit_caches()
org._calculate_credit_caches()
return True
# couldn't allocate credits
return False
def decrement_credit(self, amount=1):
"""
Decrements this orgs credit by amount.
Determines the active topup and returns that along with how many credits we were able
to decrement it by. Amount decremented is not guaranteed to be the full amount requested.
"""
total_used_key = ORG_CREDITS_USED_CACHE_KEY % self.pk
incrby_existing(total_used_key, amount)
r = get_redis_connection()
active_topup_key = ORG_ACTIVE_TOPUP_KEY % self.pk
active_topup_pk = r.get(active_topup_key)
if active_topup_pk:
remaining_key = ORG_ACTIVE_TOPUP_REMAINING % (self.pk, int(active_topup_pk))
# decrement our active # of credits
remaining = r.decr(remaining_key, amount)
# near the edge? calculate our active topup from scratch
if not remaining or int(remaining) < 100:
active_topup_pk = None
# calculate our active topup if we need to
if active_topup_pk is None:
active_topup = self._calculate_active_topup()
if active_topup:
active_topup_pk = active_topup.pk
r.set(active_topup_key, active_topup_pk, ORG_CREDITS_CACHE_TTL)
# can only reduce as much as we have available
if active_topup.get_remaining() < amount:
amount = active_topup.get_remaining()
remaining_key = ORG_ACTIVE_TOPUP_REMAINING % (self.pk, active_topup_pk)
r.set(remaining_key, active_topup.get_remaining() - amount, ORG_CREDITS_CACHE_TTL)
return (active_topup_pk, amount)
def _calculate_active_topup(self):
"""
Calculates the oldest non-expired topup that still has credits
"""
non_expired_topups = self.topups.filter(is_active=True, expires_on__gte=timezone.now()).order_by('expires_on', 'id')
active_topups = non_expired_topups.annotate(used_credits=Sum('topupcredits__used'))\
.filter(credits__gt=0)\
.filter(Q(used_credits__lt=F('credits')) | Q(used_credits=None))
return active_topups.first()
def apply_topups(self):
"""
We allow users to receive messages even if they're out of credit. Once they re-add credit, this function
retro-actively applies topups to any messages or IVR actions that don't have a topup
"""
from temba.msgs.models import Msg
with self.lock_on(OrgLock.credits):
# get all items that haven't been credited
msg_uncredited = self.msgs.filter(topup=None, contact__is_test=False, purged=False).order_by('created_on')
all_uncredited = list(msg_uncredited)
# get all topups that haven't expired
unexpired_topups = list(self.topups.filter(is_active=True, expires_on__gte=timezone.now()).order_by('-expires_on'))
# dict of topups to lists of their newly assigned items
new_topup_items = {topup: [] for topup in unexpired_topups}
# assign topup with credits to items...
current_topup = None
current_topup_remaining = 0
for item in all_uncredited:
# find a topup with remaining credit
while current_topup_remaining <= 0:
if not unexpired_topups:
break
current_topup = unexpired_topups.pop()
current_topup_remaining = current_topup.credits - current_topup.get_used()
if current_topup_remaining:
# if we found some credit, assign the item to the current topup
new_topup_items[current_topup].append(item)
current_topup_remaining -= 1
else:
# if not, then stop processing items
break
# update items in the database with their new topups
for topup, items in new_topup_items.iteritems():
Msg.all_messages.filter(id__in=[item.pk for item in items if isinstance(item, Msg)]).update(topup=topup)
# deactivate all our credit alerts
CreditAlert.reset_for_org(self)
def current_plan_start(self):
today = timezone.now().date()
# move it to the same day our plan started (taking into account short months)
plan_start = today.replace(day=min(self.plan_start.day, calendar.monthrange(today.year, today.month)[1]))
if plan_start > today:
plan_start -= relativedelta(months=1)
return plan_start
def current_plan_end(self):
plan_start = self.current_plan_start()
plan_end = plan_start + relativedelta(months=1)
return plan_end
def get_stripe_customer(self): # pragma: no cover
# We can't test stripe in unit tests since it requires javascript tokens to be generated
if not self.stripe_customer:
return None
try:
stripe.api_key = get_stripe_credentials()[1]
customer = stripe.Customer.retrieve(self.stripe_customer)
return customer
except Exception:
traceback.print_exc()
return None
def add_credits(self, bundle, token, user):
# look up our bundle
if bundle not in BUNDLE_MAP:
raise ValidationError(_("Invalid bundle: %s, cannot upgrade.") % bundle)
bundle = BUNDLE_MAP[bundle]
# adds credits to this org
stripe.api_key = get_stripe_credentials()[1]
# our actual customer object
customer = self.get_stripe_customer()
# 3 possible cases
# 1. we already have a stripe customer and the token matches it
# 2. we already have a stripe customer, but they have just added a new card, we need to use that one
# 3. we don't have a customer, so we need to create a new customer and use that card
# for our purposes, #1 and #2 are treated the same, we just always update the default card
try:
if not customer:
# then go create a customer object for this user
customer = stripe.Customer.create(card=token, email=user,
description="{ org: %d }" % self.pk)
stripe_customer = customer.id
self.stripe_customer = stripe_customer
self.save()
# update the stripe card to the one they just entered
else:
# remove existing cards
# TODO: this is all a bit wonky because we are using the Stripe JS widget...
# if we instead used our own mechanism to display / edit cards we could be a bit smarter
existing_cards = [c for c in customer.cards.all().data]
for card in existing_cards:
card.delete()
card = customer.cards.create(card=token)
customer.default_card = card.id
customer.save()
stripe_customer = customer.id
charge = stripe.Charge.create(amount=bundle['cents'],
currency='usd',
customer=stripe_customer,
description=bundle['description'])
remaining = self.get_credits_remaining()
# create our top up
topup = TopUp.create(user, price=bundle['cents'], credits=bundle['credits'],
stripe_charge=charge.id, org=self)
context = dict(description=bundle['description'],
charge_id=charge.id,
charge_date=timezone.now().strftime("%b %e, %Y"),
amount=bundle['dollars'],
credits=bundle['credits'],
remaining=remaining,
org=self.name,
cc_last4=charge.card.last4,
cc_type=charge.card.type,
cc_name=charge.card.name)
from temba.middleware import BrandingMiddleware
branding = BrandingMiddleware.get_branding_for_host(self.brand)
subject = _("%(name)s Receipt") % branding
template = "orgs/email/receipt_email"
to_email = user.email
context['customer'] = user
context['branding'] = branding
context['subject'] = subject
send_template_email(to_email, subject, template, context, branding)
# apply our new topups
self.apply_topups()
return topup
except Exception as e:
traceback.print_exc(e)
raise ValidationError(_("Sorry, we were unable to charge your card, please try again later or contact us."))
def account_value(self):
"""
How much has this org paid to date in dollars?
"""
paid = TopUp.objects.filter(org=self).aggregate(paid=Sum('price'))['paid']
if not paid:
paid = 0
return paid / 100
def update_plan(self, new_plan, token, user): # pragma: no cover
# We can't test stripe in unit tests since it requires javascript tokens to be generated
stripe.api_key = get_stripe_credentials()[1]
# no plan change? do nothing
if new_plan == self.plan:
return None
# this is our stripe customer id
stripe_customer = None
# our actual customer object
customer = self.get_stripe_customer()
if customer:
stripe_customer = customer.id
# cancel our plan on our stripe customer
if new_plan == FREE_PLAN:
if customer:
analytics.track(user.username, 'temba.plan_cancelled', dict(cancelledPlan=self.plan))
try:
subscription = customer.cancel_subscription(at_period_end=True)
except Exception as e:
traceback.print_exc(e)
raise ValidationError(_("Sorry, we are unable to cancel your plan at this time. Please contact us."))
else:
raise ValidationError(_("Sorry, we are unable to cancel your plan at this time. Please contact us."))
else:
# we have a customer, try to upgrade them
if customer:
try:
subscription = customer.update_subscription(plan=new_plan)
analytics.track(user.username, 'temba.plan_upgraded', dict(previousPlan=self.plan, plan=new_plan))
except Exception as e:
# can't load it, oh well, we'll try to create one dynamically below
traceback.print_exc(e)
customer = None
# if we don't have a customer, go create one
if not customer:
try:
# then go create a customer object for this user
customer = stripe.Customer.create(card=token, plan=new_plan, email=user,
description="{ org: %d }" % self.pk)
stripe_customer = customer.id
subscription = customer['subscription']
analytics.track(user.username, 'temba.plan_upgraded', dict(previousPlan=self.plan, plan=new_plan))
except Exception as e:
traceback.print_exc(e)
raise ValidationError(_("Sorry, we were unable to charge your card, please try again later or contact us."))
# update our org
self.stripe_customer = stripe_customer
if subscription['status'] != 'active':
self.plan = FREE_PLAN
else:
self.plan = new_plan
self.plan_start = datetime.fromtimestamp(subscription['start'])
self.save()
return subscription
def get_export_flows(self, include_archived=False):
from temba.flows.models import Flow
flows = self.flows.all().exclude(is_active=False).exclude(flow_type=Flow.MESSAGE).order_by('-modified_on')
if not include_archived:
flows = flows.filter(is_archived=False)
return flows
def get_recommended_channel(self):
from temba.channels.views import TWILIO_SEARCH_COUNTRIES
NEXMO_RECOMMEND_COUNTRIES = ['US', 'CA', 'GB', 'AU', 'AT', 'FI', 'DE', 'HK', 'HU',
'LT', 'NL', 'NO', 'PL', 'SE', 'CH', 'BE', 'ES', 'ZA']
countrycode = timezone_to_country_code(self.timezone)
recommended = 'android'
if countrycode in [country[0] for country in TWILIO_SEARCH_COUNTRIES]:
recommended = 'twilio'
elif countrycode in NEXMO_RECOMMEND_COUNTRIES:
recommended = 'nexmo'
elif countrycode == 'KE':
recommended = 'africastalking'
elif countrycode == 'ID':
recommended = 'hub9'
elif countrycode == 'SO':
recommended = 'shaqodoon'
elif countrycode == 'NP':
recommended = 'blackmyna'
elif countrycode == 'UG':
recommended = 'yo'
elif countrycode == 'PH':
recommended = 'globe'
return recommended
def increment_unread_msg_count(self, type):
"""
Increments our redis cache of how many unread messages exist for this org and type.
@param type: either UNREAD_INBOX_MSGS or UNREAD_FLOW_MSGS
"""
r = get_redis_connection()
r.hincrby(type, self.id, 1)
def get_unread_msg_count(self, msg_type):
"""
Gets the value of our redis cache of how many unread messages exist for this org and type.
@param msg_type: either UNREAD_INBOX_MSGS or UNREAD_FLOW_MSGS
"""
r = get_redis_connection()
count = r.hget(msg_type, self.id)
return 0 if count is None else int(count)
def clear_unread_msg_count(self, msg_type):
"""
Clears our redis cache of how many unread messages exist for this org and type.
@param msg_type: either UNREAD_INBOX_MSGS or UNREAD_FLOW_MSGS
"""
r = get_redis_connection()
r.hdel(msg_type, self.id)
def initialize(self, brand=None, topup_size=WELCOME_TOPUP_SIZE):
"""
Initializes an organization, creating all the dependent objects we need for it to work properly.
"""
from temba.middleware import BrandingMiddleware
if not brand:
brand = BrandingMiddleware.get_branding_for_host('')
self.create_system_labels_and_groups()
self.create_sample_flows(brand.get('api_link', ""))
self.create_welcome_topup(topup_size)
def save_media(self, file, extension):
"""
Saves the given file data with the extension and returns an absolute url to the result
"""
random_file = str(uuid4())
random_dir = random_file[0:4]
filename = '%s/%s' % (random_dir, random_file)
if extension:
filename = '%s.%s' % (filename, extension)
path = '%s/%d/media/%s' % (settings.STORAGE_ROOT_DIR, self.pk, filename)
location = default_storage.save(path, file)
return "https://%s/%s" % (settings.AWS_BUCKET_DOMAIN, location)
@classmethod
def create_user(cls, email, password):
user = User.objects.create_user(username=email, email=email, password=password)
return user
@classmethod
def get_org(cls, user):
if not user:
return None
if not hasattr(user, '_org'):
org = Org.objects.filter(administrators=user, is_active=True).first()
if org:
user._org = org
return getattr(user, '_org', None)
def __unicode__(self):
return self.name
# ===================== monkey patch User class with a few extra functions ========================
def get_user_orgs(user, brand=None):
org = user.get_org()
if not brand:
brand = org.brand if org else settings.DEFAULT_BRAND
if user.is_superuser:
return Org.objects.all()
user_orgs = user.org_admins.all() | user.org_editors.all() | user.org_viewers.all() | user.org_surveyors.all()
return user_orgs.filter(brand=brand).distinct().order_by('name')
def get_org(obj):
return getattr(obj, '_org', None)
def is_alpha_user(user):
return user.groups.filter(name='Alpha')
def is_beta_user(user):
return user.groups.filter(name='Beta')
def get_settings(user):
if not user:
return None
settings = UserSettings.objects.filter(user=user).first()
if not settings:
settings = UserSettings.objects.create(user=user)
return settings
def set_org(obj, org):
obj._org = org
def get_org_group(obj):
org_group = None
org = obj.get_org()
if org:
org_group = org.get_user_org_group(obj)
return org_group
def _user_has_org_perm(user, org, permission):
"""
Determines if a user has the given permission in this org
"""
if user.is_superuser:
return True
if user.is_anonymous():
return False
# has it innately? (customer support)
if user.has_perm(permission):
return True
org_group = org.get_user_org_group(user)
if not org_group:
return False
(app_label, codename) = permission.split(".")
return org_group.permissions.filter(content_type__app_label=app_label, codename=codename).exists()
User.get_org = get_org
User.set_org = set_org
User.is_alpha = is_alpha_user
User.is_beta = is_beta_user
User.get_settings = get_settings
User.get_user_orgs = get_user_orgs
User.get_org_group = get_org_group
User.has_org_perm = _user_has_org_perm
USER_GROUPS = (('A', _("Administrator")),
('E', _("Editor")),
('V', _("Viewer")),
('S', _("Surveyor")))
def get_stripe_credentials():
public_key = os.environ.get('STRIPE_PUBLIC_KEY', getattr(settings, 'STRIPE_PUBLIC_KEY', 'MISSING_STRIPE_PUBLIC_KEY'))
private_key = os.environ.get('STRIPE_PRIVATE_KEY', getattr(settings, 'STRIPE_PRIVATE_KEY', 'MISSING_STRIPE_PRIVATE_KEY'))
return (public_key, private_key)
class Language(SmartModel):
"""
A Language that has been added to the org. In the end a language is just an iso_code and name,
and it is not really restricted to real-world languages at this level. Instead we restrict the
language selection options to real-world languages.
"""
name = models.CharField(max_length=128)
iso_code = models.CharField(max_length=4)
org = models.ForeignKey(Org, verbose_name=_("Org"), related_name="languages")
@classmethod
def create(cls, org, user, name, iso_code):
return cls.objects.create(org=org, name=name, iso_code=iso_code, created_by=user, modified_by=user)
def as_json(self):
return dict(name=self.name, iso_code=self.iso_code)
@classmethod
def get_localized_text(cls, text_translations, preferred_languages, default_text):
"""
Returns the appropriate translation to use.
:param text_translations: A dictionary (or plain text) which contains our message indexed by language iso code
:param preferred_languages: The prioritized list of language preferences (list of iso codes)
:param default_text: default text to use if no match is found
"""
# No translations, return our default text
if not text_translations:
return default_text
# If we are handed raw text without translations, just return that
if not isinstance(text_translations, dict):
return text_translations
# otherwise, find the first preferred language
for lang in preferred_languages:
localized = text_translations.get(lang, None)
if localized:
return localized
return default_text
def __unicode__(self):
return '%s' % self.name
class Invitation(SmartModel):
"""
An Invitation to an e-mail address to join an Org with specific roles.
"""
org = models.ForeignKey(Org, verbose_name=_("Org"), related_name="invitations",
help_text=_("The organization to which the account is invited to view"))
email = models.EmailField(verbose_name=_("Email"), help_text=_("The email to which we send the invitation of the viewer"))
secret = models.CharField(verbose_name=_("Secret"), max_length=64, unique=True,
help_text=_("a unique code associated with this invitation"))
host = models.CharField(max_length=32, help_text=_("The host this invitation was created on"))
user_group = models.CharField(max_length=1, choices=USER_GROUPS, default='V', verbose_name=_("User Role"))
@classmethod
def create(cls, org, user, email, user_group, host):
return cls.objects.create(org=org, email=email, user_group=user_group, host=host,
created_by=user, modified_by=user)
def save(self, *args, **kwargs):
if not self.secret:
secret = random_string(64)
while Invitation.objects.filter(secret=secret):
secret = random_string(64)
self.secret = secret
return super(Invitation, self).save(*args, **kwargs)
@classmethod
def generate_random_string(cls, length):
"""
Generates an alphanumeric secret of [length] characters
"""
letters = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ" # avoid things that could be mistaken ex: 'I' and '1'
return ''.join([random.choice(letters) for _ in range(length)])
def send_invitation(self):
from .tasks import send_invitation_email_task
send_invitation_email_task(self.id)
def send_email(self):
# no-op if we do not know the email
if not self.email:
return
from temba.middleware import BrandingMiddleware
branding = BrandingMiddleware.get_branding_for_host(self.host)
subject = _("%(name)s Invitation") % branding
template = "orgs/email/invitation_email"
to_email = self.email
context = dict(org=self.org, now=timezone.now(), branding=branding, invitation=self)
context['subject'] = subject
send_template_email(to_email, subject, template, context, branding)
class UserSettings(models.Model):
"""
User specific configuration
"""
user = models.ForeignKey(User, related_name='settings')
language = models.CharField(max_length=8, choices=settings.LANGUAGES, default="en-us",
help_text=_('Your preferred language'))
tel = models.CharField(verbose_name=_("Phone Number"), max_length=16, null=True, blank=True,
help_text=_("Phone number for testing and recording voice flows"))
def get_tel_formatted(self):
if self.tel:
import phonenumbers
normalized = phonenumbers.parse(self.tel, None)
return phonenumbers.format_number(normalized, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
class TopUp(SmartModel):
"""
TopUps are used to track usage across the platform. Each TopUp represents a certain number of
credits that can be consumed by messages.
"""
org = models.ForeignKey(Org, related_name='topups',
help_text="The organization that was toppped up")
price = models.IntegerField(null=True, blank=True, verbose_name=_("Price Paid"),
help_text=_("The price paid for the messages in this top up (in cents)"))
credits = models.IntegerField(verbose_name=_("Number of Credits"),
help_text=_("The number of credits bought in this top up"))
expires_on = models.DateTimeField(verbose_name=_("Expiration Date"),
help_text=_("The date that this top up will expire"))
stripe_charge = models.CharField(verbose_name=_("Stripe Charge Id"), max_length=32, null=True, blank=True,
help_text=_("The Stripe charge id for this charge"))
comment = models.CharField(max_length=255, null=True, blank=True,
help_text="Any comment associated with this topup, used when we credit accounts")
@classmethod
def create(cls, user, price, credits, stripe_charge=None, org=None, expires_on=None):
"""
Creates a new topup
"""
if not org:
org = user.get_org()
if not expires_on:
expires_on = timezone.now() + timedelta(days=365) # credits last 1 year
topup = TopUp.objects.create(org=org, price=price, credits=credits, expires_on=expires_on,
stripe_charge=stripe_charge, created_by=user, modified_by=user)
org.update_caches(OrgEvent.topup_new, topup)
return topup
def get_ledger(self):
debits = self.debits.filter(debit_type=Debit.TYPE_ALLOCATION).order_by('-created_by')
balance = self.credits
ledger = []
for debit in debits:
balance -= debit.amount
ledger.append(dict(date=debit.created_on,
comment=_('Transfer to %(org)s') % dict(org=debit.beneficiary.org.name),
amount=-debit.amount,
balance=balance))
now = timezone.now()
expired = self.expires_on < now
# add a line for used message credits
if self.get_remaining() < balance:
ledger.append(dict(date=self.expires_on if expired else now,
comment=_('Messaging credits used'),
amount=self.get_remaining() - balance,
balance=self.get_remaining()))
# add a line for expired credits
if expired and self.get_remaining() > 0:
ledger.append(dict(date=self.expires_on,
comment=_('Expired credits'),
amount=-self.get_remaining(),
balance=0))
return ledger
def get_price_display(self):
if self.price is None:
return ""
elif self.price == 0:
return _("Free")
return "$%.2f" % self.dollars()
def dollars(self):
if self.price == 0:
return 0
else:
return Decimal(self.price) / Decimal(100)
def revert_topup(self):
# unwind any items that were assigned to this topup
self.msgs.update(topup=None)
# mark this topup as inactive
self.is_active = False
self.save()
def get_stripe_charge(self):
try:
stripe.api_key = get_stripe_credentials()[1]
return stripe.Charge.retrieve(self.stripe_charge)
except Exception:
traceback.print_exc()
return None
def get_used(self):
"""
Calculates how many credits of this topup have actually been used
"""
used = TopUpCredits.objects.filter(topup=self).aggregate(used=Sum('used'))
return 0 if not used['used'] else used['used']
def get_remaining(self):
"""
Returns how many credits remain on this topup
"""
return self.credits - self.get_used()
def __unicode__(self):
return "%s Credits" % self.credits
class Debit(SmartModel):
"""
Transactional history of credits allocated to other topups or chunks of archived messages
"""
TYPE_ALLOCATION = 'A'
TYPE_PURGE = 'P'
DEBIT_TYPES = ((TYPE_ALLOCATION, 'Allocation'),
(TYPE_PURGE, 'Purge'))
topup = models.ForeignKey(TopUp, related_name="debits", help_text=_("The topup these credits are applied against"))
amount = models.IntegerField(help_text=_('How many credits were debited'))
beneficiary = models.ForeignKey(TopUp, null=True,
related_name="allocations",
help_text=_('Optional topup that was allocated with these credits'))
debit_type = models.CharField(max_length=1, choices=DEBIT_TYPES, null=False, help_text=_('What caused this debit'))
class TopUpCredits(models.Model):
"""
Used to track number of credits used on a topup, mostly maintained by triggers on Msg insertion.
"""
topup = models.ForeignKey(TopUp,
help_text=_("The topup these credits are being used against"))
used = models.IntegerField(help_text=_("How many credits were used, can be negative"))
LAST_SQUASH_KEY = 'last_topupcredits_squash'
@classmethod
def squash_credits(cls):
# get the id of the last count we squashed
r = get_redis_connection()
last_squash = r.get(TopUpCredits.LAST_SQUASH_KEY)
if not last_squash:
last_squash = 0
# get the unique flow ids for all new ones
squash_count = 0
for credits in TopUpCredits.objects.filter(id__gt=last_squash).order_by('topup_id').distinct('topup_id'):
# perform our atomic squash in SQL by calling our squash method
with connection.cursor() as c:
c.execute("SELECT temba_squash_topupcredits(%s);", (credits.topup_id,))
squash_count += 1
# insert our new top squashed id
max_id = TopUpCredits.objects.all().order_by('-id').first()
if max_id:
r.set(TopUpCredits.LAST_SQUASH_KEY, max_id.id)
class CreditAlert(SmartModel):
"""
Tracks when we have sent alerts to organization admins about low credits.
"""
ALERT_TYPES_CHOICES = ((ORG_CREDIT_OVER, _("Credits Over")),
(ORG_CREDIT_LOW, _("Low Credits")),
(ORG_CREDIT_EXPIRING, _("Credits expiring soon")))
org = models.ForeignKey(Org, help_text="The organization this alert was triggered for")
alert_type = models.CharField(max_length=1, choices=ALERT_TYPES_CHOICES,
help_text="The type of this alert")
@classmethod
def trigger_credit_alert(cls, org, alert_type):
# is there already an active alert at this threshold? if so, exit
if CreditAlert.objects.filter(is_active=True, org=org, alert_type=alert_type):
return None
print "triggering %s credits alert type for %s" % (alert_type, org.name)
admin = org.get_org_admins().first()
if admin:
# create our alert object and trigger our event
alert = CreditAlert.objects.create(org=org, alert_type=alert_type,
created_by=admin, modified_by=admin)
alert.send_alert()
def send_alert(self):
from .tasks import send_alert_email_task
send_alert_email_task(self.id)
def send_email(self):
email = self.created_by.email
if not email:
return
from temba.middleware import BrandingMiddleware
branding = BrandingMiddleware.get_branding_for_host(self.org.brand)
subject = _("%(name)s Credits Alert") % branding
template = "orgs/email/alert_email"
to_email = email
context = dict(org=self.org, now=timezone.now(), branding=branding, alert=self, customer=self.created_by)
context['subject'] = subject
send_template_email(to_email, subject, template, context, branding)
@classmethod
def reset_for_org(cls, org):
CreditAlert.objects.filter(org=org).update(is_active=False)
@classmethod
def check_org_credits(cls):
from temba.msgs.models import Msg
# all active orgs in the last hour
active_orgs = Msg.current_messages.filter(created_on__gte=timezone.now() - timedelta(hours=1))
active_orgs = active_orgs.order_by('org').distinct('org')
for msg in active_orgs:
org = msg.org
# is this org out of credits?
org_remaining_credits = org.get_credits_remaining()
org_low_credits = org.has_low_credits()
org_credits_expiring = org.get_credits_expiring_soon()
if org_remaining_credits <= 0:
CreditAlert.trigger_credit_alert(org, ORG_CREDIT_OVER)
elif org_low_credits:
CreditAlert.trigger_credit_alert(org, ORG_CREDIT_LOW)
elif org_credits_expiring > 0:
CreditAlert.trigger_credit_alert(org, ORG_CREDIT_EXPIRING)
|
ewheeler/rapidpro
|
temba/orgs/models.py
|
Python
|
agpl-3.0
| 82,570
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Delivery Costs',
'version': '1.0',
'category': 'Sales Management',
'description': """
Allows you to add delivery methods in sale orders and picking.
==============================================================
You can define your own carrier and delivery grids for prices. When creating
invoices from picking, the system is able to add and compute the shipping line.
""",
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'delivery_view.xml',
'partner_view.xml',
'delivery_data.xml',
'views/report_shipping.xml',
'views/report_deliveryslip.xml'
],
'demo': ['delivery_demo.xml'],
'test': [
'../account/test/account_minimal_test.xml',
'test/delivery_cost.yml',
'test/stock_move_values_with_invoice_before_delivery.yml',
],
'installable': True,
'auto_install': False,
}
|
mkieszek/odoo
|
addons/delivery/__openerp__.py
|
Python
|
agpl-3.0
| 1,023
|
# -*- coding: utf-8 -*-
import APITaxi_models as models
from . import manager
from sqlalchemy import distinct
from geoalchemy2 import shape
from shapely import geometry, ops
import json
from operator import itemgetter
from flask import current_app
@manager.command
def update_zupc():
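    """Point every ADS row at the most recent ZUPC sharing its INSEE code."""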
insee_list = map(itemgetter(0), current_app.extensions['sqlalchemy'].db\
.session.query(distinct(models.ADS.insee)).all())
for insee in insee_list:
zupc = models.ZUPC.query.filter_by(insee=insee).order_by(
models.ZUPC.id.desc()).first()
for ads in models.ADS.query.filter_by(insee=insee).all():
ads.zupc_id = zupc.id
current_app.extensions['sqlalchemy'].db.session.commit()
def add_zupc(wkb, insee, parent):
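    """Get or create the ZUPC for `insee`, copying departement and name from `parent`, then attach the shape and parent link."""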
zupc = models.ZUPC.query.filter_by(insee=insee).first()
if not zupc:
zupc = models.ZUPC()
zupc.insee = insee
zupc.departement = parent.departement
zupc.nom = parent.nom
current_app.extensions['sqlalchemy'].db.session.add(zupc)
# This is the case in Paris and Lyon, but it's not important
zupc.shape = wkb
zupc.parent_id = parent.id
current_app.extensions['sqlalchemy'].db.session.add(zupc)
@manager.command
def load_zupc(zupc_path):
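    """Load ZUPC shapes from a GeoJSON file: each feature carries INSEE codes in its properties, and the first code matching an existing ZUPC acts as the parent for all of them."""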
with open(zupc_path) as f:
for feature in json.load(f)['features']:
wkb = shape.from_shape(geometry.shape(feature['geometry']))
properties = feature['properties']
for p in properties:
parent = models.ZUPC.query.filter_by(insee=p).first()
if parent:
break
if not parent:
current_app.logger.error(
'Unable to get an insee code in: {}'.format(properties)
)
return
for insee in properties:
add_zupc(wkb, insee, parent)
current_app.extensions['sqlalchemy'].db.session.commit()
update_zupc()
@manager.command
def add_airport_zupc(zupc_file_path, insee):
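    """Union an airport polygon (from a GeoJSON file) with each given commune's ZUPC shape and save the result as a new '<insee>A' ZUPC."""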
if isinstance(insee, str) or isinstance(insee, unicode):
insee = [insee]
with open(zupc_file_path) as f_zupc:
geojson = json.load(f_zupc)
if geojson['type'] == 'FeatureCollection':
geojson = geojson['features'][0]
wkb_airport = geometry.shape(geojson['geometry'])
for i in insee:
parent = models.ZUPC.query.filter_by(insee=i).first()
if not parent:
current_app.logger.error('Unable to find parent ZUPC: {}'.format(
i)
)
current_app.logger.info('Begin to compute union')
l = [wkb_airport] + list(shape.to_shape(parent.shape).geoms)
wkb = geometry.MultiPolygon([ops.cascaded_union(l)])
current_app.logger.info('Finished to compute union')
add_zupc(shape.from_shape(wkb), i + 'A', parent)
current_app.extensions['sqlalchemy'].db.session.commit()
|
l-vincent-l/APITaxi
|
APITaxi/commands/load_zupc.py
|
Python
|
agpl-3.0
| 2,976
|
__copyright__ = "Copyright 2017 Birkbeck, University of London"
__author__ = "Martin Paul Eve & Andy Byers"
__license__ = "AGPL v3"
__maintainer__ = "Birkbeck Centre for Technology and Publishing"
from django.conf.urls import url
from reports import views
urlpatterns = [
# Editor URLs
url(r'^$', views.index, name='reports_index'),
url(r'^metrics/$', views.metrics, name='reports_metrics'),
url(r'^doiresolution/$', views.dois, name='reports_dois'),
]
|
BirkbeckCTP/janeway
|
src/reports/urls.py
|
Python
|
agpl-3.0
| 476
|
""" URLs for User Authentication """
from django.conf import settings
from django.conf.urls import include, url
from openedx.core.djangoapps.user_api.accounts import settings_views
from lms.djangoapps.philu_overrides.views import login_and_registration_form
from .views import login, deprecated
urlpatterns = [
# TODO this should really be declared in the user_api app
url(r'^account/settings$', settings_views.account_settings, name='account_settings'),
# TODO move contents of urls_common here once CMS no longer has its own login
url(r'', include('openedx.core.djangoapps.user_authn.urls_common')),
url(r'^account/finish_auth$', login.finish_auth, name='finish_auth'),
]
if settings.FEATURES.get('ENABLE_COMBINED_LOGIN_REGISTRATION'):
# Backwards compatibility with old URL structure, but serve the new views
urlpatterns += [
url(r'^login$', login_and_registration_form,
{'initial_mode': 'login'}, name="signin_user"),
url(r'^register$', login_and_registration_form,
{'initial_mode': 'register'}, name="register_user"),
url(r'^register/(?P<org_name>[^/]*)/(?P<admin_email>[^/]*)/$',
login_and_registration_form,
{'initial_mode': 'register'}, name="register_user"),
]
else:
# Serve the old views
urlpatterns += [
url(r'^login$', deprecated.signin_user, name='signin_user'),
url(r'^register$', deprecated.register_user, name='register_user'),
]
|
philanthropy-u/edx-platform
|
openedx/core/djangoapps/user_authn/urls.py
|
Python
|
agpl-3.0
| 1,485
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
from typing import List
from django.db.models.expressions import RawSQL, Subquery, OuterRef
from django.template.defaultfilters import yesno
from django.test import TestCase
from django.utils.translation import gettext_lazy as _
from assessments.tests.factories.score_responsible import ScoreResponsibleOfClassFactory
from attribution.business import attribution_charge_new
from attribution.models.enums.function import COORDINATOR
from attribution.tests.factories.attribution_charge_new import AttributionChargeNewFactory
from attribution.tests.factories.attribution_class import AttributionClassFactory
from attribution.tests.factories.attribution_new import AttributionNewFactory
from base.business.learning_unit_xls import DEFAULT_LEGEND_FILLS, SPACES, PROPOSAL_LINE_STYLES, \
prepare_proposal_legend_ws_data, _get_wrapped_cells, \
_get_font_rows, _get_attribution_line, _add_training_data, \
get_data_part1, _get_parameters_configurable_list, WRAP_TEXT_ALIGNMENT, HEADER_PROGRAMS, XLS_DESCRIPTION, \
get_data_part2, annotate_qs, learning_unit_titles_part1, prepare_xls_content, _get_attribution_detail, \
prepare_xls_content_with_attributions, BOLD_FONT, _prepare_titles, HEADER_TEACHERS, _get_class_score_responsibles
from base.business.learning_unit_xls import _get_col_letter
from base.business.learning_unit_xls import get_significant_volume
from base.models.entity_version import EntityVersion
from base.models.enums import education_group_categories
from base.models.enums import entity_type, organization_type
from base.models.enums import learning_component_year_type
from base.models.enums import learning_unit_year_periodicity
from base.models.enums import proposal_type, proposal_state
from base.models.enums.learning_unit_year_periodicity import PERIODICITY_TYPES
from base.models.learning_unit_year import LearningUnitYear, SQL_RECURSIVE_QUERY_EDUCATION_GROUP_TO_CLOSEST_TRAININGS
from base.tests.factories.academic_year import AcademicYearFactory
from base.tests.factories.business.learning_units import GenerateContainer
from base.tests.factories.campus import CampusFactory
from base.tests.factories.education_group_type import EducationGroupTypeFactory
from base.tests.factories.education_group_year import EducationGroupYearFactory
from base.tests.factories.entity_version import EntityVersionFactory
from base.tests.factories.external_learning_unit_year import ExternalLearningUnitYearFactory
from base.tests.factories.group_element_year import GroupElementYearFactory
from base.tests.factories.learning_component_year import LearningComponentYearFactory, \
LecturingLearningComponentYearFactory, PracticalLearningComponentYearFactory
from base.tests.factories.learning_container_year import LearningContainerYearFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFactory, LearningUnitYearFullFactory
from base.tests.factories.organization import OrganizationFactory
from base.tests.factories.person import PersonFactory
from base.tests.factories.proposal_learning_unit import ProposalLearningUnitFactory
from base.tests.factories.tutor import TutorFactory
from base.tests.factories.user import UserFactory
from education_group.tests.factories.group_year import GroupYearFactory
from learning_unit.tests.factories.learning_class_year import LearningClassYearFactory
from osis_common.document import xls_build
from program_management.tests.factories.education_group_version import \
ParticularTransitionEducationGroupVersionFactory, StandardEducationGroupVersionFactory
from program_management.tests.factories.element import ElementFactory
COL_TEACHERS_LETTER = 'L'
COL_PROGRAMS_LETTER = 'Z'
PARENT_PARTIAL_ACRONYM = 'LDROI'
PARENT_ACRONYM = 'LBIR'
PARENT_TITLE = 'TITLE 1'
ROOT_ACRONYM = 'DRTI'
VERSION_ACRONYM = 'CRIM'
ALL_COLUMNS_FOR_ATTRIBUTIONS_LIST = [
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG'
]
class TestLearningUnitXls(TestCase):
@classmethod
def setUpTestData(cls):
cls.academic_year = AcademicYearFactory(year=2017)
cls.learning_container_luy1 = LearningContainerYearFactory(academic_year=cls.academic_year)
cls.learning_unit_yr_1 = LearningUnitYearFactory(academic_year=cls.academic_year,
learning_container_year=cls.learning_container_luy1,
credits=10)
cls.learning_unit_yr_1_element = ElementFactory(learning_unit_year=cls.learning_unit_yr_1)
cls.learning_container_luy_external = LearningContainerYearFactory(academic_year=cls.academic_year)
cls.learning_unit_yr_external = LearningUnitYearFactory(
academic_year=cls.academic_year,
learning_container_year=cls.learning_container_luy_external,
credits=10,
campus=CampusFactory(organization=OrganizationFactory(type=organization_type.MAIN))
)
cls.external_luy = ExternalLearningUnitYearFactory(learning_unit_year=cls.learning_unit_yr_external,
external_credits=15.25)
cls.learning_unit_yr_external_element = ElementFactory(learning_unit_year=cls.learning_unit_yr_external)
cls.learning_unit_yr_2 = LearningUnitYearFactory()
cls.proposal_creation_1 = ProposalLearningUnitFactory(
state=proposal_state.ProposalState.ACCEPTED.name,
type=proposal_type.ProposalType.CREATION.name,
)
cls.proposal_creation_2 = ProposalLearningUnitFactory(
state=proposal_state.ProposalState.ACCEPTED.name,
type=proposal_type.ProposalType.CREATION.name,
)
direct_parent_type = EducationGroupTypeFactory(name='Bachelor', category=education_group_categories.TRAINING)
cls.an_education_group_parent = EducationGroupYearFactory(academic_year=cls.academic_year,
education_group_type=direct_parent_type,
acronym=ROOT_ACRONYM)
cls.a_group_year_parent = GroupYearFactory(academic_year=cls.academic_year, acronym=ROOT_ACRONYM)
cls.a_group_year_parent_element = ElementFactory(group_year=cls.a_group_year_parent)
cls.standard_version = StandardEducationGroupVersionFactory(
offer=cls.an_education_group_parent, root_group=cls.a_group_year_parent
)
cls.group_element_child = GroupElementYearFactory(
parent_element=cls.a_group_year_parent_element,
child_element=cls.learning_unit_yr_1_element,
relative_credits=cls.learning_unit_yr_1.credits,
)
cls.group_element_child = GroupElementYearFactory(
parent_element=cls.a_group_year_parent_element,
child_element=cls.learning_unit_yr_external_element,
relative_credits=cls.learning_unit_yr_external.credits,
)
# Particular OF
cls.create_version(direct_parent_type)
cls.old_academic_year = AcademicYearFactory(year=datetime.date.today().year - 2)
cls.current_academic_year = AcademicYearFactory(year=datetime.date.today().year)
generatorContainer = GenerateContainer(cls.old_academic_year, cls.current_academic_year)
cls.learning_unit_year_with_entities = generatorContainer.generated_container_years[0].learning_unit_year_full
entities = [
EntityVersionFactory(
start_date=datetime.datetime(1900, 1, 1),
end_date=None,
entity_type=entity_type.FACULTY,
entity__organization__type=organization_type.MAIN
) for _ in range(4)
]
cls.learning_unit_year_with_entities.entity_requirement = entities[0]
cls.learning_unit_year_with_entities.entity_allocation = entities[1]
cls.proposal_creation_3 = ProposalLearningUnitFactory(
learning_unit_year=cls.learning_unit_year_with_entities,
state=proposal_state.ProposalState.ACCEPTED.name,
type=proposal_type.ProposalType.CREATION.name,
)
cls.learning_container_luy = LearningContainerYearFactory(academic_year=cls.academic_year)
cls.luy_with_attribution = LearningUnitYearFactory(academic_year=cls.academic_year,
learning_container_year=cls.learning_container_luy,
periodicity=learning_unit_year_periodicity.ANNUAL,
status=True,
language=None,
)
cls.luy_with_attribution.entity_requirement = entities[0]
cls.luy_with_attribution.entity_allocation = entities[1]
cls.component_lecturing = LearningComponentYearFactory(
learning_unit_year=cls.luy_with_attribution,
type=learning_component_year_type.LECTURING,
hourly_volume_total_annual=15,
hourly_volume_partial_q1=10,
hourly_volume_partial_q2=5,
planned_classes=1
)
cls.component_practical = LearningComponentYearFactory(
learning_unit_year=cls.luy_with_attribution,
type=learning_component_year_type.PRACTICAL_EXERCISES,
hourly_volume_total_annual=15,
hourly_volume_partial_q1=10,
hourly_volume_partial_q2=5,
planned_classes=1
)
a_person_tutor_1 = PersonFactory(last_name='Dupuis', first_name='Tom', email="dupuis@gmail.com")
cls.a_tutor_1 = TutorFactory(person=a_person_tutor_1)
cls.an_attribution_1 = AttributionNewFactory(
tutor=cls.a_tutor_1,
start_year=2017,
function=COORDINATOR
)
cls.attribution_charge_new_lecturing_1 = AttributionChargeNewFactory(
learning_component_year=cls.component_lecturing,
attribution=cls.an_attribution_1,
allocation_charge=15.0)
cls.attribution_charge_new_practical_1 = AttributionChargeNewFactory(
learning_component_year=cls.component_practical,
attribution=cls.an_attribution_1,
allocation_charge=5.0)
cls.a_tutor_2 = TutorFactory(person=PersonFactory(last_name='Maréchal', first_name='Didier'))
cls.an_attribution_2 = AttributionNewFactory(
tutor=cls.a_tutor_2,
start_year=2017
)
cls.attribution_charge_new_lecturing_2 = AttributionChargeNewFactory(
learning_component_year=cls.component_lecturing,
attribution=cls.an_attribution_2,
allocation_charge=15.0)
cls.attribution_charge_new_practical_2 = AttributionChargeNewFactory(
learning_component_year=cls.component_practical,
attribution=cls.an_attribution_2,
allocation_charge=5.0)
cls.entity_requirement = EntityVersion.objects.filter(
entity=OuterRef('learning_container_year__requirement_entity'),
).current(
OuterRef('academic_year__start_date')
).values('acronym')[:1]
cls.entity_allocation = EntityVersion.objects.filter(
entity=OuterRef('learning_container_year__allocation_entity'),
).current(
OuterRef('academic_year__start_date')
).values('acronym')[:1]
@classmethod
def create_version(cls, direct_parent_type):
cls.learning_unit_yr_version = LearningUnitYearFactory(
academic_year=cls.academic_year,
learning_container_year=LearningContainerYearFactory(academic_year=cls.academic_year),
credits=10
)
cls.learning_unit_yr_version_element = ElementFactory(learning_unit_year=cls.learning_unit_yr_version)
cls.an_education_group_parent_for_particular_version = EducationGroupYearFactory(
academic_year=cls.academic_year,
education_group_type=direct_parent_type,
acronym=VERSION_ACRONYM)
cls.a_group_year_parent_for_particular_version = GroupYearFactory(academic_year=cls.academic_year,
acronym=VERSION_ACRONYM)
cls.a_group_year_parent_element_for_particular_version = ElementFactory(
group_year=cls.a_group_year_parent_for_particular_version)
cls.particular_education_group_version = ParticularTransitionEducationGroupVersionFactory(
offer=cls.an_education_group_parent_for_particular_version,
root_group=cls.a_group_year_parent_for_particular_version)
cls.group_element_particular = GroupElementYearFactory(
parent_element=cls.a_group_year_parent_element_for_particular_version,
child_element=cls.learning_unit_yr_version_element,
relative_credits=15
)
def test_get_wrapped_cells_with_teachers_and_programs(self):
styles = _get_wrapped_cells([self.learning_unit_yr_1, self.learning_unit_yr_2],
COL_TEACHERS_LETTER,
COL_PROGRAMS_LETTER)
self.assertCountEqual(styles, ['{}2'.format(COL_TEACHERS_LETTER),
'{}2'.format(COL_PROGRAMS_LETTER),
'{}3'.format(COL_TEACHERS_LETTER),
'{}3'.format(COL_PROGRAMS_LETTER)])
def test_get_wrapped_cells_with_teachers(self):
styles = _get_wrapped_cells([self.learning_unit_yr_1, self.learning_unit_yr_2], COL_TEACHERS_LETTER, None)
self.assertCountEqual(styles, ['{}2'.format(COL_TEACHERS_LETTER),
'{}3'.format(COL_TEACHERS_LETTER)])
def test_get_wrapped_cells_with_programs(self):
styles = _get_wrapped_cells([self.learning_unit_yr_1, self.learning_unit_yr_2], None, COL_PROGRAMS_LETTER)
self.assertCountEqual(styles, ['{}2'.format(COL_PROGRAMS_LETTER),
'{}3'.format(COL_PROGRAMS_LETTER)])
def test_get_col_letter(self):
title_searched = 'title 2'
titles = ['title 1', title_searched, 'title 3']
self.assertEqual(_get_col_letter(titles, title_searched), 'B')
self.assertIsNone(_get_col_letter(titles, 'whatever'))
def test_get_colored_rows(self):
self.assertEqual(_get_font_rows([self.learning_unit_yr_1,
self.learning_unit_yr_2,
self.proposal_creation_1.learning_unit_year,
self.proposal_creation_2.learning_unit_year]),
{PROPOSAL_LINE_STYLES.get(self.proposal_creation_1.type): [3, 4],
BOLD_FONT: [0]})
# self.assertEqual(param.get(xls_build.FONT_ROWS), {BOLD_FONT: [0]})
def test_get_attributions_line(self):
a_person = PersonFactory(last_name="Smith", first_name='Aaron')
attribution_dict = {
'LECTURING': 10,
'substitute': None,
'duration': 3,
'PRACTICAL_EXERCISES': 15,
'person': a_person,
'function': 'CO_HOLDER',
'start_year': self.academic_year
}
self.assertEqual(
_get_attribution_line(attribution_dict),
"{} - {} : {} - {} : {} - {} : {} - {} : {} - {} : {} - {} : {} ".format(
'SMITH, Aaron', _('Function'),
_('Co-holder'), _('Substitute'),
'', _('Beg. of attribution'),
self.academic_year, _('Attribution duration'),
3, _('Attrib. vol1'),
10, _('Attrib. vol2'),
15,
)
)
def test_get_significant_volume(self):
self.assertEqual(get_significant_volume(10), 10)
self.assertEqual(get_significant_volume(None), '')
self.assertEqual(get_significant_volume(0), '')
def test_prepare_legend_ws_data(self):
expected = {
xls_build.HEADER_TITLES_KEY: [str(_('Legend'))],
xls_build.CONTENT_KEY: [
[SPACES, _('Proposal of creation')],
[SPACES, _('Proposal for modification')],
[SPACES, _('Suppression proposal')],
[SPACES, _('Transformation proposal')],
[SPACES, _('Transformation/modification proposal')],
],
xls_build.WORKSHEET_TITLE_KEY: _('Legend'),
xls_build.STYLED_CELLS:
DEFAULT_LEGEND_FILLS
}
self.assertEqual(prepare_proposal_legend_ws_data(), expected)
def test_add_training_data(self):
luy_1 = LearningUnitYear.objects.filter(pk=self.learning_unit_yr_1.pk).annotate(
closest_trainings=RawSQL(SQL_RECURSIVE_QUERY_EDUCATION_GROUP_TO_CLOSEST_TRAININGS, ())
).get()
formations = _add_training_data(luy_1)
expected = "{} ({}) - {} - {}".format(
self.a_group_year_parent.partial_acronym,
"{0:.2f}".format(self.group_element_child.relative_credits),
self.a_group_year_parent.acronym,
self.a_group_year_parent.title_fr + ' [{}]'.format(self.standard_version.title_fr)
)
self.assertEqual(formations, expected)
def test_get_data_part1(self):
luy = self.proposal_creation_3.learning_unit_year
data = get_data_part1(learning_unit_yr=luy, effective_class=None, is_external_ue_list=False)
self.assertEqual(data[0], luy.acronym)
self.assertEqual(data[1], luy.academic_year.name)
self.assertEqual(data[2], luy.complete_title)
self.assertEqual(data[6], _(self.proposal_creation_1.type.title()))
self.assertEqual(data[7], _(self.proposal_creation_1.state.title()))
def test_get_parameters_configurable_list(self):
an_user = UserFactory()
titles = ['title1', 'title2']
learning_units = [self.learning_unit_yr_1, self.learning_unit_yr_2]
param = _get_parameters_configurable_list(learning_units, titles, an_user)
self.assertEqual(param.get(xls_build.DESCRIPTION), XLS_DESCRIPTION)
self.assertEqual(param.get(xls_build.USER), an_user.username)
self.assertEqual(param.get(xls_build.HEADER_TITLES), titles)
self.assertEqual(param.get(xls_build.ALIGN_CELLS), {WRAP_TEXT_ALIGNMENT: []})
self.assertEqual(param.get(xls_build.FONT_ROWS), {BOLD_FONT: [0]})
titles.append(HEADER_PROGRAMS)
param = _get_parameters_configurable_list(learning_units, titles, an_user)
self.assertEqual(param.get(xls_build.ALIGN_CELLS), {WRAP_TEXT_ALIGNMENT: ['C2', 'C3']})
def test_get_data_part2(self):
learning_container_luy = LearningContainerYearFactory(academic_year=self.academic_year)
luy = LearningUnitYearFactory(academic_year=self.academic_year,
learning_container_year=learning_container_luy,
periodicity=learning_unit_year_periodicity.ANNUAL,
status=True,
language=None,
)
component_lecturing = LearningComponentYearFactory(
learning_unit_year=luy,
type=learning_component_year_type.LECTURING,
hourly_volume_total_annual=15,
hourly_volume_partial_q1=10,
hourly_volume_partial_q2=5,
planned_classes=1
)
component_practical = LearningComponentYearFactory(
learning_unit_year=luy,
type=learning_component_year_type.PRACTICAL_EXERCISES,
hourly_volume_total_annual=15,
hourly_volume_partial_q1=10,
hourly_volume_partial_q2=5,
planned_classes=1
)
a_tutor = TutorFactory(person__last_name="Dupuis", person__first_name="Tom", person__email="dupuis@gmail.com")
an_attribution_1 = AttributionNewFactory(
tutor=a_tutor,
start_year=2017
)
AttributionChargeNewFactory(
learning_component_year=component_lecturing,
attribution=an_attribution_1,
)
AttributionChargeNewFactory(
learning_component_year=component_practical,
attribution=an_attribution_1
)
a_tutor2 = TutorFactory(person__last_name="Tosson", person__first_name="Ivan", person__email="tosson@gmail.com")
an_attribution_2 = AttributionNewFactory(
tutor=a_tutor2,
start_year=2017
)
AttributionChargeNewFactory(
learning_component_year=component_lecturing,
attribution=an_attribution_2,
)
AttributionChargeNewFactory(
learning_component_year=component_practical,
attribution=an_attribution_2
)
# Simulate annotate
luy = annotate_qs(LearningUnitYear.objects.filter(pk=luy.pk)).first()
luy.entity_requirement = EntityVersionFactory()
luy.attribution_charge_news = attribution_charge_new.find_attribution_charge_new_by_learning_unit_year_as_dict(
luy)
expected_common = [
dict(PERIODICITY_TYPES)[luy.periodicity],
str(_('yes')) if luy.status else str(_('no')),
component_lecturing.hourly_volume_total_annual,
component_lecturing.hourly_volume_partial_q1,
component_lecturing.hourly_volume_partial_q2,
component_lecturing.planned_classes,
component_practical.hourly_volume_total_annual,
component_practical.hourly_volume_partial_q1,
component_practical.hourly_volume_partial_q2,
component_practical.planned_classes,
luy.get_quadrimester_display() or '',
luy.get_session_display() or '',
"",
str(_('yes')) if luy.english_friendly else str(_('no')),
str(_('yes')) if luy.french_friendly else str(_('no')),
str(_('yes')) if luy.exchange_students else str(_('no')),
str(_('yes')) if luy.individual_loan else str(_('no')),
str(_('yes')) if luy.learning_container_year.team else str(_('no')),
str(_('yes')) if luy.stage_dimona else str(_('no')),
luy.other_remark,
luy.other_remark_english,
]
self.assertEqual(
get_data_part2(learning_unit_yr=luy, effective_class=None, with_attributions=False),
expected_common
)
self.assertListEqual(
get_data_part2(learning_unit_yr=luy, effective_class=None, with_attributions=True)[2:],
_expected_attribution_data(expected_common, luy)[2:]
)
self.assertCountEqual(
get_data_part2(learning_unit_yr=luy, effective_class=None, with_attributions=True)[0].split(';'),
_expected_attribution_data(expected_common, luy)[0].split(';')
)
self.assertCountEqual(
            get_data_part2(learning_unit_yr=luy, effective_class=None, with_attributions=True)[1].split(';'),
_expected_attribution_data(expected_common, luy)[1].split(';')
)
def test_learning_unit_titles_part1(self):
self.assertEqual(
learning_unit_titles_part1(),
[
str(_('Code')),
str(_('Ac yr.')),
str(_('Title')),
str(_('Type')),
str(_('Subtype')),
"{} ({})".format(_('Req. Entity'), _('fac. level')),
str(_('Proposal type')),
str(_('Proposal status')),
str(_('Credits')),
str(_('Alloc. Ent.')),
str(_('Title in English')),
]
)
def test_prepare_xls_content(self):
qs = LearningUnitYear.objects.filter(pk=self.learning_unit_yr_1.pk).annotate(
entity_requirement=Subquery(self.entity_requirement),
entity_allocation=Subquery(self.entity_allocation),
)
result = prepare_xls_content(qs, is_external_ue_list=False, with_grp=True, with_attributions=True)
self.assertEqual(len(result), 1)
luy = annotate_qs(qs).get()
self.assertListEqual(
result[0],
self._get_luy_expected_data(luy, is_external_ue=False)
)
def test_prepare_xls_content_for_external_ue(self):
qs = LearningUnitYear.objects.filter(pk=self.learning_unit_yr_external.pk).annotate(
entity_requirement=Subquery(self.entity_requirement),
entity_allocation=Subquery(self.entity_allocation),
).select_related(
'externallearningunityear')
result = prepare_xls_content(qs, is_external_ue_list=True, with_grp=True, with_attributions=True)
self.assertEqual(len(result), 1)
luy = annotate_qs(qs).get()
self.assertListEqual(
result[0],
self._get_luy_expected_data(luy, is_external_ue=True)
)
def test_titles(self):
# without grp/attribution
# no external
titles = _prepare_titles(is_external_ue_list=False, with_grp=False, with_attributions=False)
self.assertListEqual(titles,
_expected_titles_common_part1() + _expected_titles_for_proposals() +
_expected_titles_common_part2_a() + _expected_titles_common_part2_b())
# without grp/attribution
# and external
titles = _prepare_titles(is_external_ue_list=True, with_grp=False, with_attributions=False)
self.assertListEqual(titles,
_expected_titles_common_part1() + _expected_titles_common_part2_a() +
_expected_titles_common_part2_b() + _expected_titles_for_external())
# with grp and without attribution
# no external
titles = _prepare_titles(is_external_ue_list=False, with_grp=True, with_attributions=False)
self.assertListEqual(titles,
_expected_titles_common_part1() + _expected_titles_for_proposals() +
_expected_titles_common_part2_a() + _expected_titles_common_part2_b() + [HEADER_PROGRAMS]
)
# with grp and without attribution
# and external
titles = _prepare_titles(is_external_ue_list=True, with_grp=True, with_attributions=False)
self.assertListEqual(titles,
_expected_titles_common_part1() + _expected_titles_common_part2_a() +
_expected_titles_common_part2_b() + [HEADER_PROGRAMS] + _expected_titles_for_external()
)
# with grp/attribution
# no external
titles = _prepare_titles(is_external_ue_list=False, with_grp=True, with_attributions=True)
self.assertListEqual(titles,
_expected_titles_common_part1() + _expected_titles_for_proposals() +
_expected_titles_common_part2_a() + HEADER_TEACHERS + _expected_titles_common_part2_b() +
[HEADER_PROGRAMS]
)
# with grp/attribution
# and external
titles = _prepare_titles(is_external_ue_list=True, with_grp=True, with_attributions=True)
self.assertListEqual(titles,
_expected_titles_common_part1() + _expected_titles_common_part2_a() + HEADER_TEACHERS +
_expected_titles_common_part2_b() + [HEADER_PROGRAMS] + _expected_titles_for_external()
)
def _get_luy_expected_data(self, luy, is_external_ue: bool):
common_part1 = [
luy.acronym,
luy.academic_year.__str__(),
luy.complete_title,
luy.get_container_type_display(),
luy.get_subtype_display(),
luy.entity_requirement,
]
if is_external_ue:
proposal_data = []
else:
proposal_data = [
'', # Proposal
'', # Proposal state
]
common_part2 = [
luy.credits,
luy.entity_allocation,
luy.complete_title_english,
'',
'',
'',
'',
luy.get_periodicity_display(),
yesno(luy.status),
get_significant_volume(luy.pm_vol_tot or 0),
get_significant_volume(luy.pm_vol_q1 or 0),
get_significant_volume(luy.pm_vol_q2 or 0),
luy.pm_classes or 0,
get_significant_volume(luy.pp_vol_tot or 0),
get_significant_volume(luy.pp_vol_q1 or 0),
get_significant_volume(luy.pp_vol_q2 or 0),
luy.pp_classes or 0,
luy.get_quadrimester_display() or '',
luy.get_session_display() or '',
luy.language or "",
str(_('yes')) if luy.english_friendly else str(_('no')),
str(_('yes')) if luy.french_friendly else str(_('no')),
str(_('yes')) if luy.exchange_students else str(_('no')),
str(_('yes')) if luy.individual_loan else str(_('no')),
str(_('yes')) if luy.stage_dimona else str(_('no')),
str(_('yes')) if luy.learning_container_year.team else str(_('no')),
luy.other_remark,
luy.other_remark_english,
"{} ({}) - {} - {}".format(
self.a_group_year_parent.partial_acronym,
"{0:.2f}".format(self.group_element_child.relative_credits),
self.a_group_year_parent.acronym,
self.a_group_year_parent.title_fr + ' [{}]'.format(self.standard_version.title_fr)
)
]
external_data = []
if is_external_ue:
organization = luy.campus.organization
external_data = [
organization.country or '',
organization.main_address.city if organization.main_address else '',
organization.name,
self.external_luy.external_acronym,
self.external_luy.url,
"{0:.2f}".format(self.external_luy.external_credits)
]
return common_part1 + proposal_data + common_part2 + external_data
def test_get_attribution_detail(self):
a_person = PersonFactory(last_name="Smith", first_name='Aaron', email='smith@google.com')
attribution_dict = {
'LECTURING': 10,
'substitute': None,
'duration': 3,
'PRACTICAL_EXERCISES': 15,
'person': a_person,
'function': 'CO_HOLDER',
'start_year': self.academic_year
}
self.assertCountEqual(
_get_attribution_detail(attribution_dict),
['Smith Aaron',
'smith@google.com',
_('Co-holder'),
'',
self.academic_year,
3,
10,
15]
)
def test_prepare_xls_content_with_attributions(self):
qs = LearningUnitYear.objects.filter(pk=self.luy_with_attribution.pk).annotate(
entity_requirement=Subquery(self.entity_requirement),
entity_allocation=Subquery(self.entity_allocation),
)
result = prepare_xls_content_with_attributions(qs, 34)
self.assertEqual(len(result.get('data')), 2)
self.assertCountEqual(result.get('cells_with_top_border'), ['A2', 'B2', 'C2', 'D2', 'E2', 'F2', 'G2', 'H2',
'I2', 'J2', 'K2', 'L2', 'M2', 'N2', 'O2', 'P2',
'Q2', 'R2', 'S2', 'T2', 'U2', 'V2', 'W2', 'X2',
'Y2', 'Z2', 'AA2', 'AB2', 'AC2', 'AD2', 'AE2',
'AF2', 'AG2', 'AH2'
]
)
self.assertCountEqual(result.get('cells_with_white_font'),
[
'A3', 'B3', 'C3', 'D3', 'E3', 'F3', 'G3', 'H3', 'I3', 'J3', 'K3', 'L3', 'M3', 'N3',
'O3', 'P3', 'Q3', 'R3', 'S3', 'T3', 'U3', 'V3', 'W3', 'X3', 'Y3', 'Z3', 'AA3', 'AB3',
'AC3', 'AD3', 'AE3', 'AF3'
]
)
first_attribution = result.get('data')[0]
attribution_first_column = 32
self.assertEqual(first_attribution[attribution_first_column], 'Dupuis Tom')
self.assertEqual(first_attribution[attribution_first_column+1], 'dupuis@gmail.com')
self.assertEqual(first_attribution[attribution_first_column+2], _("Coordinator"))
self.assertEqual(first_attribution[attribution_first_column+3], "")
self.assertEqual(first_attribution[attribution_first_column+4], 2017)
self.assertEqual(first_attribution[attribution_first_column+5], '')
self.assertEqual(first_attribution[attribution_first_column+6], 15)
self.assertEqual(first_attribution[attribution_first_column+7], 5)
def test_add_training_data_for_version(self):
luy = LearningUnitYear.objects.filter(pk=self.learning_unit_yr_version.pk).annotate(
closest_trainings=RawSQL(SQL_RECURSIVE_QUERY_EDUCATION_GROUP_TO_CLOSEST_TRAININGS, ())
).get()
formations = _add_training_data(luy)
expected = "{} ({}) - {} - {}".format(
self.a_group_year_parent_for_particular_version.partial_acronym,
"{0:.2f}".format(self.group_element_particular.relative_credits),
"{}[{}-TRANSITION]".format(self.a_group_year_parent_for_particular_version.acronym,
self.particular_education_group_version.version_name),
self.a_group_year_parent_for_particular_version.title_fr + ' [{}]'.format(
self.particular_education_group_version.title_fr
)
)
self.assertEqual(expected, formations)
class TestLearningUnitXlsClassesDetail(TestCase):
@classmethod
def setUpTestData(cls):
cls.luy = LearningUnitYearFullFactory()
entities = [
EntityVersionFactory(
start_date=datetime.datetime(1900, 1, 1),
end_date=None,
entity_type=entity_type.FACULTY,
entity__organization__type=organization_type.MAIN
) for _ in range(4)
]
cls.luy.entity_requirement = entities[0]
cls.luy.entity_allocation = entities[0]
cls.lecturing_component = LecturingLearningComponentYearFactory(learning_unit_year=cls.luy)
cls.class_a = LearningClassYearFactory(learning_component_year=cls.lecturing_component, acronym="A")
cls.class_b = LearningClassYearFactory(learning_component_year=cls.lecturing_component, acronym="B")
cls.attribution_1 = AttributionChargeNewFactory(
learning_component_year=cls.lecturing_component,
attribution__tutor__person__last_name='Arnould'
)
cls.attribution_2 = AttributionChargeNewFactory(
learning_component_year=cls.lecturing_component,
attribution__tutor__person__last_name='Martin'
)
cls.attribution_3 = AttributionChargeNewFactory(learning_component_year=cls.lecturing_component)
cls.attribution_1_on_class_a = AttributionClassFactory(
learning_class_year=cls.class_a,
attribution_charge=cls.attribution_1,
allocation_charge=10
)
cls.attribution_2_on_class_a = AttributionClassFactory(
learning_class_year=cls.class_a,
attribution_charge=cls.attribution_2,
allocation_charge=20
)
cls.attribution_3_on_class_b = AttributionClassFactory(
learning_class_year=cls.class_b,
attribution_charge=cls.attribution_3,
allocation_charge=30
)
practical_component = PracticalLearningComponentYearFactory(learning_unit_year=cls.luy)
cls.class_practical_c = LearningClassYearFactory(learning_component_year=practical_component)
cls.attribution_practical_1 = AttributionChargeNewFactory(learning_component_year=practical_component)
cls.entity_requirement = EntityVersion.objects.filter(
entity=OuterRef('learning_container_year__requirement_entity'),
).current(
OuterRef('academic_year__start_date')
).values('acronym')[:1]
cls.entity_allocation = EntityVersion.objects.filter(
entity=OuterRef('learning_container_year__allocation_entity'),
).current(
OuterRef('academic_year__start_date')
).values('acronym')[:1]
cls.score_responsible = ScoreResponsibleOfClassFactory(
learning_unit_year=cls.luy,
learning_class_year=cls.class_a
)
def test_get_data_part1_with_effective_class_for_lecturing(self):
luy = self.luy
effective_class = self.class_a
data = get_data_part1(learning_unit_yr=luy, effective_class=effective_class, is_external_ue_list=False)
# Specific class data
self.assertEqual(data[0], "{}-{}".format(luy.acronym, effective_class.acronym))
self.assertEqual(data[2], "{} - {}".format(luy.complete_title, effective_class.title_fr))
self.assertEqual(data[3], _('Class'))
self.assertEqual(data[10], "{} - {}".format(luy.complete_title_english, effective_class.title_en))
# UE data
self.assertEqual(data[1], luy.academic_year.name)
self.assertEqual(data[4], _('Full'))
self.assertEqual(data[5], luy.entity_requirement)
self.assertEqual(data[6], '')
self.assertEqual(data[7], '')
self.assertEqual(data[8], luy.credits)
self.assertEqual(data[9], luy.entity_allocation)
def test_get_data_part1_with_effective_class_for_practical_acronym_column(self):
luy = self.luy
effective_class = self.class_practical_c
data = get_data_part1(learning_unit_yr=luy, effective_class=effective_class, is_external_ue_list=False)
self.assertEqual(data[0], "{}_{}".format(luy.acronym, effective_class.acronym))
def test_get_data_part2_with_effective_class_for_lecturing(self):
luy = self.luy
effective_class = self.class_a
expected_common = [
dict(PERIODICITY_TYPES)[luy.periodicity],
str(_('yes')) if luy.status else str(_('no')),
effective_class.hourly_volume_partial_q1 + effective_class.hourly_volume_partial_q2,
effective_class.hourly_volume_partial_q1 or '',
effective_class.hourly_volume_partial_q2 or '',
'',
'',
'',
'',
'',
effective_class.get_quadrimester_display() or '',
effective_class.get_session_display() or '',
luy.language,
yesno(luy.english_friendly).strip(),
yesno(luy.french_friendly).strip(),
yesno(luy.exchange_students).strip(),
yesno(luy.individual_loan).strip(),
yesno(luy.stage_dimona).strip(),
yesno(luy.learning_container_year.team).strip(),
luy.other_remark,
luy.other_remark_english,
]
self.assertEqual(
get_data_part2(learning_unit_yr=luy, effective_class=effective_class, with_attributions=False),
expected_common
)
def test_get_attribution_lines_with_effective_class_for_lecturing(self):
qs = LearningUnitYear.objects.filter(pk=self.luy.pk).annotate(
entity_requirement=Subquery(self.entity_requirement),
entity_allocation=Subquery(self.entity_allocation),
)
result = prepare_xls_content_with_attributions(qs, 33)
data = result.get('data')
# 4 UE attributions = 3 attributions on lecturing + 1 on practical
# 3 Effective Class attributions = 2 on class a and 1 on class b
# 1 Line for class c without attribution
self.assertEqual(len(result.get('data')), 8)
# Check classes content
xls_class_a_attribution_1 = data[4]
self.assertEqual(xls_class_a_attribution_1[0], "{}-{}".format(self.luy.acronym, self.class_a.acronym))
self.assertEqual(xls_class_a_attribution_1[2], "{} - {}".format(self.luy.complete_title, self.class_a.title_fr))
self.assertEqual(xls_class_a_attribution_1[3], _('Class'))
self.assertEqual(xls_class_a_attribution_1[10],
"{} - {}".format(self.luy.complete_title_english, self.class_a.title_en)
)
# Check classes attributions volumes
self._assert_class_attribution_volumes(xls_class_a_attribution_1, self.attribution_1_on_class_a)
self._assert_class_attribution_volumes(data[5], self.attribution_2_on_class_a)
self._assert_class_attribution_volumes(data[6], self.attribution_3_on_class_b)
# Check style
cells_with_top_border = result.get('cells_with_top_border')
cells_with_white_font = result.get('cells_with_white_font')
# xls structure
# titles - line 1
# UE attr 1- line 2
# UE attr 2- line 3
# UE attr 3- line 4
# UE attr 4- line 5
# Class A attr 1- line 6
# Class A attr 2 - line 7
# Class B attr 1 - line 8
# Class C no attr - line 9
expected = _build_cells_ref(ALL_COLUMNS_FOR_ATTRIBUTIONS_LIST, [2, 6, 8, 9])
self.assertSetEqual(cells_with_top_border, expected)
expected = _build_cells_ref(
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF'], [3, 4, 5, 7])
self.assertSetEqual(
cells_with_white_font,
expected
)
def _assert_class_attribution_volumes(self, class_attribution_line_data, attribution_class):
self.assertEqual(class_attribution_line_data[37], '')
self.assertEqual(class_attribution_line_data[38], attribution_class.allocation_charge)
def test_get_class_score_responsibles(self):
score_responsibles = _get_class_score_responsibles(self.class_a)
self.assertEqual(len(score_responsibles), 1)
def _expected_attribution_data(expected: List, luy: LearningUnitYear) -> List[str]:
expected_attributions = []
score_responsibles = []
for k, v in luy.attribution_charge_news.items():
expected_attributions.append(v)
if v.get('score_responsible'):
score_responsibles.append(v)
tutors_names_concat = _get_persons_names(expected_attributions)
tutors_emails_concat = _get_persons_emails(expected_attributions)
ex = [tutors_names_concat, tutors_emails_concat]
score_responsibles_names_concat = _get_persons_names(score_responsibles)
score_responsibles_emails_concat = _get_persons_emails(score_responsibles)
ex.extend([score_responsibles_names_concat, score_responsibles_emails_concat])
ex.extend(expected)
return ex
def _expected_titles_common_part1() -> List[str]:
return [
str(_('Code')),
str(_('Ac yr.')),
str(_('Title')),
str(_('Type')),
str(_('Subtype')),
str(_('Req. Entity')),
]
def _expected_titles_common_part2_a() -> List[str]:
return [
str(_('Credits')),
str(_('Alloc. Ent.')),
str(_('Title in English')),
]
def _expected_titles_common_part2_b() -> List[str]:
return [
str(_('Periodicity')),
str(_('Active')),
"{} - {}".format(_('Lecturing vol.'), _('Annual')),
"{} - {}".format(_('Lecturing vol.'), _('1st quadri')),
"{} - {}".format(_('Lecturing vol.'), _('2nd quadri')),
"{}".format(_('Lecturing planned classes')),
"{} - {}".format(_('Practical vol.'), _('Annual')),
"{} - {}".format(_('Practical vol.'), _('1st quadri')),
"{} - {}".format(_('Practical vol.'), _('2nd quadri')),
"{}".format(_('Practical planned classes')),
str(_('Quadrimester')),
str(_('Session derogation')),
str(_('Language')),
str(_('English-friendly')),
str(_('French-friendly')),
str(_('Exchange students')),
str(_('Individual loan')),
str(_('Stage-Dimona')),
str(_('Team management')),
str(_('Other remark (intended for publication)')),
str(_('Other remark in english (intended for publication)')),
]
def _expected_titles_for_proposals() -> List[str]:
return [
str(_('Proposal type')),
str(_('Proposal status')),
]
def _expected_titles_for_external() -> List[str]:
return [
str(_('Country')),
str(_('City of institution')),
str(_('Reference institution')),
str(_('External code')),
str(_('Url')),
str(_('Local credits')),
]
def _get_persons_emails(attributions):
return ";".join(expected_attribution.get('person').email for expected_attribution in attributions)
def _get_persons_names(attributions):
return ';'.join("{} {}".format(
expected_attribution.get('person').last_name.upper(),
expected_attribution.get('person').first_name
) for expected_attribution in attributions)
def _build_cells_ref(columns, line_numbers: List[int]):
references = []
for line_number in line_numbers:
for letter in columns:
references.append("{}{}".format(letter, line_number))
return set(references)
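# Usage sketch (illustrative only, not used by the tests above): _build_cells_ref
# simply crosses column letters with row numbers, e.g.
#   _build_cells_ref(['A', 'B'], [2, 3]) -> {'A2', 'B2', 'A3', 'B3'}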
|
uclouvain/osis
|
base/tests/business/test_learning_unit_xls.py
|
Python
|
agpl-3.0
| 47,571
|
# -*- coding: utf-8 -*-
# This file is part of Shuup BR.
#
# Copyright (c) 2016, Rockho Team. All rights reserved.
# Author: Christian Hess
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.front.views.checkout import DefaultCheckoutView
from django.conf import settings
from django.contrib.auth.views import redirect_to_login
from django.contrib.auth.mixins import AccessMixin
class ShuupBRCheckoutView(DefaultCheckoutView, AccessMixin):
"""
    Checkout view that requires a registered user
    before starting the checkout process.
    The addresses phase is slightly different from the original one.
"""
phase_specs = [
"shuup_br.checkout.addresses:ShuupBRAddressesPhase",
"shuup.front.checkout.methods:MethodsPhase",
"shuup.front.checkout.methods:ShippingMethodPhase",
"shuup.front.checkout.methods:PaymentMethodPhase",
"shuup.front.checkout.confirm:ConfirmPhase",
]
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated() and not settings.SHUUP_ALLOW_ANONYMOUS_ORDERS:
return redirect_to_login(self.request.get_full_path(), self.get_login_url(), self.get_redirect_field_name())
return super(ShuupBRCheckoutView, self).dispatch(request, *args, **kwargs)
|
rockho-team/shoop-br
|
shuup_br/views.py
|
Python
|
agpl-3.0
| 1,376
|
# Generated by Django 2.2.13 on 2020-10-08 11:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("services", "0077_unit_soft_delete"),
]
operations = [
migrations.CreateModel(
name="Announcement",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(blank=True, max_length=100)),
("title_fi", models.CharField(blank=True, max_length=100, null=True)),
("title_sv", models.CharField(blank=True, max_length=100, null=True)),
("title_en", models.CharField(blank=True, max_length=100, null=True)),
("content", models.TextField()),
("content_fi", models.TextField(null=True)),
("content_sv", models.TextField(null=True)),
("content_en", models.TextField(null=True)),
(
"active",
models.BooleanField(
default=False,
help_text="Only active objects are visible in the API.",
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="ErrorMessage",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(blank=True, max_length=100)),
("title_fi", models.CharField(blank=True, max_length=100, null=True)),
("title_sv", models.CharField(blank=True, max_length=100, null=True)),
("title_en", models.CharField(blank=True, max_length=100, null=True)),
("content", models.TextField()),
("content_fi", models.TextField(null=True)),
("content_sv", models.TextField(null=True)),
("content_en", models.TextField(null=True)),
(
"active",
models.BooleanField(
default=False,
help_text="Only active objects are visible in the API.",
),
),
],
options={
"abstract": False,
},
),
]
|
City-of-Helsinki/smbackend
|
services/migrations/0078_announcement_errormessage.py
|
Python
|
agpl-3.0
| 2,821
|
from django.forms.widgets import ChoiceInput, ChoiceFieldRenderer
from django.utils.encoding import force_text
from django.utils.html import format_html
from C4CApplication.meta.visitor import Visitor
from C4CApplication.meta.non_member import NonMember
from C4CApplication.meta.member import Member
from C4CApplication.meta.volunteer_member import VolunteerMember
from C4CApplication.meta.verified_member import VerifiedMember
from C4CApplication.meta.volunteer_verified import VolunteerVerified
from C4CApplication.meta.branch_officer import BranchOfficer
from C4CApplication.meta.bp_administrator import BPAdministrator
from C4CApplication import models
class UnlabelledChoiceInput(ChoiceInput):
input_type = None
def __init__(self, *args, **kwargs):
super(UnlabelledChoiceInput, self).__init__(*args, **kwargs)
def render(self, name=None, value=None, attrs=None, choices=()):
return format_html(
'{0} {1}', self.tag(), self.choice_label
)
class UnlabelledRadioChoiceInput(UnlabelledChoiceInput):
input_type = 'radio'
def __init__(self, *args, **kwargs):
super(UnlabelledRadioChoiceInput, self).__init__(*args, **kwargs)
self.value = force_text(self.value)
class UnlabelledCheckboxChoiceInput(UnlabelledChoiceInput):
input_type = 'checkbox'
def __init__(self, *args, **kwargs):
super(UnlabelledCheckboxChoiceInput, self).__init__(*args, **kwargs)
self.value = set(force_text(v) for v in self.value)
def is_checked(self):
return self.choice_value in self.value
class UnlabelledRadioFieldRenderer(ChoiceFieldRenderer):
choice_input_class = UnlabelledRadioChoiceInput
class UnlabelledCheckboxFieldRenderer(ChoiceFieldRenderer):
choice_input_class = UnlabelledCheckboxChoiceInput
def create_user(member_email):
"""
    :param member_email: email address of the member, or None for an anonymous visitor
    :return: the object of the class representing the privilege level of the user
"""
if member_email is None:
member_tag = 0
db_member = None
else:
db_member = models.Member.objects.filter(mail=member_email)
        if len(db_member) != 1:
            return None
        db_member = db_member[0]
        if db_member.deleted:
return None
member_tag = db_member.tag
#if db_member is None: # If the adress was faked
#return None
if member_tag & 32: # BP Administrator
user = BPAdministrator(db_member)
elif member_tag & 16: # Branch officer
user = BranchOfficer(db_member)
    elif (member_tag & 12) == 12: # Volunteer and Verified member (both bits set)
user = VolunteerVerified(db_member)
elif member_tag & 8: # Volunteer
user = VolunteerMember(db_member)
elif member_tag & 4: # Verified member
user = VerifiedMember(db_member)
elif member_tag & 2: # Member
user = Member(db_member)
elif member_tag & 1: # NonMember
user = NonMember(db_member)
    else: # Avoid giving BP admin rights by default if the member_tag is unknown
return Visitor()
return user
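# Usage sketch (illustrative; "alice@example.com" is a hypothetical address and assumes
# a matching, non-deleted models.Member row with tag=2):
#   create_user("alice@example.com")    # -> Member instance wrapping that row
#   create_user(None)                   # -> Visitor instance (member_tag defaults to 0)
#   create_user("unknown@example.com")  # -> None (no single matching member)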
|
dsarkozi/care4care-sdp-grp4
|
Care4Care/C4CApplication/views/utils.py
|
Python
|
agpl-3.0
| 3,094
|
#!/usr/bin/env python
"""
Script to fix workflows with truncated course_ids from https://github.com/Stanford-Online/edx-ora2/pull/25.
AIClassifierSet, AIGradingWorkflow and AITrainingWorkflow excluded as they are not used by Stanford.
"""
from itertools import chain
import os
import django
from django.db.models.functions import Length
from openedx.core.djangoapps.monkey_patch import django_db_models_options
def main():
from openassessment.assessment.models import PeerWorkflow, StaffWorkflow, StudentTrainingWorkflow
from openassessment.workflow.models import AssessmentWorkflow
peer_workflows = PeerWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
staff_workflows = StaffWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
training_workflows = StudentTrainingWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
full_course_ids = {} # Keep local dict to avoid repeated database hits for the same course_id
for workflow in chain(peer_workflows, staff_workflows, training_workflows):
truncated_course = workflow.course_id
if truncated_course not in full_course_ids:
# Get full course_id from AssessmentWorkflow table
try:
assessment_workflow = AssessmentWorkflow.objects.filter(course_id__startswith=truncated_course)[:1].get()
full_course_ids[truncated_course] = assessment_workflow.course_id
except AssessmentWorkflow.DoesNotExist:
print("No assessment workflow matching truncated course_id: {}".format(truncated_course))
continue
workflow.course_id = full_course_ids[truncated_course]
workflow.save()
print("Script finished.")
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'openedx.stanford.lms.envs.aws')
os.environ.setdefault("SERVICE_VARIANT", 'lms')
django_db_models_options.patch()
django.setup()
main()
|
caesar2164/edx-platform
|
scripts/update_ora2_truncated_course_ids.py
|
Python
|
agpl-3.0
| 2,042
|
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.exceptions import ValidationError
from odoo.tests.common import TransactionCase
class TestExtMandate(TransactionCase):
def setUp(self):
super(TestExtMandate, self).setUp()
self.committee_id = self.env.ref("mozaik_committee.sc_membre_effectif_ag")
self.ext_paul_id = self.env.ref("mozaik_committee.ext_paul_membre_ag")
self.ext_thierry_id = self.env.ref("mozaik_committee.ext_thierry_membre_ag")
def test_ext_candidature_process(self):
"""
Test the process of external candidatures until mandate creation
"""
candidature_ids = self.ext_thierry_id | self.ext_paul_id
# Thierry candidature is rejected
self.ext_thierry_id.button_reject()
self.assertEqual(self.ext_thierry_id.state, "rejected")
# Thierry candidature is set back to declare
self.ext_thierry_id.button_declare()
self.assertEqual(self.ext_thierry_id.state, "declared")
# Candidatures are designated
self.committee_id.button_designate_candidatures()
for candidature in candidature_ids:
self.assertEqual(candidature.state, "designated")
# Accept Thierry candidature
self.committee_id.write({"decision_date": "2014-04-01"})
self.ext_thierry_id.button_elected()
self.assertEqual(self.ext_thierry_id.state, "elected")
# Mandate is automatically created for Thierry candidature
# - mandate is linked to candidature
mandate_ids = self.env["ext.mandate"].search(
[("candidature_id", "in", candidature_ids.ids)]
)
self.assertEqual(len(mandate_ids), 1)
# Non elect others
self.committee_id.button_non_elect_candidatures()
self.assertEqual(self.ext_paul_id.state, "non-elected")
self.assertEqual(self.ext_thierry_id.state, "elected")
def test_no_decision_date(self):
"""
Test the process of (non-)electing external candidatures without decision
date
"""
self.committee_id.button_designate_candidatures()
with self.assertRaises(ValidationError):
self.committee_id.button_non_elect_candidatures()
|
mozaik-association/mozaik
|
mozaik_committee/tests/test_ext_mandate.py
|
Python
|
agpl-3.0
| 2,326
|
# sample script for auto registering users into open edx via csv file
# tested on cypress and dogwood
#
# Instructions:
# update the filename below to point to the correct csv file
# inside of the appropriate edxapp server run:
# cd /edx/app/edxapp/edx-platform
# source ../edxapp_env
# python manage.py lms --settings=aws_appsembler shell
# execfile/copy and paste this file into the shell
# Notes:
# stdout should mention user creation/successes/failures
# users will not be created if they already exist on the site
# running this script multiple times on the same input won't cause any problems
#
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import translation
from student.forms import AccountCreationForm
from student.models import CourseEnrollment, create_comments_service_user
from student.views import _do_create_account, AccountValidationError
from track.management.tracked_command import TrackedCommand
import csv
##example testcsv.csv file:
#username,firstname,lastname,email,password,pi,oucu
#test.username,firstname,lastname,testemail@example.com,testpassword
#test.username2,firstname2,lastname2,testemail2@example.com,testpassword
#test.username,firstname,lastname,testemail@example.com,testpassword
filename = '/tmp/testcsv.csv'
with open(filename,'rb') as csvfile:
csvreader = csv.reader(csvfile,delimiter=',')
csvreader.next() #skip first row of labels
for row in csvreader:
try:
username = row[0]
name = row[1] + ' ' + row[2]
email = row[3]
password = row[4]
if User.objects.filter(email=email):
print 'user {} already exists; skipping'.format(email)
continue
form = AccountCreationForm(
data={
'username': username,
'email': email,
'password': password,
'name': name,
},
tos_required=False
)
# django.utils.translation.get_language() will be used to set the new
# user's preferred language. This line ensures that the result will
# match this installation's default locale. Otherwise, inside a
# management command, it will always return "en-us".
translation.activate(settings.LANGUAGE_CODE)
try:
user, _, reg = _do_create_account(form)
reg.activate()
reg.save()
create_comments_service_user(user)
except AccountValidationError as e:
print e.message
user = User.objects.get(email=email)
translation.deactivate()
print 'successfully created user for {}'.format(email)
except Exception as e:
print 'could not create user: {}'.format(username)
print e
continue
|
appsembler/configuration
|
playbooks/appsemblerFiles/customUserImportCsv.py
|
Python
|
agpl-3.0
| 2,958
|
# The Hazard Library
# Copyright (C) 2012-2017 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.youngs_1997 import (
YoungsEtAl1997SInter,
YoungsEtAl1997SSlab,
YoungsEtAl1997GSCSSlabBest,
YoungsEtAl1997GSCSSlabUpperLimit,
YoungsEtAl1997GSCSSlabLowerLimit,
YoungsEtAl1997SInterNSHMP2008
)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
# Test data generated from OpenSHA implementation.
# For the GSC version, test data have been generated from the hazardlib
# implementation. Independent test data from the original author are needed
# for more robust testing.
class YoungsEtAl1997SInterTestCase(BaseGSIMTestCase):
GSIM_CLASS = YoungsEtAl1997SInter
def test_mean(self):
self.check('YOUNGS97/Y97SInter_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('YOUNGS97/Y97SInter_STD_TOTAL.csv',
max_discrep_percentage=0.1)
class YoungsEtAl1997SSlabTestCase(BaseGSIMTestCase):
GSIM_CLASS = YoungsEtAl1997SSlab
def test_mean(self):
self.check('YOUNGS97/Y97SSlab_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('YOUNGS97/Y97SSlab_STD_TOTAL.csv',
max_discrep_percentage=0.1)
class YoungsEtAl1997GSCSSlabBestTestCase(BaseGSIMTestCase):
GSIM_CLASS = YoungsEtAl1997GSCSSlabBest
def test_mean(self):
self.check('YOUNGS97/YoungsEtAl1997GSCSSlabBest_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('YOUNGS97/Y97SSlab_STD_TOTAL.csv',
max_discrep_percentage=0.1)
class YoungsEtAl1997GSCSSlabUpperLimitTestCase(BaseGSIMTestCase):
GSIM_CLASS = YoungsEtAl1997GSCSSlabUpperLimit
def test_mean(self):
self.check('YOUNGS97/YoungsEtAl1997GSCSSlabUpperLimit_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('YOUNGS97/Y97SSlab_STD_TOTAL.csv',
max_discrep_percentage=0.1)
class YoungsEtAl1997GSCSSlabLowerLimitTestCase(BaseGSIMTestCase):
GSIM_CLASS = YoungsEtAl1997GSCSSlabLowerLimit
def test_mean(self):
self.check('YOUNGS97/YoungsEtAl1997GSCSSlabLowerLimit_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('YOUNGS97/Y97SSlab_STD_TOTAL.csv',
max_discrep_percentage=0.1)
class YoungsEtAl1997SInterNSHMP2008TestCase(BaseGSIMTestCase):
GSIM_CLASS = YoungsEtAl1997SInterNSHMP2008
def test_mean(self):
self.check('YOUNGS97/Y97SInterNSHMP2008_MEAN.csv',
max_discrep_percentage=2.5)
def test_std_total(self):
self.check('YOUNGS97/Y97SInterNSHMP2008_STD_TOTAL.csv',
max_discrep_percentage=0.1)
|
gem/oq-hazardlib
|
openquake/hazardlib/tests/gsim/youngs_1997_test.py
|
Python
|
agpl-3.0
| 3,506
|
#!/usr/bin/env python3
# Copyright 2017 Christoph Reiter <reiter.christoph@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
import io
import os
import sys
import errno
import subprocess
import tarfile
import sysconfig
import tempfile
from email import parser
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from distutils.core import Extension, Distribution, Command
from distutils.errors import DistutilsSetupError, DistutilsOptionError
from distutils.ccompiler import new_compiler
from distutils.sysconfig import get_python_lib, customize_compiler
from distutils import dir_util, log
from distutils.spawn import find_executable
PYGOBJECT_VERISON = "3.31.1"
GLIB_VERSION_REQUIRED = "2.38.0"
GI_VERSION_REQUIRED = "1.46.0"
PYCAIRO_VERSION_REQUIRED = "1.11.1"
LIBFFI_VERSION_REQUIRED = "3.0"
WITH_CAIRO = True
"""Set to false if you don't want to build with cairo/pycairo support"""
def is_dev_version():
version = tuple(map(int, PYGOBJECT_VERISON.split(".")))
return version[1] % 2 != 0
def get_command_class(name):
# Returns the right class for either distutils or setuptools
return Distribution({}).get_command_class(name)
def get_pycairo_pkg_config_name():
return "py3cairo" if sys.version_info[0] == 3 else "pycairo"
def get_version_requirement(pkg_config_name):
"""Given a pkg-config module name gets the minimum version required"""
versions = {
"gobject-introspection-1.0": GI_VERSION_REQUIRED,
"glib-2.0": GLIB_VERSION_REQUIRED,
"gio-2.0": GLIB_VERSION_REQUIRED,
get_pycairo_pkg_config_name(): PYCAIRO_VERSION_REQUIRED,
"libffi": LIBFFI_VERSION_REQUIRED,
"cairo": "0",
"cairo-gobject": "0",
}
return versions[pkg_config_name]
def get_versions():
version = PYGOBJECT_VERISON.split(".")
assert len(version) == 3
versions = {
"PYGOBJECT_MAJOR_VERSION": version[0],
"PYGOBJECT_MINOR_VERSION": version[1],
"PYGOBJECT_MICRO_VERSION": version[2],
"VERSION": ".".join(version),
}
return versions
def parse_pkg_info(conf_dir):
"""Returns an email.message.Message instance containing the content
of the PKG-INFO file.
"""
versions = get_versions()
pkg_info = os.path.join(conf_dir, "PKG-INFO.in")
with io.open(pkg_info, "r", encoding="utf-8") as h:
text = h.read()
for key, value in versions.items():
text = text.replace("@%s@" % key, value)
p = parser.Parser()
message = p.parse(io.StringIO(text))
return message
def pkg_config_get_install_hint(pkg_name):
"""Returns an installation hint for a pkg-config name or None"""
if not sys.platform.startswith("linux"):
return
if find_executable("apt"):
dev_packages = {
"gobject-introspection-1.0": "libgirepository1.0-dev",
"glib-2.0": "libglib2.0-dev",
"gio-2.0": "libglib2.0-dev",
"cairo": "libcairo2-dev",
"cairo-gobject": "libcairo2-dev",
"libffi": "libffi-dev",
}
if pkg_name in dev_packages:
return "sudo apt install %s" % dev_packages[pkg_name]
elif find_executable("dnf"):
dev_packages = {
"gobject-introspection-1.0": "gobject-introspection-devel",
"glib-2.0": "glib2-devel",
"gio-2.0": "glib2-devel",
"cairo": "cairo-devel",
"cairo-gobject": "cairo-gobject-devel",
"libffi": "libffi-devel",
}
if pkg_name in dev_packages:
return "sudo dnf install %s" % dev_packages[pkg_name]
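# Usage sketch (illustrative; the output depends on the host distribution): on a Linux
# system where apt is on PATH, pkg_config_get_install_hint("glib-2.0") returns
# "sudo apt install libglib2.0-dev"; on other platforms it returns None.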
class PkgConfigError(Exception):
pass
class PkgConfigMissingPackageError(PkgConfigError):
pass
def _run_pkg_config(pkg_name, args, _cache={}):
"""Raises PkgConfigError"""
command = tuple(["pkg-config"] + args)
if command not in _cache:
try:
result = subprocess.check_output(command)
except OSError as e:
if e.errno == errno.ENOENT:
raise PkgConfigError(
"%r not found.\nArguments: %r" % (command[0], command))
raise PkgConfigError(e)
except subprocess.CalledProcessError as e:
try:
subprocess.check_output(["pkg-config", "--exists", pkg_name])
except (subprocess.CalledProcessError, OSError):
raise PkgConfigMissingPackageError(e)
else:
raise PkgConfigError(e)
else:
_cache[command] = result
return _cache[command]
def _run_pkg_config_or_exit(pkg_name, args):
try:
return _run_pkg_config(pkg_name, args)
except PkgConfigMissingPackageError as e:
hint = pkg_config_get_install_hint(pkg_name)
if hint:
raise SystemExit(
"%s\n\nTry installing it with: %r" % (e, hint))
else:
raise SystemExit(e)
except PkgConfigError as e:
raise SystemExit(e)
def pkg_config_version_check(pkg_name, version):
_run_pkg_config_or_exit(pkg_name, [
"--print-errors",
"--exists",
'%s >= %s' % (pkg_name, version),
])
def pkg_config_parse(opt, pkg_name):
ret = _run_pkg_config_or_exit(pkg_name, [opt, pkg_name])
if sys.version_info[0] == 3:
output = ret.decode()
else:
output = ret
opt = opt[-2:]
return [x.lstrip(opt) for x in output.split()]
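# Usage sketch (illustrative; the exact paths depend on the local installation):
#   pkg_config_parse("--cflags-only-I", "glib-2.0")
#   # -> e.g. ['/usr/include/glib-2.0', '/usr/lib/glib-2.0/include'], "-I" prefix stripped
#   pkg_config_parse("--libs-only-l", "glib-2.0")
#   # -> ['glib-2.0']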
def list_headers(d):
return [os.path.join(d, e) for e in os.listdir(d) if e.endswith(".h")]
def filter_compiler_arguments(compiler, args):
"""Given a compiler instance and a list of compiler warning flags
returns the list of supported flags.
"""
if compiler.compiler_type == "msvc":
# TODO
return []
extra = []
def check_arguments(compiler, args):
p = subprocess.Popen(
[compiler.compiler[0]] + args + extra + ["-x", "c", "-E", "-"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate(b"int i;\n")
if p.returncode != 0:
text = stderr.decode("ascii", "replace")
return False, [a for a in args if a in text]
else:
return True, []
def check_argument(compiler, arg):
return check_arguments(compiler, [arg])[0]
# clang doesn't error out for unknown options, force it to
if check_argument(compiler, '-Werror=unknown-warning-option'):
extra += ['-Werror=unknown-warning-option']
if check_argument(compiler, '-Werror=unused-command-line-argument'):
extra += ['-Werror=unused-command-line-argument']
# first try to remove all arguments contained in the error message
supported = list(args)
while 1:
ok, maybe_unknown = check_arguments(compiler, supported)
if ok:
return supported
elif not maybe_unknown:
break
for unknown in maybe_unknown:
if not check_argument(compiler, unknown):
supported.remove(unknown)
# hm, didn't work, try each argument one by one
supported = []
for arg in args:
if check_argument(compiler, arg):
supported.append(arg)
return supported
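# Usage sketch (illustrative; which flags survive depends on the local compiler):
#   cc = new_compiler()
#   customize_compiler(cc)
#   filter_compiler_arguments(cc, ["-Wall", "-Wbogus-made-up-flag"])
#   # -> ["-Wall"] if the compiler rejects the made-up flag, both entries otherwise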
class sdist_gnome(Command):
description = "Create a source tarball for GNOME"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# Don't use PEP 440 pre-release versions for GNOME releases
self.distribution.metadata.version = PYGOBJECT_VERISON
dist_dir = tempfile.mkdtemp()
try:
cmd = self.reinitialize_command("sdist")
cmd.dist_dir = dist_dir
cmd.ensure_finalized()
cmd.run()
base_name = self.distribution.get_fullname().lower()
cmd.make_release_tree(base_name, cmd.filelist.files)
try:
self.make_archive(base_name, "xztar", base_dir=base_name)
finally:
dir_util.remove_tree(base_name)
finally:
dir_util.remove_tree(dist_dir)
du_sdist = get_command_class("sdist")
class distcheck(du_sdist):
"""Creates a tarball and does some additional sanity checks such as
checking if the tarball includes all files, builds successfully and
    the test suite passes.
"""
def _check_manifest(self):
# make sure MANIFEST.in includes all tracked files
assert self.get_archive_files()
if subprocess.call(["git", "status"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) != 0:
return
included_files = self.filelist.files
assert included_files
process = subprocess.Popen(
["git", "ls-tree", "-r", "HEAD", "--name-only"],
stdout=subprocess.PIPE, universal_newlines=True)
out, err = process.communicate()
assert process.returncode == 0
tracked_files = out.splitlines()
tracked_files = [
f for f in tracked_files
if os.path.basename(f) not in [".gitignore"]]
diff = set(tracked_files) - set(included_files)
assert not diff, (
"Not all tracked files included in tarball, check MANIFEST.in",
diff)
def _check_dist(self):
# make sure the tarball builds
assert self.get_archive_files()
distcheck_dir = os.path.abspath(
os.path.join(self.dist_dir, "distcheck"))
if os.path.exists(distcheck_dir):
dir_util.remove_tree(distcheck_dir)
self.mkpath(distcheck_dir)
archive = self.get_archive_files()[0]
tfile = tarfile.open(archive, "r:gz")
tfile.extractall(distcheck_dir)
tfile.close()
name = self.distribution.get_fullname()
extract_dir = os.path.join(distcheck_dir, name)
old_pwd = os.getcwd()
os.chdir(extract_dir)
try:
self.spawn([sys.executable, "setup.py", "build"])
self.spawn([sys.executable, "setup.py", "install",
"--root",
os.path.join(distcheck_dir, "prefix"),
"--record",
os.path.join(distcheck_dir, "log.txt"),
])
self.spawn([sys.executable, "setup.py", "test"])
finally:
os.chdir(old_pwd)
def run(self):
du_sdist.run(self)
self._check_manifest()
self._check_dist()
class build_tests(Command):
description = "build test libraries and extensions"
user_options = [
("force", "f", "force a rebuild"),
]
def initialize_options(self):
self.build_temp = None
self.build_base = None
self.force = False
def finalize_options(self):
self.set_undefined_options(
'build_ext',
('build_temp', 'build_temp'))
self.set_undefined_options(
'build',
('build_base', 'build_base'))
def _newer_group(self, sources, *targets):
assert targets
from distutils.dep_util import newer_group
if self.force:
return True
else:
for target in targets:
if not newer_group(sources, target):
return False
return True
def run(self):
cmd = self.reinitialize_command("build_ext")
cmd.inplace = True
cmd.force = self.force
cmd.ensure_finalized()
cmd.run()
gidatadir = pkg_config_parse(
"--variable=gidatadir", "gobject-introspection-1.0")[0]
g_ir_scanner = pkg_config_parse(
"--variable=g_ir_scanner", "gobject-introspection-1.0")[0]
g_ir_compiler = pkg_config_parse(
"--variable=g_ir_compiler", "gobject-introspection-1.0")[0]
script_dir = get_script_dir()
gi_dir = os.path.join(script_dir, "gi")
tests_dir = os.path.join(script_dir, "tests")
gi_tests_dir = os.path.join(gidatadir, "tests")
schema_xml = os.path.join(tests_dir, "org.gnome.test.gschema.xml")
schema_bin = os.path.join(tests_dir, "gschemas.compiled")
if self._newer_group([schema_xml], schema_bin):
subprocess.check_call([
"glib-compile-schemas",
"--targetdir=%s" % tests_dir,
"--schema-file=%s" % schema_xml,
])
compiler = new_compiler()
customize_compiler(compiler)
if os.name == "nt":
compiler.shared_lib_extension = ".dll"
elif sys.platform == "darwin":
compiler.shared_lib_extension = ".dylib"
if "-bundle" in compiler.linker_so:
compiler.linker_so = list(compiler.linker_so)
i = compiler.linker_so.index("-bundle")
compiler.linker_so[i] = "-dynamiclib"
else:
compiler.shared_lib_extension = ".so"
def build_ext(ext):
if compiler.compiler_type == "msvc":
raise Exception("MSVC support not implemented")
libname = compiler.shared_object_filename(ext.name)
ext_paths = [os.path.join(tests_dir, libname)]
if os.name == "nt":
implibname = libname + ".a"
ext_paths.append(os.path.join(tests_dir, implibname))
if self._newer_group(ext.sources + ext.depends, *ext_paths):
objects = compiler.compile(
ext.sources,
output_dir=self.build_temp,
include_dirs=ext.include_dirs,
macros=ext.define_macros)
if os.name == "nt":
postargs = ["-Wl,--out-implib=%s" %
os.path.join(tests_dir, implibname)]
else:
postargs = []
compiler.link_shared_object(
objects,
compiler.shared_object_filename(ext.name),
output_dir=tests_dir,
libraries=ext.libraries,
library_dirs=ext.library_dirs,
extra_postargs=postargs)
return ext_paths
ext = Extension(
name='libgimarshallingtests',
sources=[
os.path.join(gi_tests_dir, "gimarshallingtests.c"),
os.path.join(tests_dir, "gimarshallingtestsextra.c"),
],
include_dirs=[
gi_tests_dir,
tests_dir,
],
depends=[
os.path.join(gi_tests_dir, "gimarshallingtests.h"),
os.path.join(tests_dir, "gimarshallingtestsextra.h"),
],
)
add_ext_pkg_config_dep(ext, compiler.compiler_type, "glib-2.0")
add_ext_pkg_config_dep(ext, compiler.compiler_type, "gio-2.0")
ext_paths = build_ext(ext)
gir_path = os.path.join(tests_dir, "GIMarshallingTests-1.0.gir")
typelib_path = os.path.join(
tests_dir, "GIMarshallingTests-1.0.typelib")
if self._newer_group(ext_paths, gir_path):
subprocess.check_call([
g_ir_scanner,
"--no-libtool",
"--include=Gio-2.0",
"--namespace=GIMarshallingTests",
"--nsversion=1.0",
"--symbol-prefix=gi_marshalling_tests",
"--warn-all",
"--warn-error",
"--library-path=%s" % tests_dir,
"--library=gimarshallingtests",
"--pkg=glib-2.0",
"--pkg=gio-2.0",
"--cflags-begin",
"-I%s" % gi_tests_dir,
"--cflags-end",
"--output=%s" % gir_path,
] + ext.sources + ext.depends)
if self._newer_group([gir_path], typelib_path):
subprocess.check_call([
g_ir_compiler,
gir_path,
"--output=%s" % typelib_path,
])
regress_macros = []
if not WITH_CAIRO:
regress_macros.append(("_GI_DISABLE_CAIRO", "1"))
ext = Extension(
name='libregress',
sources=[
os.path.join(gi_tests_dir, "regress.c"),
os.path.join(tests_dir, "regressextra.c"),
],
include_dirs=[
gi_tests_dir,
],
depends=[
os.path.join(gi_tests_dir, "regress.h"),
os.path.join(tests_dir, "regressextra.h"),
],
define_macros=regress_macros,
)
add_ext_pkg_config_dep(ext, compiler.compiler_type, "glib-2.0")
add_ext_pkg_config_dep(ext, compiler.compiler_type, "gio-2.0")
if WITH_CAIRO:
add_ext_pkg_config_dep(ext, compiler.compiler_type, "cairo")
add_ext_pkg_config_dep(
ext, compiler.compiler_type, "cairo-gobject")
ext_paths = build_ext(ext)
gir_path = os.path.join(tests_dir, "Regress-1.0.gir")
typelib_path = os.path.join(tests_dir, "Regress-1.0.typelib")
if WITH_CAIRO:
gir_cairo_args = [
"--include=cairo-1.0", "--pkg=cairo", "--pkg=cairo-gobject"]
else:
gir_cairo_args = ["-D_GI_DISABLE_CAIRO"]
if self._newer_group(ext_paths, gir_path):
subprocess.check_call([
g_ir_scanner,
"--no-libtool",
"--include=Gio-2.0",
"--namespace=Regress",
"--nsversion=1.0",
"--warn-all",
"--warn-error",
"--library-path=%s" % tests_dir,
"--library=regress",
"--pkg=glib-2.0",
"--pkg=gio-2.0",
"--output=%s" % gir_path,
] + gir_cairo_args + ext.sources + ext.depends)
if self._newer_group([gir_path], typelib_path):
subprocess.check_call([
g_ir_compiler,
gir_path,
"--output=%s" % typelib_path,
])
ext = Extension(
name='tests.testhelper',
sources=[
os.path.join(tests_dir, "testhelpermodule.c"),
os.path.join(tests_dir, "test-floating.c"),
os.path.join(tests_dir, "test-thread.c"),
os.path.join(tests_dir, "test-unknown.c"),
],
include_dirs=[
gi_dir,
tests_dir,
],
depends=list_headers(gi_dir) + list_headers(tests_dir),
define_macros=[("PY_SSIZE_T_CLEAN", None)],
)
add_ext_pkg_config_dep(ext, compiler.compiler_type, "glib-2.0")
add_ext_pkg_config_dep(ext, compiler.compiler_type, "gio-2.0")
add_ext_compiler_flags(ext, compiler)
dist = Distribution({"ext_modules": [ext]})
build_cmd = dist.get_command_obj("build")
build_cmd.build_base = os.path.join(self.build_base, "pygobject_tests")
build_cmd.ensure_finalized()
cmd = dist.get_command_obj("build_ext")
cmd.inplace = True
cmd.force = self.force
cmd.ensure_finalized()
cmd.run()
def get_suppression_files_for_prefix(prefix):
"""Returns a list of valgrind suppression files for a given prefix"""
# Most specific first (/usr/share/doc is Fedora, /usr/lib is Debian)
# Take the first one found
major = str(sys.version_info[0])
minor = str(sys.version_info[1])
pyfiles = []
pyfiles.append(
os.path.join(
prefix, "share", "doc", "python%s%s" % (major, minor),
"valgrind-python.supp"))
pyfiles.append(
os.path.join(prefix, "lib", "valgrind", "python%s.supp" % major))
pyfiles.append(
os.path.join(
prefix, "share", "doc", "python%s-devel" % major,
"valgrind-python.supp"))
pyfiles.append(os.path.join(prefix, "lib", "valgrind", "python.supp"))
files = []
for f in pyfiles:
if os.path.isfile(f):
files.append(f)
break
files.append(os.path.join(
prefix, "share", "glib-2.0", "valgrind", "glib.supp"))
return [f for f in files if os.path.isfile(f)]
def get_real_prefix():
"""Returns the base Python prefix, even in a virtualenv/venv"""
return getattr(sys, "base_prefix", getattr(sys, "real_prefix", sys.prefix))
def get_suppression_files():
"""Returns a list of valgrind suppression files"""
prefixes = [
sys.prefix,
get_real_prefix(),
pkg_config_parse("--variable=prefix", "glib-2.0")[0],
]
files = []
for prefix in prefixes:
files.extend(get_suppression_files_for_prefix(prefix))
files.append(os.path.join(get_script_dir(), "tests", "valgrind.supp"))
return sorted(set(files))
class test(Command):
user_options = [
("valgrind", None, "run tests under valgrind"),
("valgrind-log-file=", None, "save logs instead of printing them"),
("gdb", None, "run tests under gdb"),
("no-capture", "s", "don't capture test output"),
]
def initialize_options(self):
self.valgrind = None
self.valgrind_log_file = None
self.gdb = None
self.no_capture = None
def finalize_options(self):
self.valgrind = bool(self.valgrind)
if self.valgrind_log_file and not self.valgrind:
raise DistutilsOptionError("valgrind not enabled")
self.gdb = bool(self.gdb)
self.no_capture = bool(self.no_capture)
def run(self):
cmd = self.reinitialize_command("build_tests")
cmd.ensure_finalized()
cmd.run()
env = os.environ.copy()
env.pop("MSYSTEM", None)
if self.no_capture:
env["PYGI_TEST_VERBOSE"] = "1"
env["MALLOC_PERTURB_"] = "85"
env["MALLOC_CHECK_"] = "3"
env["G_SLICE"] = "debug-blocks"
pre_args = []
if self.valgrind:
env["G_SLICE"] = "always-malloc"
env["G_DEBUG"] = "gc-friendly"
env["PYTHONMALLOC"] = "malloc"
pre_args += [
"valgrind", "--leak-check=full", "--show-possibly-lost=no",
"--num-callers=20", "--child-silent-after-fork=yes",
] + ["--suppressions=" + f for f in get_suppression_files()]
if self.valgrind_log_file:
pre_args += ["--log-file=" + self.valgrind_log_file]
if self.gdb:
env["PYGI_TEST_GDB"] = "1"
pre_args += ["gdb", "--args"]
if pre_args:
log.info(" ".join(pre_args))
tests_dir = os.path.join(get_script_dir(), "tests")
sys.exit(subprocess.call(pre_args + [
sys.executable,
os.path.join(tests_dir, "runtests.py"),
], env=env))
class quality(Command):
description = "run code quality tests"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
status = subprocess.call([
sys.executable, "-m", "flake8",
], cwd=get_script_dir())
if status != 0:
raise SystemExit(status)
def get_script_dir():
return os.path.dirname(os.path.realpath(__file__))
def get_pycairo_include_dir():
"""Returns the best guess at where to find the pycairo headers.
A bit convoluted because we have to deal with multiple pycairo
versions.
Raises if pycairo isn't found or it's too old.
"""
pkg_config_name = get_pycairo_pkg_config_name()
min_version = get_version_requirement(pkg_config_name)
min_version_info = tuple(int(p) for p in min_version.split("."))
def check_path(include_dir):
log.info("pycairo: trying include directory: %r" % include_dir)
header_path = os.path.join(include_dir, "%s.h" % pkg_config_name)
if os.path.exists(header_path):
log.info("pycairo: found %r" % header_path)
return True
log.info("pycairo: header file (%r) not found" % header_path)
return False
def find_path(paths):
for p in reversed(paths):
if check_path(p):
return p
def find_new_api():
log.info("pycairo: new API")
import cairo
if cairo.version_info < min_version_info:
raise DistutilsSetupError(
"pycairo >= %s required, %s found." % (
min_version, ".".join(map(str, cairo.version_info))))
if hasattr(cairo, "get_include"):
return [cairo.get_include()]
log.info("pycairo: no get_include()")
return []
def find_old_api():
log.info("pycairo: old API")
import cairo
if cairo.version_info < min_version_info:
raise DistutilsSetupError(
"pycairo >= %s required, %s found." % (
min_version, ".".join(map(str, cairo.version_info))))
location = os.path.dirname(os.path.abspath(cairo.__path__[0]))
log.info("pycairo: found %r" % location)
def samefile(src, dst):
# Python 2 on Windows doesn't have os.path.samefile, so we have to
# provide a fallback
if hasattr(os.path, "samefile"):
return os.path.samefile(src, dst)
os.stat(src)
os.stat(dst)
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def get_sys_path(location, name):
# Returns the sysconfig path for a distribution, or None
for scheme in sysconfig.get_scheme_names():
for path_type in ["platlib", "purelib"]:
path = sysconfig.get_path(path_type, scheme)
try:
if samefile(path, location):
return sysconfig.get_path(name, scheme)
except EnvironmentError:
pass
data_path = get_sys_path(location, "data") or sys.prefix
return [os.path.join(data_path, "include", "pycairo")]
def find_pkg_config():
log.info("pycairo: pkg-config")
pkg_config_version_check(pkg_config_name, min_version)
return pkg_config_parse("--cflags-only-I", pkg_config_name)
# First the new get_include() API added in >1.15.6
include_dir = find_path(find_new_api())
if include_dir is not None:
return include_dir
# Then try to find it in the data prefix based on the module path.
# This works with many virtualenv/userdir setups, but not all apparently,
# see https://gitlab.gnome.org/GNOME/pygobject/issues/150
include_dir = find_path(find_old_api())
if include_dir is not None:
return include_dir
# Finally, fall back to pkg-config
include_dir = find_path(find_pkg_config())
if include_dir is not None:
return include_dir
raise DistutilsSetupError("Could not find pycairo headers")
def add_ext_pkg_config_dep(ext, compiler_type, name):
msvc_libraries = {
"glib-2.0": ["glib-2.0"],
"gio-2.0": ["gio-2.0", "gobject-2.0", "glib-2.0"],
"gobject-introspection-1.0":
["girepository-1.0", "gobject-2.0", "glib-2.0"],
"cairo": ["cairo"],
"cairo-gobject":
["cairo-gobject", "cairo", "gobject-2.0", "glib-2.0"],
"libffi": ["ffi"],
}
def add(target, new):
for entry in new:
if entry not in target:
target.append(entry)
fallback_libs = msvc_libraries[name]
if compiler_type == "msvc":
# assume that INCLUDE and LIB contains the right paths
add(ext.libraries, fallback_libs)
else:
min_version = get_version_requirement(name)
pkg_config_version_check(name, min_version)
add(ext.include_dirs, pkg_config_parse("--cflags-only-I", name))
add(ext.library_dirs, pkg_config_parse("--libs-only-L", name))
add(ext.libraries, pkg_config_parse("--libs-only-l", name))
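# --- Editor's illustrative sketch (not part of the original setup.py) ---
# pkg_config_parse() used above is defined earlier in this file and is not
# shown in this excerpt.  A minimal, hypothetical helper doing the same job
# would shell out to pkg-config and strip the two-character option prefix
# ("-I", "-L" or "-l") from every returned flag:
def _sketch_pkg_config_parse(opt, name):
    import subprocess
    raw = subprocess.check_output(["pkg-config", opt, name])
    if isinstance(raw, bytes):
        raw = raw.decode("utf-8")
    # e.g. opt="--cflags-only-I", name="glib-2.0"
    #   -> ["-I/usr/include/glib-2.0", ...] -> ["/usr/include/glib-2.0", ...]
    return [flag[2:] for flag in raw.split()]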
def add_ext_compiler_flags(ext, compiler, _cache={}):
cache_key = compiler.compiler[0]
if cache_key not in _cache:
args = [
"-Wall",
"-Warray-bounds",
"-Wcast-align",
"-Wdeclaration-after-statement",
"-Wduplicated-branches",
"-Wextra",
"-Wformat=2",
"-Wformat-nonliteral",
"-Wformat-security",
"-Wimplicit-function-declaration",
"-Winit-self",
"-Wjump-misses-init",
"-Wlogical-op",
"-Wmissing-declarations",
"-Wmissing-format-attribute",
"-Wmissing-include-dirs",
"-Wmissing-noreturn",
"-Wmissing-prototypes",
"-Wnested-externs",
"-Wnull-dereference",
"-Wold-style-definition",
"-Wpacked",
"-Wpointer-arith",
"-Wrestrict",
"-Wreturn-type",
"-Wshadow",
"-Wsign-compare",
"-Wstrict-aliasing",
"-Wstrict-prototypes",
"-Wundef",
"-Wunused-but-set-variable",
"-Wwrite-strings",
]
if sys.version_info[:2] != (3, 4):
args += [
"-Wswitch-default",
]
args += [
"-Wno-incompatible-pointer-types-discards-qualifiers",
"-Wno-missing-field-initializers",
"-Wno-unused-parameter",
"-Wno-discarded-qualifiers",
"-Wno-sign-conversion",
"-Wno-cast-function-type",
"-Wno-int-conversion",
]
# silence clang for unused gcc CFLAGS added by Debian
args += [
"-Wno-unused-command-line-argument",
]
args += [
"-fno-strict-aliasing",
"-fvisibility=hidden",
]
# force GCC to use colors
if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
args.append("-fdiagnostics-color")
_cache[cache_key] = filter_compiler_arguments(compiler, args)
ext.extra_compile_args += _cache[cache_key]
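# --- Editor's illustrative sketch (not part of the original setup.py) ---
# filter_compiler_arguments() is imported from a helper module that is not
# shown in this excerpt.  Conceptually it keeps only the warning flags the
# compiler actually accepts; a rough, hypothetical stand-in (Python 3 only,
# for brevity) could probe each flag by compiling an empty translation unit:
def _sketch_filter_compiler_arguments(compiler_exe, args):
    import os
    import subprocess
    import tempfile
    supported = []
    with tempfile.TemporaryDirectory() as tmp:
        src = os.path.join(tmp, "probe.c")
        with open(src, "w") as h:
            h.write("int main(void) { return 0; }\n")
        for arg in args:
            # -Werror turns "unknown warning option" diagnostics into errors
            ret = subprocess.call(
                [compiler_exe, "-Werror", arg, "-c", src,
                 "-o", os.path.join(tmp, "probe.o")],
                stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            if ret == 0:
                supported.append(arg)
    return supported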
du_build_ext = get_command_class("build_ext")
class build_ext(du_build_ext):
def initialize_options(self):
du_build_ext.initialize_options(self)
self.compiler_type = None
def finalize_options(self):
du_build_ext.finalize_options(self)
self.compiler_type = new_compiler(compiler=self.compiler).compiler_type
def _write_config_h(self):
script_dir = get_script_dir()
target = os.path.join(script_dir, "config.h")
versions = get_versions()
content = u"""
/* Configuration header created by setup.py - do not edit */
#ifndef _CONFIG_H
#define _CONFIG_H 1
#define PYGOBJECT_MAJOR_VERSION %(PYGOBJECT_MAJOR_VERSION)s
#define PYGOBJECT_MINOR_VERSION %(PYGOBJECT_MINOR_VERSION)s
#define PYGOBJECT_MICRO_VERSION %(PYGOBJECT_MICRO_VERSION)s
#define VERSION "%(VERSION)s"
#endif /* _CONFIG_H */
""" % versions
try:
with io.open(target, 'r', encoding="utf-8") as h:
if h.read() == content:
return
except EnvironmentError:
pass
with io.open(target, 'w', encoding="utf-8") as h:
h.write(content)
def _setup_extensions(self):
ext = {e.name: e for e in self.extensions}
compiler = new_compiler(compiler=self.compiler)
customize_compiler(compiler)
def add_dependency(ext, name):
add_ext_pkg_config_dep(ext, compiler.compiler_type, name)
def add_pycairo(ext):
ext.include_dirs += [get_pycairo_include_dir()]
gi_ext = ext["gi._gi"]
add_dependency(gi_ext, "glib-2.0")
add_dependency(gi_ext, "gio-2.0")
add_dependency(gi_ext, "gobject-introspection-1.0")
add_dependency(gi_ext, "libffi")
add_ext_compiler_flags(gi_ext, compiler)
if WITH_CAIRO:
gi_cairo_ext = ext["gi._gi_cairo"]
add_dependency(gi_cairo_ext, "glib-2.0")
add_dependency(gi_cairo_ext, "gio-2.0")
add_dependency(gi_cairo_ext, "gobject-introspection-1.0")
add_dependency(gi_cairo_ext, "libffi")
add_dependency(gi_cairo_ext, "cairo")
add_dependency(gi_cairo_ext, "cairo-gobject")
add_pycairo(gi_cairo_ext)
add_ext_compiler_flags(gi_cairo_ext, compiler)
def run(self):
self._write_config_h()
self._setup_extensions()
du_build_ext.run(self)
class install_pkgconfig(Command):
description = "install .pc file"
user_options = []
def initialize_options(self):
self.install_base = None
self.install_platbase = None
self.install_data = None
self.compiler_type = None
self.outfiles = []
def finalize_options(self):
self.set_undefined_options(
'install',
('install_base', 'install_base'),
('install_data', 'install_data'),
('install_platbase', 'install_platbase'),
)
self.set_undefined_options(
'build_ext',
('compiler_type', 'compiler_type'),
)
def get_outputs(self):
return self.outfiles
def get_inputs(self):
return []
def run(self):
cmd = self.distribution.get_command_obj("bdist_wheel", create=False)
if cmd is not None:
log.warn(
"Python wheels and pkg-config is not compatible. "
"No pkg-config file will be included in the wheel. Install "
"from source if you need one.")
return
if self.compiler_type == "msvc":
return
script_dir = get_script_dir()
pkgconfig_in = os.path.join(script_dir, "pygobject-3.0.pc.in")
with io.open(pkgconfig_in, "r", encoding="utf-8") as h:
content = h.read()
config = {
"prefix": self.install_base,
"exec_prefix": self.install_platbase,
"includedir": "${prefix}/include",
"datarootdir": "${prefix}/share",
"datadir": "${datarootdir}",
"VERSION": PYGOBJECT_VERISON,
}
for key, value in config.items():
content = content.replace("@%s@" % key, value)
libdir = os.path.dirname(get_python_lib(True, True, self.install_data))
pkgconfig_dir = os.path.join(libdir, "pkgconfig")
self.mkpath(pkgconfig_dir)
target = os.path.join(pkgconfig_dir, "pygobject-3.0.pc")
with io.open(target, "w", encoding="utf-8") as h:
h.write(content)
self.outfiles.append(target)
du_install = get_command_class("install")
class install(du_install):
sub_commands = du_install.sub_commands + [
("install_pkgconfig", lambda self: True),
]
def main():
script_dir = get_script_dir()
pkginfo = parse_pkg_info(script_dir)
gi_dir = os.path.join(script_dir, "gi")
sources = [
os.path.join("gi", n) for n in os.listdir(gi_dir)
if os.path.splitext(n)[-1] == ".c"
]
cairo_sources = [os.path.join("gi", "pygi-foreign-cairo.c")]
for s in cairo_sources:
sources.remove(s)
readme = os.path.join(script_dir, "README.rst")
with io.open(readme, encoding="utf-8") as h:
long_description = h.read()
ext_modules = []
install_requires = []
gi_ext = Extension(
name='gi._gi',
sources=sources,
include_dirs=[script_dir, gi_dir],
depends=list_headers(script_dir) + list_headers(gi_dir),
define_macros=[("PY_SSIZE_T_CLEAN", None)],
)
ext_modules.append(gi_ext)
if WITH_CAIRO:
gi_cairo_ext = Extension(
name='gi._gi_cairo',
sources=cairo_sources,
include_dirs=[script_dir, gi_dir],
depends=list_headers(script_dir) + list_headers(gi_dir),
define_macros=[("PY_SSIZE_T_CLEAN", None)],
)
ext_modules.append(gi_cairo_ext)
install_requires.append(
"pycairo>=%s" % get_version_requirement(
get_pycairo_pkg_config_name()))
version = pkginfo["Version"]
if is_dev_version():
# This makes it a PEP 440 pre-release and pip will only install it from
# PyPI in case --pre is passed.
version += ".dev0"
setup(
name=pkginfo["Name"],
version=version,
description=pkginfo["Summary"],
url=pkginfo["Home-page"],
author=pkginfo["Author"],
author_email=pkginfo["Author-email"],
maintainer=pkginfo["Maintainer"],
maintainer_email=pkginfo["Maintainer-email"],
license=pkginfo["License"],
long_description=long_description,
platforms=pkginfo.get_all("Platform"),
classifiers=pkginfo.get_all("Classifier"),
packages=[
"pygtkcompat",
"gi",
"gi.repository",
"gi.overrides",
],
ext_modules=ext_modules,
cmdclass={
"build_ext": build_ext,
"distcheck": distcheck,
"sdist_gnome": sdist_gnome,
"build_tests": build_tests,
"test": test,
"quality": quality,
"install": install,
"install_pkgconfig": install_pkgconfig,
},
install_requires=install_requires,
data_files=[
('include/pygobject-3.0', ['gi/pygobject.h']),
],
zip_safe=False,
)
if __name__ == "__main__":
main()
| pexip/pygobject | setup.py | Python | lgpl-2.1 | 38,639 |
#!/usr/bin/env python
import numpy as np
# Build qc.dat: a header line "num_node time_step", then one row per node and
# per time step with columns x, y, z, qc, ts, two zero placeholders and time.
num_node = 11
time_step = 2
time = [0, 1]
n = np.linspace(0, 1, num_node)  # nodes evenly spaced along 0..1
x = n*0
y = n
z = n*0
qc = 0.1*n
ts = 300*n
f = open('qc.dat', 'w')
f.write(str(num_node)+ ' ' +str(time_step) +'\n')
for t in range(0, time_step):
for i in range(0, num_node):
f.write(str(x[i]) + ' ' + str(y[i]) + ' ' + str(z[i]) +' ' + str(qc[i]) +' '+str(ts[i]) +' '+ str(0) +' ' + str(0) + ' ' + str(time[t]) +'\n')
f.close()
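# --- Editor's illustrative check (not part of the original script) ---
# qc.dat starts with the "num_node time_step" header line, followed by
# num_node rows per time step, each with eight columns (x, y, z, qc, ts, 0,
# 0, time).  It can be read back with numpy to sanity-check the layout:
data = np.loadtxt('qc.dat', skiprows=1)
assert data.shape == (num_node * time_step, 8)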
| CARDCFEM/Roshan | qc.py | Python | lgpl-2.1 | 464 |
# Copyright 2015, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
from .mainwindow import ApolMainWindow
| TresysTechnology/setools | setoolsgui/apol/__init__.py | Python | lgpl-2.1 | 763 |
from collections import Counter
from nlp import nlp_utils
from unittest import TestCase
__author__ = 'fpena'
empty_paragraph = ""
paragraph1 =\
"Good morning Dr. Adams. The patient is waiting for you in room number 3."
paragraph2 = "I'm going to my parent's place over the summer! I can't wait " \
"to get there"
review_text1 = "We had dinner there last night. The food was delicious. " \
"Definitely, is the best restaurant in town."
review_text2 = "Small bar, good music, good beer, bad food"
review_text6 = "My first trip to Phoenix couldn't have been better. I went " \
"to JW for 3 nights and loved every minute of it. It's " \
"beautiful, the pools (about 6 of them) were very nice, and " \
"the rooms were very spacious.\nThe ONLY thing I didn't like " \
"was the parking. It's really far away from the lobby/check " \
"in area. So if you have a lot of stuff to bring in, try to " \
"up front. \nOther than that, I experienced nothing even " \
"slightly bad at JW.\n\nWould definitely go back again!"
review_text9 = "Beef gyros are always good here."
class TestNlpUtils(TestCase):
def test_get_sentences(self):
actual_value = nlp_utils.get_sentences(empty_paragraph)
expected_value = []
self.assertEqual(actual_value, expected_value)
actual_value = nlp_utils.get_sentences(paragraph1)
expected_value = [
'Good morning Dr. Adams.',
'The patient is waiting for you in room number 3.'
]
self.assertEqual(actual_value, expected_value)
actual_value = len(nlp_utils.get_sentences(review_text6))
expected_value = 8
self.assertEqual(actual_value, expected_value)
def test_get_words(self):
actual_value = nlp_utils.get_words(empty_paragraph)
expected_value = []
self.assertEqual(actual_value, expected_value)
actual_value = nlp_utils.get_words(paragraph1)
expected_value = [
'Good', 'morning', 'Dr.', 'Adams', '.',
'The', 'patient', 'is', 'waiting', 'for', 'you', 'in', 'room',
'number', '3', '.'
]
self.assertEqual(actual_value, expected_value)
actual_value = len(nlp_utils.get_words(paragraph2))
expected_value = 19
self.assertEqual(actual_value, expected_value)
actual_value = len(nlp_utils.get_words(review_text6))
expected_value = 106
self.assertEqual(actual_value, expected_value)
def test_tag_words(self):
actual_value = nlp_utils.tag_words(empty_paragraph)
expected_value = []
self.assertEqual(actual_value, expected_value)
actual_value = nlp_utils.tag_words(paragraph1)
expected_value = [
('good', 'JJ'), ('morning', 'NN'), ('dr.', 'NN'), ('adams', 'NN'),
('.', '.'), ('the', 'DT'), ('patient', 'NN'), ('is', 'VBZ'),
('waiting', 'VBG'), ('for', 'IN'), ('you', 'PRP'), ('in', 'IN'),
('room', 'NN'), ('number', 'NN'), ('3', 'CD'), ('.', '.')
]
self.assertEqual(actual_value, expected_value)
actual_value = nlp_utils.tag_words(paragraph2)
expected_value = [
('i', 'NN'), ("'m", 'VBP'), ('going', 'VBG'), ('to', 'TO'),
('my', 'PRP$'), ('parent', 'NN'), ("'s", 'POS'),
('place', 'NN'), ('over', 'IN'), ('the', 'DT'),
('summer', 'NN'), ('!', '.'),
('i', 'NN'), ('ca', 'MD'), ("n't", 'RB'), ('wait', 'VB'),
('to', 'TO'), ('get', 'VB'), ('there', 'RB')
]
self.assertEqual(actual_value, expected_value)
def test_count_verbs(self):
tagged_words = nlp_utils.tag_words(empty_paragraph)
counts = Counter(tag for word, tag in tagged_words)
actual_value = nlp_utils.count_verbs(counts)
expected_value = 0
self.assertEqual(actual_value, expected_value)
tagged_words = nlp_utils.tag_words(paragraph1)
counts = Counter(tag for word, tag in tagged_words)
actual_value = nlp_utils.count_verbs(counts)
expected_value = 2
self.assertEqual(actual_value, expected_value)
tagged_words = nlp_utils.tag_words(paragraph2)
counts = Counter(tag for word, tag in tagged_words)
actual_value = nlp_utils.count_verbs(counts)
expected_value = 4
self.assertEqual(actual_value, expected_value)
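# --- Editor's illustrative sketch (not part of the original test module) ---
# The nlp_utils module under test is not shown in this excerpt.  Helpers with
# the behaviour these tests expect are commonly built on NLTK; the rough,
# hypothetical versions below (names and details are assumptions, not the
# project's actual code) only illustrate that behaviour:
def _sketch_get_sentences(text):
    import nltk
    return nltk.sent_tokenize(text)
def _sketch_get_words(text):
    import nltk
    return nltk.word_tokenize(text)
def _sketch_tag_words(text):
    import nltk
    return nltk.pos_tag([w.lower() for w in nltk.word_tokenize(text)])
def _sketch_count_verbs(tag_counts):
    # Counter of POS tags -> number of verb tokens (tags starting with "VB")
    return sum(n for tag, n in tag_counts.items() if tag.startswith('VB'))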
| melqkiades/yelp | source/python/nlp/tests/test_nlp_utils.py | Python | lgpl-2.1 | 4,551 |
import unittest
import mock
import __builtin__
import cleanup
import util
class FakeXapi(object):
def __init__(self):
self.srRecord = {
'name_label': 'dummy'
}
def isPluggedHere(self):
return True
def isMaster(self):
return True
class AlwaysLockedLock(object):
def acquireNoblock(self):
return False
class AlwaysFreeLock(object):
def acquireNoblock(self):
return True
class TestRelease(object):
def release(self):
        pass
class IrrelevantLock(object):
pass
def create_cleanup_sr():
xapi = FakeXapi()
return cleanup.SR(uuid=None, xapi=xapi, createLock=False, force=False)
class TestSR(unittest.TestCase):
def setUp(self):
self.sleep_patcher = mock.patch('cleanup.time.sleep')
self.sleep_patcher.start()
def tearDown(self):
self.sleep_patcher.stop()
def setup_abort_flag(self, ipc_mock, should_abort=False):
flag = mock.Mock()
flag.test = mock.Mock(return_value=should_abort)
ipc_mock.return_value = flag
def setup_flag_set(self, ipc_mock):
flag = mock.Mock()
flag.set = mock.Mock(return_value=False)
ipc_mock.return_value = flag
    def setup_mock_sr(self, mock_sr):
xapi = FakeXapi()
mock_sr.configure_mock(uuid=1234, xapi=xapi,
createLock=False, force=False)
def mock_cleanup_locks(self):
cleanup.lockActive = TestRelease()
cleanup.lockActive.release = mock.Mock(return_value=None)
cleanup.lockRunning = TestRelease()
cleanup.lockRunning.release = mock.Mock(return_value=None)
def test_lock_if_already_locked(self):
"""
Given an already locked SR, a lock call
increments the lock counter
"""
sr = create_cleanup_sr()
sr._srLock = IrrelevantLock()
sr._locked = 1
sr.lock()
self.assertEquals(2, sr._locked)
def test_lock_if_no_locking_is_used(self):
"""
Given no srLock present, the lock operations don't touch
the counter
"""
sr = create_cleanup_sr()
sr._srLock = None
sr.lock()
self.assertEquals(0, sr._locked)
@mock.patch('cleanup.IPCFlag', autospec=True)
def test_lock_succeeds_if_lock_is_acquired(
self,
mock_ipc_flag):
"""
After performing a lock, the counter equals to 1
"""
self.setup_abort_flag(mock_ipc_flag)
sr = create_cleanup_sr()
sr._srLock = AlwaysFreeLock()
sr.lock()
self.assertEquals(1, sr._locked)
@mock.patch('cleanup.IPCFlag', autospec=True)
def test_lock_raises_exception_if_abort_requested(
self,
mock_ipc_flag):
"""
If IPC abort was requested, lock raises AbortException
"""
self.setup_abort_flag(mock_ipc_flag, should_abort=True)
sr = create_cleanup_sr()
sr._srLock = AlwaysLockedLock()
self.assertRaises(cleanup.AbortException, sr.lock)
@mock.patch('cleanup.IPCFlag', autospec=True)
def test_lock_raises_exception_if_unable_to_acquire_lock(
self,
mock_ipc_flag):
"""
If the lock is busy, SMException is raised
"""
self.setup_abort_flag(mock_ipc_flag)
sr = create_cleanup_sr()
sr._srLock = AlwaysLockedLock()
self.assertRaises(util.SMException, sr.lock)
@mock.patch('cleanup.IPCFlag', autospec=True)
def test_lock_leaves_sr_consistent_if_unable_to_acquire_lock(
self,
mock_ipc_flag):
"""
If the lock is busy, the lock counter is not incremented
"""
self.setup_abort_flag(mock_ipc_flag)
sr = create_cleanup_sr()
sr._srLock = AlwaysLockedLock()
try:
sr.lock()
except (util.SMException, cleanup.AbortException) as e:
pass
self.assertEquals(0, sr._locked)
def test_gcPause_fist_point_legal(self):
"""
Make sure the fist point has been added to the array of legal
fist points.
"""
self.assertTrue(util.fistpoint.is_legal(util.GCPAUSE_FISTPOINT))
@mock.patch('util.fistpoint', autospec=True)
@mock.patch('cleanup.SR', autospec=True)
@mock.patch('cleanup.Util.runAbortable')
def test_gcPause_calls_fist_point(
self,
mock_abortable,
mock_sr,
mock_fist):
"""
Call fist point if active and not abortable sleep.
"""
self.setup_mock_sr(mock_sr)
# Fake that we have an active fist point.
mock_fist.is_active.return_value = True
cleanup._gcLoopPause(mock_sr, False)
# Make sure we check for correct fist point.
mock_fist.is_active.assert_called_with(util.GCPAUSE_FISTPOINT)
# Make sure we are calling the fist point.
mock_fist.activate_custom_fn.assert_called_with(util.GCPAUSE_FISTPOINT,
mock.ANY)
# And don't call abortable sleep
mock_abortable.assert_not_called()
@mock.patch('util.fistpoint', autospec=True)
@mock.patch('cleanup.SR', autospec=True)
@mock.patch('cleanup.Util.runAbortable')
def test_gcPause_calls_abortable_sleep(
self,
mock_abortable,
mock_sr,
mock_fist_point):
"""
Call abortable sleep if fist point is not active.
"""
self.setup_mock_sr(mock_sr)
# Fake that the fist point is not active.
mock_fist_point.is_active.return_value = False
cleanup._gcLoopPause(mock_sr, False)
# Make sure we check for the active fist point.
mock_fist_point.is_active.assert_called_with(util.GCPAUSE_FISTPOINT)
# Fist point is not active so call abortable sleep.
mock_abortable.assert_called_with(mock.ANY, None, mock_sr.uuid,
mock.ANY, cleanup.VDI.POLL_INTERVAL,
cleanup.GCPAUSE_DEFAULT_SLEEP * 1.1)
@mock.patch('cleanup.SR', autospec=True)
@mock.patch('cleanup._abort')
def test_lock_released_by_abort_when_held(
self,
mock_abort,
mock_sr):
"""
If _abort returns True make sure we release the lockActive which will
have been held by _abort, also check that we return True.
"""
self.setup_mock_sr(mock_sr)
# Fake that abort returns True, so we hold lockActive.
mock_abort.return_value = True
# Setup mock of release function.
cleanup.lockActive = TestRelease()
cleanup.lockActive.release = mock.Mock(return_value=None)
ret = cleanup.abort(mock_sr, False)
# Pass on the return from _abort.
self.assertEquals(True, ret)
# We hold lockActive so make sure we release it.
self.assertEquals(cleanup.lockActive.release.call_count, 1)
@mock.patch('cleanup.SR', autospec=True)
@mock.patch('cleanup._abort')
def test_lock_not_released_by_abort_when_not_held(
self,
mock_abort,
mock_sr):
"""
If _abort returns False don't release lockActive and ensure that
False returned by _abort is passed on.
"""
self.setup_mock_sr(mock_sr)
# Fake _abort returning False.
mock_abort.return_value = False
# Mock lock release function.
cleanup.lockActive = TestRelease()
cleanup.lockActive.release = mock.Mock(return_value=None)
ret = cleanup.abort(mock_sr, False)
# Make sure pass on False returned by _abort
self.assertEquals(False, ret)
# Make sure we did not release the lock as we don't have it.
self.assertEquals(cleanup.lockActive.release.call_count, 0)
@mock.patch('cleanup._abort')
@mock.patch.object(__builtin__, 'raw_input')
def test_abort_optional_renable_active_held(
self,
mock_raw_input,
mock_abort):
"""
        The CLI has an option to re-enable GC; make sure we release the
        locks correctly if _abort returns True.
"""
mock_abort.return_value = True
mock_raw_input.return_value = None
self.mock_cleanup_locks()
cleanup.abort_optional_reenable(None)
# Make sure released lockActive
self.assertEquals(cleanup.lockActive.release.call_count, 1)
# Make sure released lockRunning
self.assertEquals(cleanup.lockRunning.release.call_count, 1)
@mock.patch('cleanup._abort')
@mock.patch.object(__builtin__, 'raw_input')
def test_abort_optional_renable_active_not_held(
self,
mock_raw_input,
mock_abort):
"""
        The CLI has an option to re-enable GC; make sure we release the
        locks correctly if _abort returns False.
"""
mock_abort.return_value = False
mock_raw_input.return_value = None
self.mock_cleanup_locks()
cleanup.abort_optional_reenable(None)
# Don't release lockActive, we don't hold it.
self.assertEquals(cleanup.lockActive.release.call_count, 0)
# Make sure released lockRunning
self.assertEquals(cleanup.lockRunning.release.call_count, 1)
@mock.patch('cleanup.init')
def test__abort_returns_true_when_get_lock(
self,
mock_init):
"""
_abort should return True when it can get
the lockActive straight off the bat.
"""
cleanup.lockActive = AlwaysFreeLock()
ret = cleanup._abort(None)
self.assertEquals(ret, True)
@mock.patch('cleanup.IPCFlag', autospec=True)
@mock.patch('cleanup.init')
def test__abort_return_false_if_flag_not_set(
self,
mock_init,
mock_ipcflag):
"""
If flag not set return False.
"""
mock_init.return_value = None
# Fake the flag returning False.
mock_ipcflag.return_value.set.return_value = False
# Not important for this test but we call it so mock it.
cleanup.lockActive = AlwaysLockedLock()
ret = cleanup._abort(None)
self.assertEqual(mock_ipcflag.return_value.set.call_count, 1)
self.assertEqual(ret, False)
@mock.patch('cleanup.IPCFlag', autospec=True)
@mock.patch('cleanup.init')
def test__abort_should_raise_if_cant_get_lock(
self,
mock_init,
mock_ipcflag):
"""
_abort should raise an exception if it completely
fails to get lockActive.
"""
mock_init.return_value = None
# Fake return true so we don't bomb out straight away.
mock_ipcflag.return_value.set.return_value = True
# Fake never getting the lock.
cleanup.lockActive = AlwaysLockedLock()
with self.assertRaises(util.CommandException):
cleanup._abort(None)
@mock.patch('cleanup.IPCFlag', autospec=True)
@mock.patch('cleanup.init')
    def test__abort_should_succeed_if_acquires_on_second_attempt(
self,
mock_init,
mock_ipcflag):
"""
        _abort should succeed if it gets the lock on the second attempt.
"""
mock_init.return_value = None
# Fake return true so we don't bomb out straight away.
mock_ipcflag.return_value.set.return_value = True
# Use side effect to fake failing to get the lock
# on the first call, succeeding on the second.
mocked_lock = AlwaysLockedLock()
mocked_lock.acquireNoblock = mock.Mock()
mocked_lock.acquireNoblock.side_effect = [False, True]
cleanup.lockActive = mocked_lock
ret = cleanup._abort(None)
self.assertEqual(mocked_lock.acquireNoblock.call_count, 2)
self.assertEqual(ret, True)
@mock.patch('cleanup.IPCFlag', autospec=True)
@mock.patch('cleanup.init')
def test__abort_should_fail_if_reaches_maximum_retries_for_lock(
self,
mock_init,
mock_ipcflag):
"""
_abort should fail if we max out the number of attempts for
obtaining the lock.
"""
mock_init.return_value = None
# Fake return true so we don't bomb out straight away.
mock_ipcflag.return_value.set.return_value = True
# Fake a series of failed attempts to get the lock.
mocked_lock = AlwaysLockedLock()
mocked_lock.acquireNoblock = mock.Mock()
# +1 to SR.LOCK_RETRY_ATTEMPTS as we attempt to get lock
# once outside the loop.
side_effect = [False]*(cleanup.SR.LOCK_RETRY_ATTEMPTS + 1)
# Make sure we are not trying once again
side_effect.append(True)
mocked_lock.acquireNoblock.side_effect = side_effect
cleanup.lockActive = mocked_lock
# We've failed repeatedly to gain the lock so raise exception.
with self.assertRaises(util.CommandException):
cleanup._abort(None)
self.assertEqual(mocked_lock.acquireNoblock.call_count,
cleanup.SR.LOCK_RETRY_ATTEMPTS+1)
@mock.patch('cleanup.IPCFlag', autospec=True)
@mock.patch('cleanup.init')
def test__abort_succeeds_if_gets_lock_on_final_attempt(
self,
mock_init,
mock_ipcflag):
"""
_abort succeeds if we get the lockActive on the final retry
"""
mock_init.return_value = None
mock_ipcflag.return_value.set.return_value = True
mocked_lock = AlwaysLockedLock()
mocked_lock.acquireNoblock = mock.Mock()
# +1 to SR.LOCK_RETRY_ATTEMPTS as we attempt to get lock
# once outside the loop.
side_effect = [False]*(cleanup.SR.LOCK_RETRY_ATTEMPTS)
# On the final attempt we succeed.
side_effect.append(True)
mocked_lock.acquireNoblock.side_effect = side_effect
cleanup.lockActive = mocked_lock
ret = cleanup._abort(None)
self.assertEqual(mocked_lock.acquireNoblock.call_count,
cleanup.SR.LOCK_RETRY_ATTEMPTS+1)
self.assertEqual(ret, True)
| chandrikas/sm | tests/test_cleanup.py | Python | lgpl-2.1 | 14,345 |
# This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import unittest
from gosa.backend.plugins.samba.logonhours import *
class SambaLogonHoursTestCase(unittest.TestCase):
def setUp(self):
self.obj = SambaLogonHoursAttribute()
def test_values_match(self):
assert self.obj.values_match("test", "test") is True
assert self.obj.values_match("test", "test1") is False
assert self.obj.values_match("1", 1) is True
def test_is_valid_value(self):
val = "0" * 168
assert self.obj.is_valid_value([val]) is True
assert self.obj.is_valid_value([1]) is False
def test_convert_to_unicodestring(self):
assert self.obj._convert_to_unicodestring(["1" * 168]) == ['F' * 42]
def test_convert_from_string(self):
assert self.obj._convert_from_string(['F' * 42]) == ["1" * 168]
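# --- Editor's note (illustrative, not part of the original test) ---
# A samba logonHours value covers 24 hours x 7 days = 168 one-bit flags, and
# 168 bits pack into 168 / 4 = 42 hex digits, which is why the all-ones bit
# string above round-trips to 'F' * 42.  A minimal sketch of that packing
# (an illustration of the idea only, not GOsa's actual implementation):
def _sketch_pack_logon_hours(bits):
    assert len(bits) == 168
    return ''.join('%X' % int(bits[i:i + 4], 2) for i in range(0, 168, 4))
assert _sketch_pack_logon_hours('1' * 168) == 'F' * 42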
| gonicus/gosa | backend/src/tests/backend/plugins/samba/test_logonhours.py | Python | lgpl-2.1 | 1,020 |
# Copyright 2015-2016, Tresys Technology, LLC
# Copyright 2016, Chris PeBenito <pebenito@ieee.org>
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
from collections import defaultdict, namedtuple
from ..policyrep import IoctlSet, TERuletype
from ..policyrep.exception import RuleNotConditional, RuleUseError, TERuleNoFilename
from .conditional import ConditionalExprWrapper
from .descriptors import DiffResultDescriptor
from .difference import Difference, SymbolWrapper, Wrapper
modified_avrule_record = namedtuple("modified_avrule", ["rule",
"added_perms",
"removed_perms",
"matched_perms"])
modified_terule_record = namedtuple("modified_terule", ["rule", "added_default", "removed_default"])
def _avrule_expand_generator(rule_list, Wrapper):
"""
Generator that yields wrapped, expanded, av(x) rules with
unioned permission sets.
"""
items = dict()
for unexpanded_rule in rule_list:
for expanded_rule in unexpanded_rule.expand():
expanded_wrapped_rule = Wrapper(expanded_rule)
# create a hash table (dict) with the first rule
# as the key and value. Rules where permission sets should
# be unioned together have the same hash, so this will union
# the permissions together.
try:
items[expanded_wrapped_rule].origin.perms |= expanded_wrapped_rule.origin.perms
except KeyError:
items[expanded_wrapped_rule] = expanded_wrapped_rule
return items.keys()
def av_diff_template(ruletype):
"""
This is a template for the access vector diff functions.
Parameters:
ruletype The rule type, e.g. "allow".
"""
ruletype = TERuletype.lookup(ruletype)
def diff(self):
"""Generate the difference in rules between the policies."""
self.log.info(
"Generating {0} differences from {1.left_policy} to {1.right_policy}".
format(ruletype, self))
if not self._left_te_rules or not self._right_te_rules:
self._create_te_rule_lists()
added, removed, matched = self._set_diff(
_avrule_expand_generator(self._left_te_rules[ruletype], AVRuleWrapper),
_avrule_expand_generator(self._right_te_rules[ruletype], AVRuleWrapper))
modified = []
for left_rule, right_rule in matched:
# Criteria for modified rules
# 1. change to permissions
added_perms, removed_perms, matched_perms = self._set_diff(left_rule.perms,
right_rule.perms)
# the final set comprehension is to avoid having lists
# like [("perm1", "perm1"), ("perm2", "perm2")], as the
            # matched_perms returned from _set_diff is a set of tuples
if added_perms or removed_perms:
modified.append(modified_avrule_record(left_rule,
added_perms,
removed_perms,
set(p[0] for p in matched_perms)))
setattr(self, "added_{0}s".format(ruletype), added)
setattr(self, "removed_{0}s".format(ruletype), removed)
setattr(self, "modified_{0}s".format(ruletype), modified)
return diff
def avx_diff_template(ruletype):
"""
This is a template for the extended permission access vector diff functions.
Parameters:
ruletype The rule type, e.g. "allowxperm".
"""
ruletype = TERuletype.lookup(ruletype)
def diff(self):
"""Generate the difference in rules between the policies."""
self.log.info(
"Generating {0} differences from {1.left_policy} to {1.right_policy}".
format(ruletype, self))
if not self._left_te_rules or not self._right_te_rules:
self._create_te_rule_lists()
added, removed, matched = self._set_diff(
_avrule_expand_generator(self._left_te_rules[ruletype], AVRuleXpermWrapper),
_avrule_expand_generator(self._right_te_rules[ruletype], AVRuleXpermWrapper))
modified = []
for left_rule, right_rule in matched:
# Criteria for modified rules
# 1. change to permissions
added_perms, removed_perms, matched_perms = self._set_diff(left_rule.perms,
right_rule.perms)
# the final set comprehension is to avoid having lists
# like [("perm1", "perm1"), ("perm2", "perm2")], as the
            # matched_perms returned from _set_diff is a set of tuples
if added_perms or removed_perms:
modified.append(modified_avrule_record(left_rule,
IoctlSet(added_perms),
IoctlSet(removed_perms),
IoctlSet(p[0] for p in matched_perms)))
setattr(self, "added_{0}s".format(ruletype), added)
setattr(self, "removed_{0}s".format(ruletype), removed)
setattr(self, "modified_{0}s".format(ruletype), modified)
return diff
def te_diff_template(ruletype):
"""
This is a template for the type_* diff functions.
Parameters:
ruletype The rule type, e.g. "type_transition".
"""
ruletype = TERuletype.lookup(ruletype)
def diff(self):
"""Generate the difference in rules between the policies."""
self.log.info(
"Generating {0} differences from {1.left_policy} to {1.right_policy}".
format(ruletype, self))
if not self._left_te_rules or not self._right_te_rules:
self._create_te_rule_lists()
added, removed, matched = self._set_diff(
self._expand_generator(self._left_te_rules[ruletype], TERuleWrapper),
self._expand_generator(self._right_te_rules[ruletype], TERuleWrapper))
modified = []
for left_rule, right_rule in matched:
# Criteria for modified rules
# 1. change to default type
if SymbolWrapper(left_rule.default) != SymbolWrapper(right_rule.default):
modified.append(modified_terule_record(left_rule,
right_rule.default,
left_rule.default))
setattr(self, "added_{0}s".format(ruletype), added)
setattr(self, "removed_{0}s".format(ruletype), removed)
setattr(self, "modified_{0}s".format(ruletype), modified)
return diff
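# --- Editor's note (illustrative, not part of the original module) ---
# The three *_diff_template() factories above capture `ruletype` in a closure
# and return a `diff` function that is then attached to TERulesDifference as
# a class attribute (e.g. diff_allows = av_diff_template("allow") below), so
# one body of code yields a separate diff method per rule type.  A stripped
# down, hypothetical example of the same pattern:
def _sketch_make_greeter(greeting):
    def greet(self):
        return "%s, %s" % (greeting, self.name)
    return greet
class _SketchPerson(object):
    def __init__(self, name):
        self.name = name
    say_hello = _sketch_make_greeter("hello")  # the closure becomes a method
assert _SketchPerson("world").say_hello() == "hello, world"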
class TERulesDifference(Difference):
"""
Determine the difference in type enforcement rules
between two policies.
"""
diff_allows = av_diff_template("allow")
added_allows = DiffResultDescriptor("diff_allows")
removed_allows = DiffResultDescriptor("diff_allows")
modified_allows = DiffResultDescriptor("diff_allows")
diff_auditallows = av_diff_template("auditallow")
added_auditallows = DiffResultDescriptor("diff_auditallows")
removed_auditallows = DiffResultDescriptor("diff_auditallows")
modified_auditallows = DiffResultDescriptor("diff_auditallows")
diff_neverallows = av_diff_template("neverallow")
added_neverallows = DiffResultDescriptor("diff_neverallows")
removed_neverallows = DiffResultDescriptor("diff_neverallows")
modified_neverallows = DiffResultDescriptor("diff_neverallows")
diff_dontaudits = av_diff_template("dontaudit")
added_dontaudits = DiffResultDescriptor("diff_dontaudits")
removed_dontaudits = DiffResultDescriptor("diff_dontaudits")
modified_dontaudits = DiffResultDescriptor("diff_dontaudits")
diff_allowxperms = avx_diff_template("allowxperm")
added_allowxperms = DiffResultDescriptor("diff_allowxperms")
removed_allowxperms = DiffResultDescriptor("diff_allowxperms")
modified_allowxperms = DiffResultDescriptor("diff_allowxperms")
diff_auditallowxperms = avx_diff_template("auditallowxperm")
added_auditallowxperms = DiffResultDescriptor("diff_auditallowxperms")
removed_auditallowxperms = DiffResultDescriptor("diff_auditallowxperms")
modified_auditallowxperms = DiffResultDescriptor("diff_auditallowxperms")
diff_neverallowxperms = avx_diff_template("neverallowxperm")
added_neverallowxperms = DiffResultDescriptor("diff_neverallowxperms")
removed_neverallowxperms = DiffResultDescriptor("diff_neverallowxperms")
modified_neverallowxperms = DiffResultDescriptor("diff_neverallowxperms")
diff_dontauditxperms = avx_diff_template("dontauditxperm")
added_dontauditxperms = DiffResultDescriptor("diff_dontauditxperms")
removed_dontauditxperms = DiffResultDescriptor("diff_dontauditxperms")
modified_dontauditxperms = DiffResultDescriptor("diff_dontauditxperms")
diff_type_transitions = te_diff_template("type_transition")
added_type_transitions = DiffResultDescriptor("diff_type_transitions")
removed_type_transitions = DiffResultDescriptor("diff_type_transitions")
modified_type_transitions = DiffResultDescriptor("diff_type_transitions")
diff_type_changes = te_diff_template("type_change")
added_type_changes = DiffResultDescriptor("diff_type_changes")
removed_type_changes = DiffResultDescriptor("diff_type_changes")
modified_type_changes = DiffResultDescriptor("diff_type_changes")
diff_type_members = te_diff_template("type_member")
added_type_members = DiffResultDescriptor("diff_type_members")
removed_type_members = DiffResultDescriptor("diff_type_members")
modified_type_members = DiffResultDescriptor("diff_type_members")
# Lists of rules for each policy
_left_te_rules = defaultdict(list)
_right_te_rules = defaultdict(list)
#
# Internal functions
#
def _create_te_rule_lists(self):
"""Create rule lists for both policies."""
# do not expand yet, to keep memory
# use down as long as possible
self.log.debug("Building TE rule lists from {0.left_policy}".format(self))
for rule in self.left_policy.terules():
self._left_te_rules[rule.ruletype].append(rule)
self.log.debug("Building TE rule lists from {0.right_policy}".format(self))
for rule in self.right_policy.terules():
self._right_te_rules[rule.ruletype].append(rule)
self.log.debug("Completed building TE rule lists.")
def _reset_diff(self):
"""Reset diff results on policy changes."""
self.log.debug("Resetting TE rule differences")
self.added_allows = None
self.removed_allows = None
self.modified_allows = None
self.added_auditallows = None
self.removed_auditallows = None
self.modified_auditallows = None
self.added_neverallows = None
self.removed_neverallows = None
self.modified_neverallows = None
self.added_dontaudits = None
self.removed_dontaudits = None
self.modified_dontaudits = None
self.added_allowxperms = None
self.removed_allowxperms = None
self.modified_allowxperms = None
self.added_auditallowxperms = None
self.removed_auditallowxperms = None
self.modified_auditallowxperms = None
self.added_neverallowxperms = None
self.removed_neverallowxperms = None
self.modified_neverallowxperms = None
self.added_dontauditxperms = None
self.removed_dontauditxperms = None
self.modified_dontauditxperms = None
self.added_type_transitions = None
self.removed_type_transitions = None
self.modified_type_transitions = None
self.added_type_changes = None
self.removed_type_changes = None
self.modified_type_changes = None
self.added_type_members = None
self.removed_type_members = None
self.modified_type_members = None
# Sets of rules for each policy
self._left_te_rules.clear()
self._right_te_rules.clear()
class AVRuleWrapper(Wrapper):
"""Wrap access vector rules to allow set operations."""
__slots__ = ("ruletype", "source", "target", "tclass", "conditional", "conditional_block")
def __init__(self, rule):
self.origin = rule
self.ruletype = rule.ruletype
self.source = SymbolWrapper(rule.source)
self.target = SymbolWrapper(rule.target)
self.tclass = SymbolWrapper(rule.tclass)
self.key = hash(rule)
try:
self.conditional = ConditionalExprWrapper(rule.conditional)
self.conditional_block = rule.conditional_block
except RuleNotConditional:
self.conditional = None
self.conditional_block = None
def __hash__(self):
return self.key
def __lt__(self, other):
return self.key < other.key
def __eq__(self, other):
# because TERuleDifference groups rules by ruletype,
# the ruletype always matches.
return self.source == other.source and \
self.target == other.target and \
self.tclass == other.tclass and \
self.conditional == other.conditional and \
self.conditional_block == other.conditional_block
class AVRuleXpermWrapper(Wrapper):
"""Wrap extended permission access vector rules to allow set operations."""
__slots__ = ("ruletype", "source", "target", "tclass", "xperm_type")
def __init__(self, rule):
self.origin = rule
self.ruletype = rule.ruletype
self.source = SymbolWrapper(rule.source)
self.target = SymbolWrapper(rule.target)
self.tclass = SymbolWrapper(rule.tclass)
self.xperm_type = rule.xperm_type
self.key = hash(rule)
def __hash__(self):
return self.key
def __lt__(self, other):
return self.key < other.key
def __eq__(self, other):
# because TERuleDifference groups rules by ruletype,
# the ruletype always matches.
return self.source == other.source and \
self.target == other.target and \
self.tclass == other.tclass and \
self.xperm_type == other.xperm_type
class TERuleWrapper(Wrapper):
"""Wrap type_* rules to allow set operations."""
__slots__ = ("ruletype", "source", "target", "tclass", "conditional", "conditional_block",
"filename")
def __init__(self, rule):
self.origin = rule
self.ruletype = rule.ruletype
self.source = SymbolWrapper(rule.source)
self.target = SymbolWrapper(rule.target)
self.tclass = SymbolWrapper(rule.tclass)
self.key = hash(rule)
try:
self.conditional = ConditionalExprWrapper(rule.conditional)
self.conditional_block = rule.conditional_block
except RuleNotConditional:
self.conditional = None
self.conditional_block = None
try:
self.filename = rule.filename
except (RuleUseError, TERuleNoFilename):
self.filename = None
def __hash__(self):
return self.key
def __lt__(self, other):
return self.key < other.key
def __eq__(self, other):
# because TERuleDifference groups rules by ruletype,
# the ruletype always matches.
return self.source == other.source and \
self.target == other.target and \
self.tclass == other.tclass and \
self.conditional == other.conditional and \
self.conditional_block == other.conditional_block and \
            self.filename == other.filename
| TresysTechnology/setools | setools/diff/terules.py | Python | lgpl-2.1 | 16,624 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Mpfr(AutotoolsPackage):
"""The MPFR library is a C library for multiple-precision
floating-point computations with correct rounding."""
homepage = "http://www.mpfr.org"
url = "https://ftp.gnu.org/gnu/mpfr/mpfr-3.1.5.tar.bz2"
version('3.1.5', 'b1d23a55588e3b2a13e3be66bc69fd8d')
version('3.1.4', 'b8a2f6b0e68bef46e53da2ac439e1cf4')
version('3.1.3', '5fdfa3cfa5c86514ee4a241a1affa138')
version('3.1.2', 'ee2c3ac63bf0c2359bf08fc3ee094c19')
# mpir is a drop-in replacement for gmp
depends_on('gmp@4.1.0:') # 4.2.3 or higher is recommended
patch('vasprintf.patch', when='@3.1.5')
patch('strtofr.patch', when='@3.1.5')
| TheTimmy/spack | var/spack/repos/builtin/packages/mpfr/package.py | Python | lgpl-2.1 | 1,938 |
#!/usr/bin/env python
#
# @file strFunctions.py
# @brief functions that adjust strings in some way
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2015 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
import re
try:
from . import global_variables
except:
import global_variables
def upper_first(word):
if len(word) == 0:
return word
returned_word = word[0].upper() + word[1:len(word)]
return returned_word
def lower_first(word):
# hack for spatial CSGFoo classes
if word.startswith('CSG'):
returned_word = 'csg' + word[3:len(word)]
else:
returned_word = word[0].lower() + word[1:len(word)]
return returned_word
def get_indent(element):
s1 = '{0}('.format(element)
return len(s1)
def abbrev_name(element):
abbrev = ''
for i in range(0, len(element)):
if element[i].isupper():
abbrev = abbrev + element[i]
return abbrev.lower()
def abbrev_lo_name(loname):
return 'LO' + loname[6:]
def list_of_name(name, addPrefix=True):
prefix = ''
if addPrefix and not global_variables.is_package:
prefix = global_variables.prefix
return prefix + 'ListOf' + plural_no_prefix(name)
def jsbml_list_of_name(name, addPrefix=True):
prefix = ''
if addPrefix and not global_variables.is_package:
prefix = global_variables.prefix
return prefix + 'listOf' + plural_no_prefix(name)
def lower_list_of_name_no_prefix(name):
return 'listOf' + plural_no_prefix(upper_first(name))
def cap_list_of_name(name, addPrefix=True):
name = upper_first(name)
return list_of_name(name, addPrefix)
def cap_list_of_name_no_prefix(name):
name = upper_first(name)
return list_of_name(name, False)
def plural_no_prefix(name):
if global_variables.is_package:
return plural(name)
else:
new_name = remove_prefix(name)
return plural(new_name)
def plural(name):
if name.endswith('s'):
returned_word = name
elif name.endswith('nformation'):
returned_word = name
elif name.endswith('hild'):
returned_word = name + 'ren'
elif name.endswith('x'):
returned_word = name[0:len(name)-1] + 'es'
else:
returned_word = name + 's'
return returned_word
def singular(name):
returned_word = name
length = len(name)
if name.endswith('s'):
returned_word = name[0:length-1]
elif name.endswith('hildren'):
returned_word = name[0:length-3]
return returned_word
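# --- Editor's illustrative examples (not part of the original module) ---
# A few traced examples of the naming helpers above:
assert upper_first('species') == 'Species'
assert lower_first('CSGObject') == 'csgObject'  # special CSG handling
assert plural('child') == 'children'
assert plural('unit') == 'units'
assert singular('units') == 'unit'
assert singular('children') == 'child'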
def remove_prefix(name, in_concrete=False):
prefix_to_remove = ''
if global_variables.prefix == 'SBML':
# we might want to remove the name of the package
if not in_concrete and global_variables.is_package \
and global_variables.package_prefix != '':
prefix_to_remove = global_variables.package_prefix
else:
prefix_to_remove = global_variables.prefix
length = len(prefix_to_remove)
if length == 0:
return name
if name.startswith(prefix_to_remove) and not name.endswith('Document'):
newname = name[length:]
else:
newname = name
return newname
def get_indefinite(name):
if name.startswith('a') or name.startswith('A') \
or name.startswith('e') or name.startswith('E') \
or name.startswith('i') or name.startswith('I') \
or name.startswith('o') or name.startswith('O') \
or name.startswith('u') or name.startswith('U'):
return 'an'
else:
return 'a'
def standard_element_name(name):
name = remove_spaces(name)
length = len(name)
temp = name
    # don't want '*', ',' or '_t' at the end
if name.endswith('*'):
temp = name[0:length-1]
elif name.endswith(','):
temp = name[0:length-1]
elif name.endswith('_t'):
temp = name[0:length-2]
returned_word = temp
    # also don't want a ListOf prefix
if returned_word.startswith('ListOf') or returned_word.startswith('listOf'):
temp = singular(returned_word[6:length])
return upper_first(temp)
def get_library_suffix(name):
    ret_name = name.lower()
if ret_name.startswith('lib'):
ret_name = ret_name[3:]
return upper_first(ret_name)
def wrap_token(name, pkg=''):
""" returns the name wrapped as a token
e.g. \token{'id'} or \token{'comp:\-id'} """
if pkg == '':
return '\\token{' + name + '}'
else:
return '\\token{' + pkg + ':\\-' + name + '}'
def wrap_type(name, element, hack=False):
if name == 'array':
return 'consisting of an array of \\primtype{' + element + '}'
elif name == 'enum':
element_name = texify(element)
return 'of type \\primtype{' + element_name + '}'
elif name == 'element':
if hack:
return 'of type \\' + element
else:
return wrap_token(element)
elif name == 'lo_element':
return wrap_token(element)
elif name == 'inline_lo_element':
return 'TO DO: add type'
else:
return 'of type \\primtype{' + name + '}'
def wrap_section(name, add_class=True):
if add_class:
return '\\sec{' + make_class(name) + '}'
else:
return '\\sec{' + name + '}'
def make_class(name):
return name.lower() + '-class'
def wrap_enum(name):
return '\\primtype{' + lower_first(name) + '}'
def get_sid_refs(refs):
if ',' not in refs:
return [upper_first(refs), upper_first(refs)]
else:
ret_string = ''
ret_type = ''
str_refs = refs.split(',')
length = len(str_refs)
if length > 0:
ret_string = upper_first(str_refs[0])
ret_type = upper_first(str_refs[0])
for i in range(1, length):
ret_string += ' or \{0}'.format(upper_first(str_refs[i]))
ret_type += 'Or{0}'.format(upper_first(str_refs[i]))
return [ret_string, ret_type]
def get_element_name(attribute, addPrefix=True):
if 'type' in attribute:
name = ''
if 'texname' in attribute:
name = attribute['texname']
if len(name) == 0:
name = remove_prefix(attribute['name'])
if attribute['type'] == 'lo_element':
return '\{0}'.format(cap_list_of_name(name, addPrefix))
elif attribute['type'] == 'inline_lo_element':
return '\{0}'.format(cap_list_of_name(name, addPrefix))
elif attribute['type'] == 'element':
if attribute['element'] == 'ASTNode*':
return 'MathML math'
else:
return attribute['element']
else:
return 'FIX_ME'
elif 'isListOf' in attribute:
if attribute['isListOf']:
return '\{0}'.format(cap_list_of_name(remove_prefix(attribute['name'])))
else:
return '\{0}'.format(upper_first(remove_prefix(attribute['name'])))
else:
return 'FIX ME'
def get_element_name_no_prefix(attribute):
if 'type' in attribute:
name = ''
if 'texname' in attribute:
name = attribute['texname']
if len(name) == 0:
name = attribute['name']
if attribute['type'] == 'lo_element':
return '\{0}'.format(cap_list_of_name_no_prefix(name))
elif attribute['type'] == 'inline_lo_element':
return '\{0}'.format(cap_list_of_name_no_prefix(name))
elif attribute['type'] == 'element':
if attribute['element'] == 'ASTNode*':
return 'MathML math'
else:
return attribute['element']
else:
return 'FIX_ME'
elif 'isListOf' in attribute:
if attribute['isListOf']:
return '\{0}'.format(cap_list_of_name(attribute['name']))
else:
return '\{0}'.format(upper_first(attribute['name']))
else:
return 'FIX ME'
def replace_digits(name):
name = re.sub('0', 'Zero', name)
name = re.sub('1', 'One', name)
name = re.sub('2', 'Two', name)
name = re.sub('3', 'Three', name)
name = re.sub('4', 'Four', name)
name = re.sub('5', 'Five', name)
name = re.sub('6', 'Six', name)
name = re.sub('7', 'Seven', name)
name = re.sub('8', 'Eight', name)
name = re.sub('9', 'Nine', name)
return name
def replace_underscore(name):
name = re.sub('_', '\_', name)
return name
def remove_spaces(name):
newname = ''
for i in range(0, len(name)):
if name[i] != ' ':
newname += name[i]
return newname
def texify(name):
name = replace_digits(name)
name = replace_underscore(name)
name = remove_spaces(name)
return name
def compare_no_case(name, reference):
caseless_name = name.lower()
caseless_ref = reference.lower()
if caseless_name == caseless_ref:
return True
else:
return False
def get_class_from_plugin(plugin, package):
num = len(package)
length = len(plugin)
name = plugin[num:length-6]
return name
def prefix_name(name):
if name.startswith(global_variables.prefix):
return name
elif name == 'XMLNode' or name == 'ASTNode':
return name
elif name == 'SBase':
return '{0}Base'.format(global_variables.prefix)
else:
return '{0}{1}'.format(global_variables.prefix, name)
# Prefix names - if we are working with another library we want class
# prefixed but element names to stay untouched
def prefix_classes(working_class):
existing_name = working_class['name']
working_class['name'] = prefix_name(existing_name)
if working_class['baseClass'] != global_variables.baseClass:
working_class['baseClass'] = prefix_name(working_class['baseClass'])
if 'elementName' not in working_class or len(working_class['elementName']) == 0:
working_class['elementName'] = lower_first(existing_name)
for attrib in working_class['attribs']:
if attrib['type'] == 'lo_element' or attrib['type'] == 'element' or \
attrib['type'] == 'inline_lo_element':
attrib['element'] = prefix_name(attrib['element'])
if 'concrete' in attrib:
for conc in attrib['concrete']:
conc['element'] = prefix_name(conc['element'])
# if 'concrete' in attrib and len(attrib['concrete']) > 0:
# for conc in attrib['concrete']:
# conc['element'] = prefix_name(conc['element'])
| hovo1990/deviser | generator/util/strFunctions.py | Python | lgpl-2.1 | 12,236 |
# -*- coding: utf-8 -*-
{
'!langcode!': 'fr',
'!langname!': 'Français',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression en option tels que "field1 = \'newvalue\'". Vous ne pouvez pas mettre à jour ou supprimer les résultats d\'une jointure "a JOIN"',
'%s %%{row} deleted': 'lignes %s supprimées',
'%s %%{row} updated': 'lignes %s mises à jour',
'%Y-%m-%d': '%d-%m-%Y',
'%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S',
'(requires internet access)': '(nécessite un accès Internet)',
'(requires internet access, experimental)': '(requires internet access, experimental)',
'(something like "it-it")': '(quelque chose comme "it-it") ',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Searching: **%s** %%{file}': 'Cherche: **%s** fichiers',
'A new version of web2py is available: %s': 'Une nouvelle version de web2py est disponible: %s ',
'A new version of web2py is available: Version 1.68.2 (2009-10-21 09:59:29)\n': 'Une nouvelle version de web2py est disponible: Version 1.68.2 (2009-10-21 09:59:29)\r\n',
'About': 'à propos',
'About application': "A propos de l'application",
'additional code for your application': 'code supplémentaire pour votre application',
'Additional code for your application': 'Code additionnel pour votre application',
'admin disabled because no admin password': 'admin désactivée car aucun mot de passe admin',
'admin disabled because not supported on google app engine': 'admin désactivée car non prise en charge sur Google Apps engine',
'admin disabled because unable to access password file': "admin désactivée car incapable d'accéder au fichier mot de passe",
'Admin is disabled because insecure channel': 'Admin est désactivé parce que canal non sécurisé',
'Admin language': "Language de l'admin",
'administrative interface': "interface d'administration",
'Administrator Password:': 'Mot de passe Administrateur:',
'and rename it (required):': 'et renommez-la (obligatoire):',
'and rename it:': 'et renommez-le:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin est désactivé parce que canal non sécurisé',
'application "%s" uninstalled': 'application "%s" désinstallée',
'application %(appname)s installed with md5sum: %(digest)s': 'application %(appname)s installée avec md5sum: %(digest)s',
'application compiled': 'application compilée',
'application is compiled and cannot be designed': "l'application est compilée et ne peut être modifiée",
'Application name:': "Nom de l'application:",
'are not used': 'are not used',
'are not used yet': 'are not used yet',
'Are you sure you want to delete file "%s"?': 'Êtes-vous sûr de vouloir supprimer le fichier «%s»?',
'Are you sure you want to delete plugin "%s"?': 'Êtes-vous sûr de vouloir supprimer le plugin "%s"?',
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Are you sure you want to uninstall application "%s"?': "Êtes-vous sûr de vouloir désinstaller l'application «%s»?",
'Are you sure you want to upgrade web2py now?': 'Êtes-vous sûr de vouloir mettre à jour web2py maintenant?',
'arguments': 'arguments',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATTENTION: nécessite une connexion sécurisée (HTTPS) ou être en localhost. ',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATTENTION: les tests ne sont pas thread-safe DONC NE PAS EFFECTUER DES TESTS MULTIPLES SIMULTANÉMENT.',
'ATTENTION: you cannot edit the running application!': "ATTENTION: vous ne pouvez pas modifier l'application qui tourne!",
'Autocomplete Python Code': 'Autocomplete Python Code',
'Available databases and tables': 'Bases de données et tables disponible',
'back': 'retour',
'cache': 'cache',
'cache, errors and sessions cleaned': 'cache, erreurs et sessions nettoyés',
'can be a git repo': 'can be a git repo',
'Cannot be empty': 'Ne peut pas être vide',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Ne peut pas compiler: il y a des erreurs dans votre application. corriger les erreurs et essayez à nouveau.',
'Cannot compile: there are errors in your app:': 'Ne peut pas compiler: il y a des erreurs dans votre application:',
'cannot create file': 'ne peut pas créer de fichier',
'cannot upload file "%(filename)s"': 'ne peut pas charger le fichier "%(filename)s"',
'Change admin password': 'Changer le mot de passe admin',
'change editor settings': 'change editor settings',
'check all': 'tout vérifier ',
'Check for upgrades': 'Vérifier les mises à jour',
'Check to delete': 'Cocher pour supprimer',
'Checking for upgrades...': 'Vérification des mises à jour ... ',
'Clean': 'nettoyer',
'click to check for upgrades': 'Cliquez pour vérifier les mises jour',
'code': 'code',
'collapse/expand all': 'tout réduire/agrandir',
'Compile': 'compiler',
'compiled application removed': 'application compilée enlevée',
'Controllers': 'Contrôleurs',
'controllers': 'contrôleurs',
'Create': 'Créer',
'create file with filename:': 'créer un fichier avec nom de fichier:',
'create new application:': 'créer une nouvelle application:',
'Create new simple application': 'Créer une nouvelle application',
'Create/Upload': 'Create/Upload',
'created by': 'créé par',
'crontab': 'crontab',
'Current request': 'Requête actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'currently running': 'tourne actuellement',
'currently saved or': 'actuellement enregistré ou',
'data uploaded': 'données chargées',
'database': 'base de données',
'database %s select': 'base de données %s sélectionner',
'database administration': 'administration base de données',
'Date and Time': 'Date et heure',
'db': 'bdd',
'Debug': 'Debug',
'defines tables': 'définit les tables',
'Delete': 'Supprimer',
'delete': 'supprimer',
'delete all checked': 'supprimer tout ce qui est coché',
'delete plugin': ' supprimer le plugin',
'Delete this file (you will be asked to confirm deletion)': 'Supprimer ce fichier (on vous demandera de confirmer la suppression)',
'Delete:': 'Supprimer:',
'Deploy': 'Déployer',
'Deploy on Google App Engine': 'Déployer sur Google App Engine',
'Deploy to OpenShift': 'Deploy to OpenShift',
'design': 'conception',
'direction: ltr': 'direction: ltr',
'Disable': 'Disable',
'docs': 'docs',
'done!': 'fait!',
'download layouts': 'télécharger layouts',
'Download layouts from repository': 'Download layouts from repository',
'download plugins': 'télécharger plugins',
'Download plugins from repository': 'Download plugins from repository',
'EDIT': 'MODIFIER',
'Edit': 'modifier',
'Edit application': "Modifier l'application",
'edit controller': 'modifier contrôleur',
'edit controller:': 'edit controller:',
'Edit current record': 'Modifier cette entrée',
'edit views:': 'modifier vues:',
'Editing %s': 'Editing %s',
'Editing file': 'Modifier le fichier',
'Editing file "%s"': 'Modifier le fichier "%s"',
'Editing Language file': 'Modifier le fichier de langue',
'Enterprise Web Framework': 'Enterprise Web Framework',
'Error logs for "%(app)s"': 'Journal d\'erreurs pour "%(app)s"',
'Errors': 'erreurs',
'Exception instance attributes': "Attributs d'instance Exception",
'Exit Fullscreen': 'Exit Fullscreen',
'Expand Abbreviation (html files only)': 'Expand Abbreviation (html files only)',
'export as csv file': 'export au format CSV',
'exposes': 'expose',
'exposes:': 'expose:',
'extends': 'étend',
'failed to reload module': 'impossible de recharger le module',
'failed to reload module because:': 'impossible de recharger le module car:',
'file "%(filename)s" created': 'fichier "%(filename)s" créé',
'file "%(filename)s" deleted': 'fichier "%(filename)s" supprimé',
'file "%(filename)s" uploaded': 'fichier "%(filename)s" chargé',
'file "%s" of %s restored': 'fichier "%s" de %s restauré',
'file changed on disk': 'fichier modifié sur le disque',
'file does not exist': "fichier n'existe pas",
'file saved on %(time)s': 'fichier enregistré le %(time)s',
'file saved on %s': 'fichier enregistré le %s',
'filter': 'filtre',
'Find Next': 'Find Next',
'Find Previous': 'Find Previous',
'Functions with no doctests will result in [passed] tests.': 'Des fonctions sans doctests entraîneront des tests [passed] .',
'graph model': 'graph model',
'Help': 'aide',
'htmledit': 'edition html',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': "Si le rapport ci-dessus contient un numéro de ticket, cela indique une défaillance dans l'exécution du contrôleur, avant toute tentative d'exécuter les doctests. Cela est généralement dû à une erreur d'indentation ou une erreur à l'extérieur du code de la fonction.\r\nUn titre vert indique que tous les tests (si définis) sont passés. Dans ce cas, les résultats des essais ne sont pas affichées.",
'Import/Export': 'Importer/Exporter',
'includes': 'inclus',
'index': 'index',
'insert new': 'insérer nouveau',
'insert new %s': 'insérer nouveau %s',
'Install': 'Installer',
'Installed applications': 'Applications installées',
'internal error': 'erreur interne',
'Internal State': 'État Interne',
'Invalid action': 'Action non valide',
'invalid password': 'mot de passe invalide',
'Invalid Query': 'Requête non valide',
'invalid request': 'Demande incorrecte',
'invalid ticket': 'ticket non valide',
'Keyboard shortcuts': 'Keyboard shortcuts',
'language file "%(filename)s" created/updated': 'fichier de langue "%(filename)s" créé/mis à jour',
'Language files (static strings) updated': 'Fichiers de langue (chaînes statiques) mis à jour ',
'languages': 'langues',
'Languages': 'Langues',
'Last saved on:': 'Dernière sauvegarde le:',
'License for': 'Licence pour',
'loading...': 'Chargement ...',
'login': 'connexion',
'Login': 'Connexion',
'Login to the Administrative Interface': "Se connecter à l'interface d'administration",
'Logout': 'déconnexion',
'Manage': 'Manage',
'merge': 'fusionner',
'models': 'modèles',
'Models': 'Modèles',
'Modules': 'Modules',
'modules': 'modules',
'new application "%s" created': 'nouvelle application "%s" créée',
'New application wizard': 'Assistant nouvelle application',
'new plugin installed': 'nouveau plugin installé',
'New Record': 'Nouvelle Entrée',
'new record inserted': 'nouvelle entrée insérée',
'New simple application': 'Nouvelle application simple',
'next 100 rows': '100 lignes suivantes',
'NO': 'NON',
'No databases in this application': 'Aucune base de données dans cette application',
'no match': 'aucune correspondance',
'no package selected': 'no package selected',
'online designer': 'online designer',
'or alternatively': 'or alternatively',
'Or Get from URL:': 'Or Get from URL:',
'or import from csv file': 'ou importer depuis un fichier CSV ',
'or provide app url:': "ou fournir l'URL de l'app:",
'or provide application url:': "ou fournir l'URL de l'application:",
'Original/Translation': 'Original / Traduction',
'Overwrite installed app': "Écraser l'application installée",
'Pack all': 'tout empaqueter',
'Pack compiled': 'paquet compilé',
'Pack custom': 'Pack custom',
'pack plugin': 'paquet plugin',
'PAM authenticated user, cannot change password here': 'Utilisateur authentifié par PAM, vous ne pouvez pas changer le mot de passe ici',
'password changed': 'mot de passe modifié',
'Peeking at file': 'Jeter un oeil au fichier',
'plugin "%(plugin)s" deleted': 'plugin "%(plugin)s" supprimé',
'Plugin "%s" in application': 'Plugin "%s" dans l\'application',
'Plugins': 'Plugins',
'plugins': 'plugins',
'Plural-Forms:': 'Plural-Forms:',
'Powered by': 'Propulsé par',
'previous 100 rows': '100 lignes précédentes',
'private files': 'private files',
'Private files': 'Private files',
'Query:': 'Requête: ',
'Rapid Search': 'Rapid Search',
'record': 'entrée',
'record does not exist': "l'entrée n'existe pas",
'record id': 'id entrée',
'Reload routes': 'Reload routes',
'Remove compiled': 'retirer compilé',
'Replace': 'Replace',
'Replace All': 'Replace All',
'Resolve Conflict file': 'Résoudre les conflits de fichiers',
'restore': 'restaurer',
'revert': 'revenir',
'Rows in table': 'Lignes de la table',
'Rows selected': 'Lignes sélectionnées',
"Run tests in this file (to run all files, you may also use the button labelled 'test')": "Lancer les tests dans ce fichier (pour lancer tous les fichiers, vous pouvez également utiliser le bouton nommé 'test')",
'Running on %s': 'Running on %s',
'save': 'sauver',
'Save': 'Enregistrer',
'Save file:': 'Save file:',
'Save file: %s': 'Save file: %s',
'Save via Ajax': 'Save via Ajax',
'Saved file hash:': 'Hash du Fichier enregistré:',
'selected': 'sélectionnés',
'session expired': 'la session a expiré ',
'shell': 'shell',
'Site': 'Site',
'some files could not be removed': 'certains fichiers ne peuvent pas être supprimés',
'Start searching': 'Start searching',
'Start wizard': "Démarrer l'assistant",
'state': 'état',
'static': 'statiques',
'Static': 'Static',
'Static files': 'Fichiers statiques',
'submit': 'envoyer',
'Submit': 'Submit',
'Sure you want to delete this object?': 'Vous êtes sûr de vouloir supprimer cet objet? ',
'table': 'table',
'test': 'tester',
'Testing application': "Test de l'application",
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "requête" est une condition comme "db.table1.field1==\'value\'". Quelque chose comme "db.table1.field1==db.table2.field2" aboutit à un JOIN SQL.',
'The application logic, each URL path is mapped in one exposed function in the controller': "La logique de l'application, chaque chemin d'URL est mappé avec une fonction exposée dans le contrôleur",
'the application logic, each URL path is mapped in one exposed function in the controller': "la logique de l'application, chaque chemin d'URL est mappé dans une fonction exposée dans le contrôleur",
'The data representation, define database tables and sets': 'La représentation des données, définir les tables et ensembles de la base de données',
'the data representation, define database tables and sets': 'La représentation des données, définir les tables et ensembles de la base de données',
'The presentations layer, views are also known as templates': 'La couche de présentation, les vues sont également appelées modèles',
'the presentations layer, views are also known as templates': 'la couche de présentation, les vues sont également appelées modèles',
'There are no controllers': "Il n'y a pas de contrôleurs",
'There are no models': "Il n'y a pas de modèles",
'There are no modules': "Il n'y a pas de modules",
'There are no plugins': "Il n'y a pas de plugins",
'There are no private files': 'There are no private files',
'There are no static files': "Il n'y a pas de fichiers statiques",
'There are no translators, only default language is supported': "Il n'y a pas de traducteurs, seule la langue par défaut est prise en charge",
'There are no views': "Il n'y a pas de vues",
'These files are not served, they are only available from within your app': 'These files are not served, they are only available from within your app',
'These files are served without processing, your images go here': 'Ces fichiers sont renvoyés sans traitement, vos images viennent ici',
'these files are served without processing, your images go here': 'ces fichiers sont servis sans transformation, vos images vont ici',
'This is the %(filename)s template': 'Ceci est le modèle %(filename)s ',
'Ticket': 'Ticket',
'TM': 'MD',
'to previous version.': 'à la version précédente.',
'To create a plugin, name a file/folder plugin_[name]': 'Pour créer un plugin, créer un fichier /dossier plugin_[nom]',
'Toggle comment': 'Toggle comment',
'Toggle Fullscreen': 'Toggle Fullscreen',
'translation strings for the application': "chaînes de traduction de l'application",
'Translation strings for the application': "Chaînes de traduction pour l'application",
'try': 'essayer',
'try something like': 'essayez quelque chose comme',
'Try the mobile interface': 'Try the mobile interface',
'try view': 'try view',
'Unable to check for upgrades': 'Impossible de vérifier les mises à jour',
'unable to create application "%s"': 'impossible de créer l\'application "%s"',
'unable to delete file "%(filename)s"': 'impossible de supprimer le fichier "%(filename)s"',
'unable to delete file plugin "%(plugin)s"': 'impossible de supprimer le plugin "%(plugin)s"',
'Unable to download': 'Impossible de télécharger',
'Unable to download app': "Impossible de télécharger l'app",
'Unable to download app because:': "Impossible de télécharger l'app car:",
'Unable to download because': 'Impossible de télécharger car',
'unable to parse csv file': "impossible d'analyser les fichiers CSV",
'unable to uninstall "%s"': 'impossible de désinstaller "%s"',
'unable to upgrade because "%s"': 'impossible de mettre à jour car "%s"',
'uncheck all': 'tout décocher',
'Uninstall': 'désinstaller',
'update': 'mettre à jour',
'update all languages': 'mettre à jour toutes les langues',
'Update:': 'Mise à jour:',
'upgrade now': 'mettre à jour maintenant',
'upgrade web2py now': 'mettre à jour web2py maintenant',
'upload': 'charger',
'Upload': 'Upload',
'Upload & install packed application': "Charger & installer l'application empaquetée",
'Upload a package:': 'Charger un paquet:',
'Upload and install packed application': 'Upload and install packed application',
'upload application:': "charger l'application:",
'Upload existing application': 'Charger une application existante',
'upload file:': 'charger le fichier:',
'upload plugin file:': 'charger fichier plugin:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Utilisez (...)&(...) pour AND, (...)|(...) pour OR, et ~(...) pour NOT afin de construire des requêtes plus complexes. ',
'Use an url:': 'Utiliser une url:',
'user': 'utilisateur',
'variables': 'variables',
'Version': 'Version',
'versioning': 'versioning',
'Versioning': 'Versioning',
'view': 'vue',
'Views': 'Vues',
'views': 'vues',
'Web Framework': 'Framework Web',
'web2py is up to date': 'web2py est à jour',
'web2py Recent Tweets': 'Tweets récents sur web2py ',
'web2py upgraded; please restart it': 'web2py mis à jour; veuillez le redémarrer',
'YES': 'OUI',
}
|
olituks/sentinella
|
frontend/library/web2py/applications/admin/languages/fr.py
|
Python
|
lgpl-2.1
| 18,832
|
# Copyright 2018, Bastian Germann
# All rights reserved.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should also have received a copy of the GNU Lesser General Public
# License along with this library in the file named "LICENSE".
# If not, write to the Free Software Foundation, 51 Franklin Street,
# suite 500, Boston, MA 02110-1335, USA or visit their web page on the
# internet at http://www.fsf.org/licenses/lgpl.html.
import os
import shutil
import tempfile
from glob import glob
from subprocess import check_call
from sys import platform
from zipfile import ZipFile
try:
from urllib.request import urlretrieve
from urllib.error import HTTPError
except ImportError:
from urllib import urlretrieve
from urllib2 import HTTPError
def build(root, version):
origdir = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
urlretrieve('https://github.com/silnrsi/grcompiler/archive/' + version + '.zip', tmpdir + '/grc.zip')
os.chdir(tmpdir)
grcz = ZipFile('grc.zip')
grcz.extractall()
grcz.close()
os.chdir('grcompiler-' + version)
if platform == 'win32':
# possible improvement: check for 64 bit compiler and download a 64 bit version
urlretrieve('http://download.icu-project.org/files/icu4c/56.1/icu4c-56_1-Win32-msvc10.zip', 'icu.zip')
icuz = ZipFile('icu.zip')
icuz.extractall()
icuz.close()
# the make file expects an icu source package to find the headers
os.chdir('icu')
os.mkdir('source')
shutil.move('include', 'source/common')
os.chdir('../preprocessor')
check_call(['nmake', '-f', 'gdlpp.mak'])
os.chdir('..')
check_call(['nmake', '-f', 'makefile.mak'])
binaries = glob('release/*')
else:
check_call(['autoreconf', '-i'])
check_call(['./configure'])
check_call(['make'])
binaries = ['compiler/grcompiler', 'preprocessor/gdlpp']
except HTTPError:
if os.path.exists('grcompiler/' + platform):
binaries = glob('grcompiler/' + platform + '/*')
else:
binaries = []
dst = os.path.join(origdir, root, 'graide', 'grcompiler')
if binaries and not os.path.exists(dst):
os.mkdir(dst)
for f in binaries:
shutil.copy(f, dst)
os.chdir(origdir)
shutil.rmtree(tmpdir)
return {'graide' : ['grcompiler/*']}
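# Illustrative usage sketch (not part of the original module; the 'lib' root and
# the version string are hypothetical). A setup.py could call build() to fetch
# and compile grcompiler, then pass the returned mapping on as package_data so
# the binaries ship with the package:
#
#     package_data = build(root='lib', version='X.Y.Z')
#     setup(..., package_data=package_data)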
|
silnrsi/graide
|
grcompiler/__init__.py
|
Python
|
lgpl-2.1
| 3,008
|
import json
from uuid import uuid1
from sqlalchemy import func
from sqlalchemy.exc import IntegrityError
from flask import Blueprint, request, jsonify, make_response
from bson.errors import InvalidId
from mhn import db
from mhn.api import errors
from mhn.api.models import (
Sensor, Rule, DeployScript as Script,
DeployScript, RuleSource)
from mhn.api.decorators import deploy_auth, sensor_auth, token_auth
from mhn.common.utils import error_response
from mhn.common.clio import Clio
from mhn.auth import current_user, login_required
api = Blueprint('api', __name__, url_prefix='/api')
# Endpoints for the Sensor resource.
@api.route('/sensor/', methods=['POST'])
@deploy_auth
def create_sensor():
missing = Sensor.check_required(request.json)
if missing:
return error_response(
errors.API_FIELDS_MISSING.format(missing), 400)
else:
sensor = Sensor(**request.json)
sensor.uuid = str(uuid1())
sensor.ip = request.remote_addr
Clio().authkey.new(**sensor.new_auth_dict()).post()
try:
db.session.add(sensor)
db.session.commit()
except IntegrityError:
return error_response(
errors.API_SENSOR_EXISTS.format(request.json['name']), 400)
else:
return jsonify(sensor.to_dict())
@api.route('/sensor/<uuid>/', methods=['PUT'])
def update_sensor(uuid):
sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
for field in request.json.keys():
if field in Sensor.editable_fields():
setattr(sensor, field, request.json[field])
elif field in Sensor.fields():
return error_response(
errors.API_FIELD_NOT_EDITABLE.format(field), 400)
else:
return error_response(
errors.API_FIELD_INVALID.format(field), 400)
else:
try:
db.session.commit()
except IntegrityError:
return error_response(
errors.API_SENSOR_EXISTS.format(request.json['name']), 400)
return jsonify(sensor.to_dict())
@api.route('/sensor/<uuid>/', methods=['DELETE'])
@login_required
def delete_sensor(uuid):
sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
Clio().authkey.delete(identifier=uuid)
db.session.delete(sensor)
db.session.commit()
return jsonify({})
@api.route('/sensor/<uuid>/connect/', methods=['POST'])
@sensor_auth
def connect_sensor(uuid):
sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
sensor.ip = request.remote_addr
db.session.commit()
return jsonify(sensor.to_dict())
# Utility functions that generalize the GET
# requests of resources from Mnemosyne.
def _get_one_resource(resource, res_id):
try:
res = resource.get(_id=res_id)
except InvalidId:
res = None
if not res:
return error_response(errors.API_RESOURCE_NOT_FOUND, 404)
else:
return jsonify(res.to_dict())
def _get_query_resource(resource, query):
options = {}
if 'limit' in query:
options['limit'] = int(query['limit'])
results = list(resource.get(options, **query))
return jsonify(
data=[r.to_dict() for r in results],
meta={
'size': len(results),
'query': query,
'options': options
}
)
# Now let's make use these methods in the views.
@api.route('/feed/<feed_id>/', methods=['GET'])
@token_auth
def get_feed(feed_id):
return _get_one_resource(Clio().hpfeed, feed_id)
@api.route('/session/<session_id>/', methods=['GET'])
@token_auth
def get_session(session_id):
return _get_one_resource(Clio().session, session_id)
@api.route('/feed/', methods=['GET'])
@token_auth
def get_feeds():
return _get_query_resource(Clio().hpfeed, request.args.to_dict())
@api.route('/session/', methods=['GET'])
@token_auth
def get_sessions():
return _get_query_resource(Clio().session, request.args.to_dict())
@api.route('/top_attackers/', methods=['GET'])
@token_auth
def top_attackers():
options = request.args.to_dict()
limit = int(options.get('limit', '1000'))
hours_ago = int(options.get('hours_ago', '4'))
for name in options.keys():
if name not in ('hours_ago', 'limit',):
del options[name]
results = Clio().session._tops(['source_ip', 'honeypot'], top=limit, hours_ago=hours_ago)
return jsonify(
data=results,
meta={
'size': len(results),
'query': 'top_attackers',
'options': options
}
)
@api.route('/rule/<rule_id>/', methods=['PUT'])
@token_auth
def update_rule(rule_id):
rule = Rule.query.filter_by(id=rule_id).first_or_404()
for field in request.json.keys():
if field in Rule.editable_fields():
setattr(rule, field, request.json[field])
elif field in Rule.fields():
return error_response(
errors.API_FIELD_NOT_EDITABLE.format(field), 400)
else:
return error_response(
errors.API_FIELD_INVALID.format(field), 400)
else:
db.session.commit()
return jsonify(rule.to_dict())
@api.route('/rule/', methods=['GET'])
@sensor_auth
def get_rules():
# Getting active rules.
if request.args.get('plaintext') in ['1', 'true']:
# Requested rendered rules in plaintext.
resp = make_response(Rule.renderall())
resp.headers['Content-Disposition'] = "attachment; filename=mhn.rules"
return resp
else:
# Responding with active rules.
rules = Rule.query.filter_by(is_active=True).\
group_by(Rule.sid).\
having(func.max(Rule.rev))
resp = make_response(json.dumps([ru.to_dict() for ru in rules]))
resp.headers['Content-Type'] = "application/json"
return resp
@api.route('/rulesources/', methods=['POST'])
@login_required
def create_rule_source():
missing = RuleSource.check_required(request.json)
if missing:
return error_response(
errors.API_FIELDS_MISSING.format(missing), 400)
else:
rsource = RuleSource(**request.json)
try:
db.session.add(rsource)
db.session.commit()
except IntegrityError:
return error_response(
errors.API_SOURCE_EXISTS.format(request.json['uri']), 400)
else:
return jsonify(rsource.to_dict())
@api.route('/rulesources/<rs_id>/', methods=['DELETE'])
@login_required
def delete_rule_source(rs_id):
source = RuleSource.query.filter_by(id=rs_id).first_or_404()
db.session.delete(source)
db.session.commit()
return jsonify({})
@api.route('/script/', methods=['POST'])
@login_required
def create_script():
missing = Script.check_required(request.json)
if missing:
return error_response(
errors.API_FIELDS_MISSING.format(missing), 400)
else:
script = Script(**request.json)
script.user = current_user
db.session.add(script)
db.session.commit()
return jsonify(script.to_dict())
@api.route('/script/', methods=['PUT', 'PATCH'])
@login_required
def update_script():
script = Script.query.get(request.json.get('id'))
script.user = current_user
for editable in Script.editable_fields():
if editable in request.json:
setattr(script, editable, request.json[editable])
db.session.add(script)
db.session.commit()
return jsonify(script.to_dict())
@api.route('/script/', methods=['GET'])
def get_script():
if request.args.get('script_id'):
script = DeployScript.query.get(request.args.get('script_id'))
else:
script = DeployScript.query.order_by(DeployScript.date.desc()).first()
if request.args.get('text') in ['1', 'true']:
resp = make_response(script.script)
resp.headers['Content-Disposition'] = "attachment; filename=deploy.sh"
return resp
else:
return jsonify(script.to_dict())
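# Illustrative client sketch (assumption, not part of the original module): the
# sensor endpoints above exchange JSON, so a deploy script could register a
# sensor roughly as below. The base URL, the field names and the authentication
# handled by @deploy_auth are hypothetical placeholders.
#
#     import requests
#     resp = requests.post('http://mhn.example.com/api/sensor/',
#                          json={'name': 'honeypot-01', 'hostname': 'hp01'})
#     sensor_uuid = resp.json()['uuid']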
|
joyhuang-hack/mhn
|
server/mhn/api/views.py
|
Python
|
lgpl-2.1
| 8,093
|
#***************************************************************************
#* *
#* Copyright (c) 2015 - Victor Titov (DeepSOIC) *
#* <vv.titov@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="Lattice Resample object: changes the number of placements in an array, maintaining overall path. Aka interpolation."
__author__ = "DeepSOIC"
__url__ = ""
import math
import FreeCAD as App
import Part
from latticeCommon import *
import latticeBaseFeature
import latticeCompoundExplorer as LCE
import latticeInterpolatorUtil as LIU
import latticeExecuter
# -------------------------- document object --------------------------------------------------
def dotProduct(list1,list2):
sum = 0
for i in range(0,len(list1)):
sum += list1[i]*list2[i]
return sum
def makeLatticeResample(name):
'''makeLatticeResample(name): makes a LatticeResample object.'''
return latticeBaseFeature.makeLatticeFeature(name, LatticeResample, ViewProviderLatticeResample)
class LatticeResample(latticeBaseFeature.LatticeFeature):
"The Lattice Resample object"
def derivedInit(self,obj):
self.Type = "LatticeResample"
obj.addProperty("App::PropertyLink","Base","Lattice Resample","Lattice, the array of placements to be interpolated.")
obj.addProperty("App::PropertyEnumeration","TranslateMode","Lattice Resample","What to do with translation part of placements")
obj.TranslateMode = ['interpolate', 'reset']
obj.TranslateMode = 'interpolate'
obj.addProperty("App::PropertyEnumeration","OrientMode","Lattice Resample","what to do with orientation part of placements")
obj.OrientMode = ['interpolate', 'reset']
obj.OrientMode = 'interpolate'
obj.addProperty("App::PropertyFloat","NumberSamples","Lattice Resample","Number of placements to generate")
obj.NumberSamples = 51
def derivedExecute(self,obj):
# cache stuff
base = obj.Base.Shape
if not latticeBaseFeature.isObjectLattice(obj.Base):
latticeExecuter.warning(obj, "Base is not a lattice, but lattice is expected. Results may be unexpected.\n")
input = [leaf.Placement for leaf in LCE.AllLeaves(base)]
if len(input) < 2:
raise ValueError("At least 2 placements ar needed to interpolate; there are just "+str(len(input))+" in base array.")
if obj.NumberSamples < 2:
raise ValueError("Can output no less than 2 samples; "+str(obj.NumberSamples)+" was requested.")
#cache mode comparisons, for speed
posIsInterpolate = obj.TranslateMode == 'interpolate'
posIsReset = obj.TranslateMode == 'reset'
oriIsInterpolate = obj.OrientMode == 'interpolate'
oriIsReset = obj.OrientMode == 'reset'
# construct interpolation functions
# prepare lists of input samples
IArray = [float(i) for i in range(0,len(input))]
XArray = [plm.Base.x for plm in input]
YArray = [plm.Base.y for plm in input]
ZArray = [plm.Base.z for plm in input]
QArrays = [[],[],[],[]]
prevQ = [0.0]*4
for plm in input:
Q = plm.Rotation.Q
            #test if quaternion has changed sign compared to previous one.
# Quaternions of opposite sign are equivalent in terms of rotation,
# but sign changes confuse interpolation, so we are detecting sign
# changes and discarding them
if dotProduct(Q,prevQ) < -ParaConfusion:
Q = [-v for v in Q]
for iQ in [0,1,2,3]:
QArrays[iQ].append( Q[iQ] )
prevQ = Q
        # construct function objects
if posIsInterpolate:
FX = LIU.InterpolateF(IArray,XArray)
FY = LIU.InterpolateF(IArray,YArray)
FZ = LIU.InterpolateF(IArray,ZArray)
if oriIsInterpolate:
FQs = []
for iQ in [0,1,2,3]:
FQs.append(LIU.InterpolateF(IArray,QArrays[iQ]))
# initialize output containers and loop variables
outputPlms = [] #list of placements
for i_output in range(0,math.trunc(obj.NumberSamples+ParaConfusion)):
i_input = float(i_output) / (obj.NumberSamples-1) * (len(input)-1)
pos = App.Vector()
ori = App.Rotation()
if posIsInterpolate:
pos = App.Vector(FX.value(i_input), FY.value(i_input), FZ.value(i_input))
if oriIsInterpolate:
ori = App.Rotation(FQs[0].value(i_input),
FQs[1].value(i_input),
FQs[2].value(i_input),
FQs[3].value(i_input))
plm = App.Placement(pos, ori)
outputPlms.append(plm)
return outputPlms
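# Illustrative sketch (not part of the original module): the sign-flip guard in
# derivedExecute() relies on q and -q describing the same rotation, so flipping
# the sign keeps the interpolated quaternion components continuous. A minimal
# standalone version of that guard, reusing the module's dotProduct():
def _demoQuaternionSignFix():
    prevQ = [0.0, 0.0, 0.0, 1.0]            # identity rotation (x, y, z, w)
    Q = [-0.001, 0.0, 0.0, -0.9999995]      # nearly identity, opposite sign
    if dotProduct(Q, prevQ) < 0.0:          # same test as above, threshold omitted
        Q = [-v for v in Q]                 # flip back so interpolation stays smooth
    return Q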
class ViewProviderLatticeResample(latticeBaseFeature.ViewProviderLatticeFeature):
def getIcon(self):
return getIconPath('Lattice_Resample.svg')
def claimChildren(self):
return [self.Object.Base]
# -------------------------- /document object --------------------------------------------------
# -------------------------- Gui command --------------------------------------------------
def CreateLatticeResample(name):
sel = FreeCADGui.Selection.getSelectionEx()
FreeCAD.ActiveDocument.openTransaction("Create LatticeResample")
FreeCADGui.addModule("latticeResample")
FreeCADGui.addModule("latticeExecuter")
FreeCADGui.doCommand("f = latticeResample.makeLatticeResample(name='"+name+"')")
FreeCADGui.doCommand("f.Base = App.ActiveDocument."+sel[0].ObjectName)
FreeCADGui.doCommand("for child in f.ViewObject.Proxy.claimChildren():\n"+
" child.ViewObject.hide()")
FreeCADGui.doCommand("latticeExecuter.executeFeature(f)")
FreeCADGui.doCommand("f = None")
FreeCAD.ActiveDocument.commitTransaction()
class _CommandLatticeResample:
"Command to create LatticeResample feature"
def GetResources(self):
return {'Pixmap' : getIconPath("Lattice_Resample.svg"),
'MenuText': QtCore.QT_TRANSLATE_NOOP("Lattice_Resample","Resample Array"),
'Accel': "",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Lattice_Resample","Lattice Resample: interpolate placement-path using 3-rd degree b-spline interpolation.")}
def Activated(self):
if len(FreeCADGui.Selection.getSelection()) == 1 :
CreateLatticeResample(name = "Resample")
else:
mb = QtGui.QMessageBox()
mb.setIcon(mb.Icon.Warning)
mb.setText(translate("Lattice_Resample", "Please select one object, first. The object must be a lattice object (array of placements).", None))
mb.setWindowTitle(translate("Lattice_Resample","Bad selection", None))
mb.exec_()
def IsActive(self):
if FreeCAD.ActiveDocument:
return True
else:
return False
FreeCADGui.addCommand('Lattice_Resample', _CommandLatticeResample())
exportedCommands = ['Lattice_Resample']
# -------------------------- /Gui command --------------------------------------------------
|
DeepSOIC/Lattice
|
latticeResample.py
|
Python
|
lgpl-2.1
| 8,898
|
#! /usr/bin/env python
#
# GUI module generated by PAGE version 4.9
# In conjunction with Tcl version 8.6
# Dec 22, 2017 02:19:10 PM
import sys
from tkinter import messagebox
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = 0
except ImportError:
import tkinter.ttk as ttk
py3 = 1
class BINARIA:
def __init__(self, top):
self.top = Toplevel(top)
self.top.focus()
self.top.transient()
self.top.grab_set()
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#d9d9d9' # X11 color: 'gray85'
self.style = ttk.Style()
if sys.platform == "win32":
self.style.theme_use('winnative')
self.style.configure('.', background=_bgcolor)
self.style.configure('.', foreground=_fgcolor)
self.style.configure('.', font="TkDefaultFont")
self.style.map('.', background=
[('selected', _compcolor), ('active', _ana2color)])
self.top.geometry("1080x720+180+10")
self.top.title("Árvore Binária")
self.top.configure(background="#d9d9d9")
self.frPrincipal = Frame(self.top)
self.frPrincipal.place(relx=0.00, rely=0.00, relheight=1 , relwidth=1)
self.frPrincipal.configure(relief=GROOVE,background="#d9d9d9")
self.lblInserir = Label(self.frPrincipal)
self.lblInserir.place(relx=0.0, rely=0.02 )
self.lblInserir.configure(text='''Inserir: ''' , background="#d9d9d9")
self.txtInserir = Entry(self.frPrincipal)
self.txtInserir.place(relx=0.055, rely=0.02, relheight=0.03, relwidth=0.05)
self.txtInserir.bind('<Return>', lambda e: self.inserir())
self.lblRemover = Label(self.frPrincipal)
self.lblRemover.place(relx=0.00, rely=0.06 )
self.lblRemover.configure(text='''Remover: ''' , background="#d9d9d9")
self.txtRemover = Entry(self.frPrincipal)
self.txtRemover.place(relx=0.055, rely=0.06, relheight=0.03, relwidth=0.05)
self.txtRemover.bind('<Return>', lambda e: self.remover())
self.lblPesquisar = Label(self.frPrincipal)
self.lblPesquisar.place(relx=0.0, rely=0.097 )
self.lblPesquisar.configure(text='''Pesquisar: ''' , background="#d9d9d9")
self.txtPesquisar = Entry(self.frPrincipal)
self.txtPesquisar.place(relx=0.055, rely=0.1, relheight=0.03, relwidth=0.05)
self.txtPesquisar.bind('<Return>', lambda e: self.pesquisar())
def inserir(self):
messagebox.showinfo("Inserir", "Inserindo ...")
self.lblPesquisar1 = Label(self.frPrincipal)
self.lblPesquisar1.place(relx=0.5, rely=0.0 )
self.lblPesquisar1.configure(text='''2''' , background="#d9d9d9")
self.lblPesquisar2 = Label(self.frPrincipal)
self.lblPesquisar2.place(relx=0.48, rely=0.02 )
self.lblPesquisar2.configure(text='''1''' , background="#d9d9d9")
self.lblPesquisar3 = Label(self.frPrincipal)
self.lblPesquisar3.place(relx=0.52, rely=0.02 )
self.lblPesquisar3.configure(text='''3''' , background="#d9d9d9")
def remover(self):
messagebox.showinfo("Remover", "Removendo ...")
def pesquisar(self):
messagebox.showinfo("Pesquisar", "Pesquisando ...")
if __name__ == '__main__':
root = Tk()
top = BINARIA(root)
root.mainloop()
|
sergillam/Fucapi
|
simulador/binaria/binaria.py
|
Python
|
lgpl-3.0
| 3,776
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import numpy as np
from psi4 import core
from psi4.driver import p4util
from psi4.driver.p4util.exceptions import *
def ah_iteration(mcscf_obj, tol=1e-3, max_iter=15, lindep=1e-14, print_micro=True):
"""
Solve the generalized eigenvalue problem:
| 0, g.T | | 1/l | = | 1/l |
| g, H/l | | X | = e | X |
Where g is the gradient, H is the orbital Hessian, X is our orbital update step,
and l is the eigenvalue.
In some ways this is the subspace reduction of the full MCSCF Hessian where the
CC part has been solved exactly. When this occurs the OC and CO elements collapse
to the above and the CC Hessian becomes diagonally dominant.
We can solve this through Davidson iterations where we condition the edges. It's the
Pulay equations all over again, just iterative.
Watch out for lambdas that are zero. Looking for the lambda that is ~1.
"""
# Unpack information
orb_grad = mcscf_obj.gradient()
precon = mcscf_obj.H_approx_diag()
approx_step = mcscf_obj.approx_solve()
orb_grad_ssq = orb_grad.sum_of_squares()
# Gears
min_lambda = 0.3
converged = False
warning_neg = False
warning_mult = False
fullG = np.zeros((max_iter + 2, max_iter + 2))
fullS = np.zeros((max_iter + 2, max_iter + 2))
fullS[np.diag_indices_from(fullS)] = 1
guesses = []
sigma_list = []
guesses.append(approx_step)
sigma_list.append(mcscf_obj.compute_Hk(approx_step))
if print_micro:
core.print_out("\n Eigenvalue Rel dE dX \n")
# Run Davidson look for lambda ~ 1
old_val = 0
for microi in range(1, max_iter + 1):
# Gradient
fullG[0, microi] = guesses[-1].vector_dot(orb_grad)
for i in range(microi):
fullG[i + 1, microi] = guesses[-1].vector_dot(sigma_list[i])
fullS[i + 1, microi] = guesses[-1].vector_dot(guesses[i])
fullG[microi] = fullG[:, microi]
fullS[microi] = fullS[:, microi]
wlast = old_val
# Slice out relevant S and G
S = fullS[:microi + 1, :microi + 1]
G = fullG[:microi + 1, :microi + 1]
# Solve Gv = lSv
v, L = np.linalg.eigh(S)
mask = v > (np.min(np.abs(v)) * 1.e-10)
invL = L[:, mask] * (v[mask]**-0.5)
# Solve in S basis, rotate back
evals, evecs = np.linalg.eigh(np.dot(invL.T, G).dot(invL))
vectors = np.dot(invL, evecs)
# Figure out the right root to follow
if np.sum(np.abs(vectors[0]) > min_lambda) == 0:
raise PsiException("Augmented Hessian: Could not find the correct root!\n"
"Try starting AH when the MCSCF wavefunction is more converged.")
if np.sum(np.abs(vectors[0]) > min_lambda) > 1 and not warning_mult:
core.print_out(r" Warning! Multiple eigenvectors found to follow. Following closest to \lambda = 1.\n")
warning_mult = True
idx = (np.abs(1 - np.abs(vectors[0]))).argmin()
lam = abs(vectors[0, idx])
subspace_vec = vectors[1:, idx]
# Negative roots should go away?
if idx > 0 and evals[idx] < -5.0e-6 and not warning_neg:
core.print_out(' Warning! AH might follow negative eigenvalues!\n')
warning_neg = True
diff_val = evals[idx] - old_val
old_val = evals[idx]
new_guess = guesses[0].clone()
new_guess.zero()
for num, c in enumerate(subspace_vec / lam):
new_guess.axpy(c, guesses[num])
# Build estimated sigma vector
new_dx = sigma_list[0].clone()
new_dx.zero()
for num, c in enumerate(subspace_vec):
new_dx.axpy(c, sigma_list[num])
# Consider restraints
new_dx.axpy(lam, orb_grad)
new_dx.axpy(old_val * lam, new_guess)
norm_dx = (new_dx.sum_of_squares() / orb_grad_ssq)**0.5
if print_micro:
core.print_out(" AH microiter %2d % 18.12e % 6.4e % 6.4e\n" % (microi, evals[idx],
diff_val / evals[idx], norm_dx))
if abs(old_val - wlast) < tol and norm_dx < (tol**0.5):
converged = True
break
# Apply preconditioner
tmp = precon.clone()
val = tmp.clone()
val.set(evals[idx])
tmp.subtract(val)
new_dx.apply_denominator(tmp)
guesses.append(new_dx)
sigma_list.append(mcscf_obj.compute_Hk(new_dx))
if print_micro and converged:
core.print_out("\n")
# core.print_out(" AH converged! \n\n")
#if not converged:
# core.print_out(" !Warning. Augmented Hessian did not converge.\n")
new_guess.scale(-1.0)
return converged, microi, new_guess
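# Illustrative sketch (not part of the original module): the inner step of
# ah_iteration() is the generalized eigenvalue problem G v = e S v, solved above
# by canonical orthogonalization (S^-1/2). A minimal standalone version of that
# step for small symmetric numpy arrays G and S:
def _solve_subspace_eigenproblem(G, S, lindep=1e-10):
    """Return (eigenvalues, vectors) of G v = e S v via S^-1/2 projection."""
    v, L = np.linalg.eigh(S)
    mask = v > (np.min(np.abs(v)) * lindep)       # drop linearly dependent directions
    invL = L[:, mask] * (v[mask] ** -0.5)         # S^-1/2 restricted to the kept space
    evals, evecs = np.linalg.eigh(invL.T @ G @ invL)
    return evals, invL @ evecs                    # rotate back to the original basis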
|
psi4/psi4
|
psi4/driver/procrouting/mcscf/augmented_hessian.py
|
Python
|
lgpl-3.0
| 5,801
|
from superdesk.notification import push_notification
from superdesk.resource import Resource
from superdesk.services import BaseService
from superdesk.utc import utcnow
from liveblog.common import get_user, update_dates_for
blogs_schema = {
'title': {
'type': 'string',
'required': True,
},
'description': {
'type': 'string'
},
'language': {
'type': 'string'
},
'settings': {
'type': 'dict'
},
'original_creator': Resource.rel('users', True),
'version_creator': Resource.rel('users', True),
'state': {
'type': 'string',
'allowed': ['open', 'closed'],
'default': 'open'
}
}
class BlogsResource(Resource):
schema = blogs_schema
datasource = {
'default_sort': [('_updated', -1)]
}
class BlogService(BaseService):
def on_create(self, docs):
for doc in docs:
update_dates_for(doc)
doc['original_creator'] = get_user()
def on_created(self, docs):
push_notification('blogs', created=1)
def on_update(self, updates, original):
user = get_user()
updates['versioncreated'] = utcnow()
updates['version_creator'] = str(user.get('_id'))
def on_updated(self, updates, original):
push_notification('blogs', updated=1)
def on_deleted(self, doc):
push_notification('blogs', deleted=1)
|
nistormihai/liveblog-server
|
liveblog/blogs/blogs.py
|
Python
|
lgpl-3.0
| 1,412
|
"""patchwatcher2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from patchwork import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='text'),
url(r'^data$', views.data, name='data'),
url(r'^updategroup$', views.updategroup, name='updategroup'),
url(r'^updatetestcase$', views.updatetestcase, name='updatetestcase'),
url(r'^updatetestby$', views.updatetestby, name='updatetestby'),
url(r'^updatestate$', views.updatestate, name='updatestate'),
url(r'^updatecomment$', views.updatecomment, name='updatecomment'),
url(r'^updatepushed$', views.updatepushed, name='updatepushed'),
url(r'^updatetestplan$', views.updatetestplan, name='updatetestplan'),
url(r'^updatefeature$', views.updatefeature, name='updatefeature'),
url(r'^updatebuglink$', views.updatebuglink, name='updatebuglink'),
url(r'^patchfile/([0-9a-f]{32})$', views.creatpatchfile, name='creatpatchfile'),
url(r'^showallmd5lable$', views.showallmd5lable, name='showallmd5lable'),
]
|
LuyaoHuang/patchwatcher
|
patchwatcher2/patchwatcher2/urls.py
|
Python
|
lgpl-3.0
| 1,683
|
from django.contrib import admin
from .models import Customer, Document
admin.site.register(Customer)
admin.site.register(Document)
# Register your models here.
|
justinwiley/bluecard
|
idimport/admin.py
|
Python
|
lgpl-3.0
| 165
|
#!/usr/bin/env python
# run failed cmd according to .dayu/failedcmd/ record
import os
import sys
import commands
import socket,fcntl,struct
def getIpAddr(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
if __name__ == "__main__":
ifname = ["bond0","em1","eth0"]
for inte in ifname:
try:
ip = getIpAddr(inte)
except IOError,msg:
continue
hostname = socket.gethostname()
#print hostname
dir1 = "/mnt/dayu/.dayu/failedcmd/" + hostname + "/"
dir2 = "/mnt/dayu/.dayu/failedcmd/" + ip + "/"
#print dir1,dir2
if os.path.isdir(dir1):
dir = dir1
elif os.path.isdir(dir2):
dir = dir2
else:
sys.exit(1)
lcmd = 'ls ' + dir + '|grep -v .error'
output = commands.getoutput(lcmd)
output = output.split()
#print output
print len(output)
while len(output) > 0:
print len(output)
name = min(output)
file = dir + name
output.remove(name)
try:
f = open(file,'r')
cmd = f.read()
            f.close()
if cmd.split() == []:
dcmd = 'rm -f ' + file
os.system(dcmd)
continue
f = open(file,'w')
status,info = commands.getstatusoutput(cmd)
if status != 0:
f.write(cmd)
f.close()
ename = file + ".error"
ef = open(ename,'w')
ef.write(info)
ef.close()
else:
os.remove(file)
except:
raise
|
kk47/Scripts
|
runfailcmd.py
|
Python
|
lgpl-3.0
| 1,807
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Clément Schreiner
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import datetime
try:
import feedparser
except ImportError:
raise ImportError('Please install python3-feedparser')
__all__ = ['Entry', 'Newsfeed']
class Entry(object):
def __init__(self, entry, rssid_func=None):
if hasattr(entry, 'id'):
self.id = entry.id
else:
self.id = None
if "link" in entry:
self.link = entry["link"]
if not self.id:
self.id = entry["link"]
else:
self.link = None
if "title" in entry:
self.title = entry["title"]
else:
self.title = None
if "author" in entry:
self.author = entry["author"]
else:
self.author = None
if "updated_parsed" in entry:
self.datetime = datetime.datetime(*entry['updated_parsed'][:7])
elif "published_parsed" in entry:
self.datetime = datetime.datetime(*entry['published_parsed'][:7])
else:
self.datetime = None
if "summary" in entry:
self.summary = entry["summary"]
else:
self.summary = None
self.content = []
if "content" in entry:
for i in entry["content"]:
self.content.append(i.value)
elif self.summary:
self.content.append(self.summary)
if "wfw_commentrss" in entry:
self.rsscomment = entry["wfw_commentrss"]
else:
self.rsscomment = None
if rssid_func:
self.id = rssid_func(self)
class Newsfeed(object):
def __init__(self, url, rssid_func=None):
self.feed = feedparser.parse(url)
self.rssid_func = rssid_func
def iter_entries(self):
for entry in self.feed['entries']:
yield Entry(entry, self.rssid_func)
def get_entry(self, id):
for entry in self.iter_entries():
if entry.id == id:
return entry
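# Illustrative usage sketch (not part of the original module; the feed URL is a
# placeholder). Newsfeed wraps feedparser and yields normalized Entry objects:
#
#     feed = Newsfeed("http://example.com/rss.xml")
#     for entry in feed.iter_entries():
#         print(entry.id, entry.title, entry.datetime)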
|
laurentb/weboob
|
weboob/tools/newsfeed.py
|
Python
|
lgpl-3.0
| 2,710
|
from __future__ import with_statement
from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config, pool
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
from sqlalchemy.orm import configure_mappers
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
from niimanga import models
# ensure all our schema-object-creating events run!
configure_mappers()
# target_metadata = mymodel.Base.metadata
target_metadata = models.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
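# Illustrative note (assumption, not part of the original file): offline mode is
# typically reached through Alembic's --sql flag, which emits the migration SQL
# instead of executing it, e.g.:
#
#     alembic upgrade head --sql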
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
suryakencana/niimanga
|
migrations/env.py
|
Python
|
lgpl-3.0
| 2,152
|
#!/usr/bin/env python
# encoding: utf-8
"""
web2serial.py
Proxy from web to serial and vice versa, to flash devices and other fun things.
https://github.com/Metalab/web2serial
Developed in cooperation of
Hackerspaceshop (hackerspaceshop.com)
Bits Working (bitsworking.com)
Community at Metalab Hackerspace Vienna (metalab.at)
License
LGPLv3 (see `LICENSE`)
"""
__author__ = "Chris Hager"
__email__ = "chris@bitsworking.com"
__version__ = "0.3.1"
import sys
import os
from optparse import OptionParser
from time import sleep, time
import logging
import json
import threading
import hashlib
import traceback
import tornado.escape
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
from tornado.options import define, options
import serial
import serial.tools.list_ports
# Port for the web interface
PORT_WEB = 54321
SERIAL_SEND_TIMEOUT = 0.001 # Todo: better name (sleep() after sending a message)
SERIAL_READWRITE_TIMEOUT = 0
CLOSE_IF_NO_PING_SINCE = 5 # Seconds
# Length of the device id hash
DEVICE_ID_HASH_LENGTH = 8
# Cache for last received ping (global - does not associate session with pings)
last_ping = None
checker = None
class ConnectionChecker(threading.Thread):
"""
Checks if connections are still alive. If not, remove them.
"""
def __init__(self):
super(ConnectionChecker, self).__init__()
self.alive = True
self.connections = {
# 'hash': web2SerialSocket
}
def run(self):
while self.alive:
now = time()
for connection in self.connections.keys():
if now - self.connections[connection].ping_last_timestamp > CLOSE_IF_NO_PING_SINCE:
# Cleanup time
logging.warn("- force removing connection %s due to no ping", self.connections[connection])
# Try to close
try:
self.connections[connection].close()
except:
logging.warn(traceback.format_exc())
# Remove from list
del self.connections[self.connections[connection].device_hash]
sleep(1)
logging.info("connectionchecker over and out")
def stop(self):
logging.info("stop sent")
self.alive = False
def addConnection(self, websocketHandler):
self.connections[websocketHandler.device_hash] = websocketHandler
def removeConnection(self, websocketHandler):
if websocketHandler.device_hash in self.connections:
del self.connections[websocketHandler.device_hash]
# Tornado Web Application Description
class Application(tornado.web.Application):
def __init__(self):
# URLs
handlers = [
(r"/", MainHandler),
(r"/ping", PingHandler),
(r"/devices", DevicesHandler),
(r"/device/([^/]+)/baudrate/([^/]+)", SerSocketHandler),
]
# Settings
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
)
# Init Web Application
tornado.web.Application.__init__(self, handlers, **settings)
# Tools
def get_com_ports():
""" Returns the available com ports with hash """
iterator = sorted(serial.tools.list_ports.comports())
return [(
hashlib.sha256(deviceid).hexdigest()[:DEVICE_ID_HASH_LENGTH],
deviceid, desc, hwid
) for deviceid, desc, hwid in iterator]
def open_serial_device_by_hash(hash, baudrate):
""" Opens a serial device and returns the serial.Serial(...) connection """
logging.info("open serial device by hash: %s" % hash)
for _hash, _deviceid, _desc, _hwid in get_com_ports():
if _hash == hash:
logging.info("serial device found for hash: %s" % _deviceid)
ser = serial.Serial(_deviceid, int(baudrate),
timeout=SERIAL_READWRITE_TIMEOUT,
writeTimeout=SERIAL_READWRITE_TIMEOUT)
return ser
raise LookupError("serial device not found for hash '%s'" % hash)
# Handlers
class SharedRequestHandler(tornado.web.RequestHandler):
def options(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header('Access-Control-Allow-Methods', "GET, POST, OPTIONS")
self.set_header('Access-Control-Allow-Headers', "X-Requested-With")
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("index.html", devices=get_com_ports())
class PingHandler(SharedRequestHandler):
def get(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write("pong")
class DevicesHandler(SharedRequestHandler):
def get(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write(json.dumps(get_com_ports()))
class SerSocketHandler(tornado.websocket.WebSocketHandler):
"""
Handler for both the websocket and the serial connection.
"""
alive = True
ser = None
device_hash = None
ping_last_timestamp = 0 # timestamp (ms epoch) of last ping from js client
def check_origin(self, origin):
return True
def open(self, hash, baudrate):
"""
Websocket initiated a connection. Open serial device with baudrate and start reader thread.
"""
global checker
logging.info("Opening serial socket (hash=%s, baudrate=%s)" % (hash, baudrate))
self.device_hash = hash
self.ping_last_timestamp = time()
# Check if serial device is already opened
if hash in checker.connections:
err = "Device '%s' already opened" % hash
logging.error(err)
self.write_message(json.dumps({ "error": str(err) }))
self.close()
return
try:
self.ser = open_serial_device_by_hash(hash, baudrate)
checker.addConnection(self)
logging.info("Serial device successfullyh opened (hash=%s, baudrate=%s)" % (hash, baudrate))
except:
err = traceback.format_exc()
            err_short = str(sys.exc_info()[0])
logging.error(err)
message_for_websocket = { "error": err_short }
self.write_message(json.dumps(message_for_websocket))
self.close()
return
# Start the thread which reads for serial input
self.alive = True
self.thread_read = threading.Thread(target=self.reader)
self.thread_read.setDaemon(True)
self.thread_read.setName('serial->socket')
self.thread_read.start()
def on_message(self, message):
"""
JSON message from the websocket is unpacked, and the byte message sent to the serial connection.
"""
logging.info("msg from websocket: %s (len=%s)" % (repr(message), len(message)))
# Unpack
j = json.loads(message)
if "cmd" in j:
if j["cmd"] == "ping":
self.ping_last_timestamp = time()
if "msg" in j:
data = bytearray(j["msg"], "raw_unicode_escape");
logging.info("web -> serial: %s (len=%s)" % (repr(data), len(data)))
# Send data to serial
try:
self.ser.write(data)
sleep(SERIAL_SEND_TIMEOUT)
except:
# probably got disconnected
err = traceback.format_exc()
                err_short = str(sys.exc_info()[0])
logging.error(err)
message_for_websocket = { "error": err_short }
self.write_message(json.dumps(message_for_websocket))
self.close()
raise
def on_close(self):
""" Close serial and quit reader thread """
logging.info("Closing serial connection...")
self.alive = False
if self.ser is not None:
self.ser.close()
        checker.removeConnection(self)
logging.info("Serial closed, waiting for reader thread to quit...")
self.thread_read.join()
logging.info("Serial closed, reader thread quit.")
def reader(self):
"""
Thread which reads on the serial connection. If data is received, forwards
it to websocket.
"""
logging.debug('reader thread started')
while self.alive:
try:
data = self.ser.read(1) # read one, blocking
n = self.ser.inWaiting() # look if there is more
if n:
data = data + self.ser.read(n) # and get as much as possible
if data:
# escape outgoing data when needed (Telnet IAC (0xff) character)
# data = serial.to_bytes(self.rfc2217.escape(data))
message = { "msg": data }
logging.info("message from serial to websocket: %s (len=%s)" % (repr(message), len(message)))
self.write_message(json.dumps(message, encoding="raw_unicode_escape"))
# except serial.SerialException, e:
# except TypeError, e:
except:
# probably got disconnected
err = traceback.format_exc()
                err_short = str(sys.exc_info()[0])
logging.warn(err)
message_for_websocket = { "error": err_short }
# Try to send error to client js
try:
self.write_message(json.dumps(message_for_websocket))
except:
pass
# Try to close websocket connection
try:
self.close()
except:
logging.warn(traceback.format_exc())
break
self.alive = False
logging.debug('reader thread terminated')
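# Illustrative protocol sketch (not part of the original module), summarizing the
# JSON frames implied by the handler above. The browser client is expected to send
#     {"cmd": "ping"}                        # keep-alive, refreshes ping_last_timestamp
#     {"msg": "<raw_unicode_escape text>"}   # bytes to forward to the serial port
# and the handler replies with
#     {"msg": "..."}                         # bytes read from the serial port
#     {"error": "..."}                       # open/read/write failures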
# Get web2serial-core up and running
def start(port):
# Have tornado parse command line arguments
tornado.options.parse_command_line()
# Initial output
logging.info("web2serial.py v%s" % __version__)
logging.info("Com ports: %s" % get_com_ports())
logging.info("Listening on http://0.0.0.0:%s" % port)
global checker
checker = ConnectionChecker()
checker.start()
# Start of tornado web application, and ioloop blocking method
app = Application()
app.listen(port)
try:
tornado.ioloop.IOLoop.instance().start()
except:
logging.warn(traceback.format_exc())
checker.stop()
logging.info("bye")
# If run from command line:
if __name__ == '__main__':
usage = """usage: %prog [options] arg
Example: '%prog abc' or '%prog xyz'"""
version = "%prog " + __version__
parser = OptionParser(usage=usage, version=version)
#parser.add_option("-v", "--verbose", default=False,
# action="store_true", dest="verbose")
parser.add_option("-p", "--port", default=PORT_WEB, dest="port")
(options, args) = parser.parse_args()
start(options.port)
|
Metalab/web2serial
|
software/web2serial-core/web2serial.py
|
Python
|
lgpl-3.0
| 11,200
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 Dana Christen
#
# This file is part of talking-sockets, a tool for connection routing.
#
# talking-sockets is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import pprint
SOURCE_ROOT = os.path.join(os.path.dirname(__file__), '..', 'talking_sockets')
DOC_ROOT = os.path.join(os.path.dirname(__file__), 'code_doc')
#DOC_ROOT = os.path.join('/tmp', 'backend')
SPHINX_FILE_PATTERN = """
{}
=================================================
.. toctree::
:maxdepth: 10
{}
.. automodule:: {}
:members:
:undoc-members:
:show-inheritance:
"""
def find_source_files():
hierarchy = dict()
for root, dirs, files in os.walk(SOURCE_ROOT):
if "__pycache__" in root:
continue
folder = os.path.relpath(root, os.path.join(SOURCE_ROOT, '..'))
package = folder.replace(os.sep, '.')
modules = [f.split('.')[0] for f in files if f.endswith('.py') and "__" not in f and f[0] != '_']
if "__init__.py" not in files:
continue
hierarchy[package] = modules
return hierarchy
def create_rst_files():
package_list = find_source_files()
file_list = list()
for package in package_list:
file_list.append(package + ".rst")
for sub_module in package_list[package]:
file_list.append(".".join([package, sub_module]) + ".rst")
sub_package_list = dict()
for f in file_list:
package_name = f[:-4]
sub_package_list[package_name] = list()
for f2 in file_list:
if not package_name in f2:
continue
sub_package_name = f2.replace(package_name, "")[:-4]
if len(sub_package_name.split(".")) == 2:
sub_package_list[package_name].append(f2[:-4])
for f in file_list:
with open(os.path.join(DOC_ROOT, f), 'w') as f_hdl:
package_name = f[:-4]
sub_packages = '\n '.join(sub_package_list[package_name])
f_hdl.write(SPHINX_FILE_PATTERN.format(package_name, sub_packages, package_name))
if __name__ == "__main__":
create_rst_files()
|
danac/talking-sockets
|
doc/generate_code_doc.py
|
Python
|
lgpl-3.0
| 2,817
|
from contracts import ContractsMeta
from .constants import FIXED, VARIABLE, DEFINED_AT_RUNTIME
from .exceptions import BlockWriterError, ModelWriterError
class BlockConfig(object):
def __init__(self, variable, has_default, default, desc, desc_rest, dtype,
where=None):
self.variable = variable
self.has_default = has_default
self.default = default
self.desc = desc
self.desc_rest = desc_rest
self.where = where
self.dtype = dtype
def __repr__(self):
return ('BlockConfig(%r,%r,%r,%r,%r)' %
(self.variable, self.has_default, self.default,
self.desc, self.desc_rest))
class BlockInput(object):
def __init__(self, type, name, min, max, # @ReservedAssignment
desc, desc_rest, where, dtype):
self.type = type # FIXED, VARIABLE, DEFINED_AT_RUNTIME
self.name = name
self.min = min
self.max = max
self.desc = desc
self.desc_rest = desc_rest
self.where = where
self.dtype = dtype
# TODO: add repr
class BlockOutput(object):
def __init__(self, type, name, # @ReservedAssignment
desc, desc_rest, where, dtype):
self.type = type # FIXED, VARIABLE, DEFINED_AT_RUNTIME
self.name = name
self.desc = desc
self.desc_rest = desc_rest
self.where = where
self.dtype = dtype
def __repr__(self):
return "BlockOutput(%s,%s)" % (self.type, self.name)
def block_alias(name):
assert isinstance(name, str)
BlockMeta.aliases.append(name)
def block_config(name, description=None, default='not-given', dtype=None):
assert isinstance(name, str)
assert description is None or isinstance(description, str)
desc, desc_rest = split_docstring(description)
has_default = default != 'not-given'
if [x for x in BlockMeta.tmp_config if x.variable == name]:
cleanup()
msg = 'Already described config variable %r.' % name
raise BlockWriterError(msg)
BlockMeta.tmp_config.append(BlockConfig(name, has_default, default,
desc, desc_rest, dtype=dtype, where=None))
def block_input(name, description=None, dtype=None):
assert isinstance(name, str)
assert description is None or isinstance(description, str)
desc, desc_rest = split_docstring(description)
if [x for x in BlockMeta.tmp_input if x.name == name]:
cleanup()
msg = 'Already described input variable "%s".' % name
raise BlockWriterError(msg)
if BlockMeta.tmp_input and BlockMeta.tmp_input[-1].type == VARIABLE:
cleanup()
msg = 'Cannot mix variable and fixed input.'
raise BlockWriterError(msg)
BlockMeta.tmp_input.append(BlockInput(FIXED, name, None, None,
desc, desc_rest, None, dtype=dtype))
def block_input_is_variable(description=None,
min=None, max=None): # @ReservedAssignment
assert description is None or isinstance(description, str)
desc, desc_rest = split_docstring(description)
if BlockMeta.tmp_input:
cleanup()
msg = 'Cannot mix variable and fixed input or variable with variable.'
raise BlockWriterError(msg)
BlockMeta.tmp_input.append(BlockInput(VARIABLE, None, min, max,
desc, desc_rest, None, dtype=None))
def block_output(name, description=None, dtype=None):
assert isinstance(name, str)
assert description is None or isinstance(description, str)
desc, desc_rest = split_docstring(description)
if [x for x in BlockMeta.tmp_output if x.name == name]:
cleanup()
msg = 'Already described output variable %r.' % name
raise BlockWriterError(msg)
if BlockMeta.tmp_output and BlockMeta.tmp_output[-1].type == VARIABLE:
cleanup()
msg = 'Cannot mix variable and fixed output.'
raise BlockWriterError(msg)
BlockMeta.tmp_output.append(BlockOutput(FIXED, name,
desc, desc_rest, None, dtype=dtype))
def block_output_is_variable(description=None, suffix=None):
assert description is None or isinstance(description, str)
desc, desc_rest = split_docstring(description)
if BlockMeta.tmp_output:
cleanup()
msg = ('Cannot mix variable and fixed output or variable with variable. '
'(added already: %s)' % (BlockMeta.tmp_output))
raise BlockWriterError(msg)
bo = BlockOutput(VARIABLE, suffix, desc, desc_rest, None, dtype=None)
BlockMeta.tmp_output.append(bo)
def block_output_is_defined_at_runtime(description=None):
assert description is None or isinstance(description, str)
desc, desc_rest = split_docstring(description)
if BlockMeta.tmp_output:
cleanup()
raise BlockWriterError('Cannot mix variable and fixed output'
' or variable with variable. (added already: %s)'
% (BlockMeta.tmp_output))
BlockMeta.tmp_output.append(BlockOutput(DEFINED_AT_RUNTIME, None,
desc, desc_rest, None, dtype=None))
def cleanup():
''' Cleans up temporary data for the meta-sugar, if the construction
is aborted '''
BlockMeta.tmp_config = []
BlockMeta.tmp_output = []
BlockMeta.tmp_input = []
BlockMeta.aliases = []
class BlockMeta(ContractsMeta):
aliases = []
tmp_config = []
tmp_input = []
tmp_output = []
def __init__(cls, clsname, bases, clsdict): # @UnusedVariable @NoSelf
ContractsMeta.__init__(cls, clsname, bases, clsdict)
# Do not do this for the superclasses
if clsname in ['Generator', 'Block']:
return
setattr(cls, 'defined_in', cls.__module__)
setattr(cls, 'config', BlockMeta.tmp_config)
setattr(cls, 'output', BlockMeta.tmp_output)
setattr(cls, 'input', BlockMeta.tmp_input)
BlockMeta.tmp_config = []
BlockMeta.tmp_output = []
BlockMeta.tmp_input = []
# Inspect the lists just bound to the class: the shared tmp_* lists were reset
# above, so checking them here would always see empty lists.
has_variable_input = [x
for x in cls.input
if x.type == VARIABLE]
has_variable_output = [x
for x in cls.output
if x.type == VARIABLE]
if has_variable_output and not has_variable_input:
raise ModelWriterError('Cannot have variable output without '
'variable input.')
if len(BlockMeta.aliases) > 1:
raise ModelWriterError("We don't support multiple aliases yet. "
"Tried to set %r." % BlockMeta.aliases)
if BlockMeta.aliases:
name = BlockMeta.aliases[0]
else:
name = cls.__name__
from .registrar import default_library
default_library.register(name, cls)
BlockMeta.aliases = []
class BlockMetaSugar(object):
@staticmethod
def alias(*arg, **kwargs):
block_alias(*arg, **kwargs)
@staticmethod
def config(*arg, **kwargs):
block_config(*arg, **kwargs)
@staticmethod
def input(*arg, **kwargs): # @ReservedAssignment
block_input(*arg, **kwargs)
@staticmethod
def output(*arg, **kwargs):
block_output(*arg, **kwargs)
@staticmethod
def output_is_variable(*arg, **kwargs):
block_output_is_variable(*arg, **kwargs)
@staticmethod
def output_is_defined_at_runtime(description=None):
""" If this is used, then one must implement get_output_signals() """
block_output_is_defined_at_runtime(description)
@staticmethod
def input_is_variable(description=None,
min=None, max=None): # @ReservedAssignment
''' Declares that this block can accept a variable number
of inputs. You can specify minimum and maximum number. '''
block_input_is_variable(description, min, max)
# TODO: move this somewhere else
def trim(docstring):
if not docstring:
return ''
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
maxi = 10000
indent = maxi
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < maxi:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
result = '\n'.join(trimmed)
# print 'input: """%s"""' % docstring
# print 'result: """%s"""' % result
return result
# TODO: remove space on the right
def split_docstring(s):
''' Splits a docstring in a tuple (first, rest). '''
if s is None:
return None, None
s = trim(s)
all_lines = s.split('\n')
stripped = [l.strip() for l in all_lines]
valid_lines = [l for l in stripped if l]
if valid_lines:
for i in range(len(all_lines)):
if all_lines[i]: # found first
# join all non-empty lines with the first
j = i
while j < len(all_lines) - 1 and all_lines[j].strip():
j += 1
first = ' '.join(all_lines[i:(j + 1)])
rest = '\n'.join(all_lines[j + 1:])
return first, rest
assert False
else:
return None, None
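# Illustrative examples (added for clarity, not part of the original source):
#   split_docstring('One line summary.') == ('One line summary.', '')
#   split_docstring(None) == (None, None)
# For multi-line docstrings the leading non-empty lines are joined into `first`
# and the remaining lines are returned as `rest`.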
|
spillai/procgraph
|
src/procgraph/core/block_meta.py
|
Python
|
lgpl-3.0
| 9,792
|
import numpy
import vigra
from lazyflow.graph import Graph
from lazyflow.operators.opFeatureMatrixCache import OpFeatureMatrixCache
from lazyflow.operators.classifierOperators import OpTrainClassifierFromFeatureVectors
from lazyflow.classifiers import ParallelVigraRfLazyflowClassifierFactory, ParallelVigraRfLazyflowClassifier
class TestOpTrainClassifierFromFeatureVectors(object):
def testBasic(self):
features = numpy.indices( (100,100) ).astype(numpy.float32) + 0.5
features = numpy.rollaxis(features, 0, 3)
features = vigra.taggedView(features, 'xyc')
labels = numpy.zeros( (100,100,1), dtype=numpy.uint8 )
labels = vigra.taggedView(labels, 'xyc')
labels[10,10] = 1
labels[10,11] = 1
labels[20,20] = 2
labels[20,21] = 2
graph = Graph()
opFeatureMatrixCache = OpFeatureMatrixCache(graph=graph)
opFeatureMatrixCache.FeatureImage.setValue(features)
opFeatureMatrixCache.LabelImage.setValue(labels)
opFeatureMatrixCache.NonZeroLabelBlocks.setValue(0)
opFeatureMatrixCache.LabelImage.setDirty( numpy.s_[10:11, 10:12] )
opFeatureMatrixCache.LabelImage.setDirty( numpy.s_[20:21, 20:22] )
opFeatureMatrixCache.LabelImage.setDirty( numpy.s_[30:31, 30:32] )
opTrain = OpTrainClassifierFromFeatureVectors( graph=graph )
opTrain.ClassifierFactory.setValue( ParallelVigraRfLazyflowClassifierFactory(100) )
opTrain.MaxLabel.setValue(2)
opTrain.LabelAndFeatureMatrix.connect( opFeatureMatrixCache.LabelAndFeatureMatrix )
assert opTrain.Classifier.ready()
trained_classifer = opTrain.Classifier.value
# This isn't much of a test at the moment...
assert isinstance( trained_classifer, ParallelVigraRfLazyflowClassifier ), \
"classifier is of the wrong type: {}".format(type(trained_classifer))
if __name__ == "__main__":
import sys
import nose
sys.argv.append("--nocapture") # Don't steal stdout. Show it on the console as usual.
sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
nose.run(defaultTest=__file__)
|
jakirkham/lazyflow
|
tests/testOpTrainClassifierFromFeatureVectors.py
|
Python
|
lgpl-3.0
| 2,243
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import math
import itertools
import numpy as np
from psi4 import core
from psi4.driver import p4util
from psi4.driver import constants
from psi4.driver.p4util.exceptions import *
from psi4.driver import driver_nbody_helper
### Math helper functions
def nCr(n, r):
f = math.factorial
return f(n) // f(r) // f(n - r)
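# Binomial coefficient "n choose r", e.g. nCr(4, 2) == 6.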
### Begin CBS gufunc data
def _sum_cluster_ptype_data(ptype,
ptype_dict,
compute_list,
fragment_slice_dict,
fragment_size_dict,
ret,
vmfc=False,
n=0):
"""
Sums gradient and hessian data from compute_list.
compute_list comes in as a tuple(frag, basis)
"""
if len(compute_list) == 0:
return
sign = 1
# Do ptype
if ptype == 'gradient':
for fragn, basisn in compute_list:
start = 0
grad = np.asarray(ptype_dict[(fragn, basisn)])
if vmfc:
sign = ((-1)**(n - len(fragn)))
for bas in basisn:
end = start + fragment_size_dict[bas]
ret[fragment_slice_dict[bas]] += sign * grad[start:end]
start += fragment_size_dict[bas]
elif ptype == 'hessian':
for fragn, basisn in compute_list:
hess = np.asarray(ptype_dict[(fragn, basisn)])
if vmfc:
sign = ((-1)**(n - len(fragn)))
# Build up start and end slices
abs_start, rel_start = 0, 0
abs_slices, rel_slices = [], []
for bas in basisn:
rel_end = rel_start + 3 * fragment_size_dict[bas]
rel_slices.append(slice(rel_start, rel_end))
rel_start += 3 * fragment_size_dict[bas]
tmp_slice = fragment_slice_dict[bas]
abs_slices.append(slice(tmp_slice.start * 3, tmp_slice.stop * 3))
for abs_sl1, rel_sl1 in zip(abs_slices, rel_slices):
for abs_sl2, rel_sl2 in zip(abs_slices, rel_slices):
ret[abs_sl1, abs_sl2] += hess[rel_sl1, rel_sl2]
else:
raise KeyError("ptype can only be gradient or hessian How did you end up here?")
def _print_nbody_energy(energy_body_dict, header, embedding=False):
core.print_out("""\n ==> N-Body: %s energies <==\n\n""" % header)
core.print_out(""" n-Body Total Energy [Eh] I.E. [kcal/mol] Delta [kcal/mol]\n""")
previous_e = energy_body_dict[1]
nbody_range = list(energy_body_dict)
nbody_range.sort()
for n in nbody_range:
delta_e = (energy_body_dict[n] - previous_e)
delta_e_kcal = delta_e * constants.hartree2kcalmol
int_e_kcal = (
energy_body_dict[n] - energy_body_dict[1]) * constants.hartree2kcalmol if not embedding else np.nan
core.print_out(""" %4s %20.12f %20.12f %20.12f\n""" % (n, energy_body_dict[n], int_e_kcal,
delta_e_kcal))
previous_e = energy_body_dict[n]
core.print_out("\n")
def nbody_gufunc(func, method_string, **kwargs):
"""
Computes the nbody interaction energy, gradient, or Hessian depending on input.
This is a generalized universal function for computing interaction and total quantities.
:returns: *return type of func* |w--w| The data.
:returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified.
:type func: function
:param func: ``energy`` || etc.
Python function that accepts method_string and a molecule. Returns an
energy, gradient, or Hessian as requested.
:type method_string: string
:param method_string: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.
First argument, lowercase and usually unlabeled. Indicates the computational
method to be passed to func.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:type return_wfn: :ref:`boolean <op_py_boolean>`
:param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
calculation result as the second element of a tuple.
:type bsse_type: string or list
:param bsse_type: ``'cp'`` || ``['nocp', 'vmfc']`` || |dl| ``None`` |dr| || etc.
Type of BSSE correction to compute: CP, NoCP, or VMFC. The first in this
list is returned by this function. By default, this function is not called.
:type max_nbody: int
:param max_nbody: ``3`` || etc.
Maximum n-body to compute, cannot exceed the number of fragments in the molecule.
:type ptype: string
:param ptype: ``'energy'`` || ``'gradient'`` || ``'hessian'``
Type of the procedure passed in.
:type return_total_data: :ref:`boolean <op_py_boolean>`
:param return_total_data: ``'on'`` || |dl| ``'off'`` |dr|
If True returns the total data (energy/gradient/etc) of the system,
otherwise returns interaction data.
:type levels: dict
:param levels: ``{1: 'ccsd(t)', 2: 'mp2', 'supersystem': 'scf'}`` || ``{1: 2, 2: 'ccsd(t)', 3: 'mp2'}`` || etc
Dictionary of different levels of theory for different levels of expansion
Note that method_string is not used in this case.
supersystem computes all higher order n-body effects up to nfragments.
:type embedding_charges: dict
:param embedding_charges: ``{1: [-0.834, 0.417, 0.417], ..}``
Dictionary of atom-centered point charges. keys: 1-based index of fragment, values: list of charges for each fragment.
:type charge_method: string
:param charge_method: ``scf/6-31g`` || ``b3lyp/6-31g*`` || etc
Method to compute point charges for monomers. Overridden by embedding_charges if both are provided.
:type charge_type: string
:param charge_type: ``MULLIKEN_CHARGES`` || ``LOWDIN_CHARGES``
Default is ``MULLIKEN_CHARGES``
"""
# Initialize dictionaries for easy data passing
metadata, component_results, nbody_results = {}, {}, {}
# Parse some kwargs
kwargs = p4util.kwargs_lower(kwargs)
if kwargs.get('levels', False):
return driver_nbody_helper.multi_level(func, **kwargs)
metadata['ptype'] = kwargs.pop('ptype', None)
metadata['return_wfn'] = kwargs.pop('return_wfn', False)
metadata['return_total_data'] = kwargs.pop('return_total_data', False)
metadata['molecule'] = kwargs.pop('molecule', core.get_active_molecule())
metadata['molecule'].update_geometry()
metadata['molecule'].fix_com(True)
metadata['molecule'].fix_orientation(True)
metadata['embedding_charges'] = kwargs.get('embedding_charges', False)
metadata['kwargs'] = kwargs
core.clean_variables()
if metadata['ptype'] not in ['energy', 'gradient', 'hessian']:
raise ValidationError("""N-Body driver: The ptype '%s' is not regonized.""" % metadata['ptype'])
# Parse bsse_type, raise exception if not provided or unrecognized
metadata['bsse_type_list'] = kwargs.pop('bsse_type')
if metadata['bsse_type_list'] is None:
raise ValidationError("N-Body GUFunc: Must pass a bsse_type")
if not isinstance(metadata['bsse_type_list'], list):
metadata['bsse_type_list'] = [metadata['bsse_type_list']]
for num, btype in enumerate(metadata['bsse_type_list']):
metadata['bsse_type_list'][num] = btype.lower()
if btype.lower() not in ['cp', 'nocp', 'vmfc']:
raise ValidationError("N-Body GUFunc: bsse_type '%s' is not recognized" % btype.lower())
metadata['max_nbody'] = kwargs.get('max_nbody', -1)
if metadata['molecule'].nfragments() == 1:
raise ValidationError("N-Body requires active molecule to have more than 1 fragment.")
metadata['max_frag'] = metadata['molecule'].nfragments()
if metadata['max_nbody'] == -1:
metadata['max_nbody'] = metadata['molecule'].nfragments()
else:
metadata['max_nbody'] = min(metadata['max_nbody'], metadata['max_frag'])
# Flip this off for now, needs more testing
# If we are doing CP lets save them integrals
#if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
# # Set to save RI integrals for repeated full-basis computations
# ri_ints_io = core.get_global_option('DF_INTS_IO')
# # inquire if above at all applies to dfmp2 or just scf
# core.set_global_option('DF_INTS_IO', 'SAVE')
# psioh = core.IOManager.shared_object()
# psioh.set_specific_retention(97, True)
bsse_str = metadata['bsse_type_list'][0]
if len(metadata['bsse_type_list']) > 1:
bsse_str = str(metadata['bsse_type_list'])
core.print_out("\n\n")
core.print_out(" ===> N-Body Interaction Abacus <===\n")
core.print_out(" BSSE Treatment: %s\n" % bsse_str)
# Get compute list
metadata = build_nbody_compute_list(metadata)
# Compute N-Body components
component_results = compute_nbody_components(func, method_string, metadata)
# Assemble N-Body quantities
nbody_results = assemble_nbody_components(metadata, component_results)
# Build wfn and bind variables
wfn = core.Wavefunction.build(metadata['molecule'], 'def2-svp')
dicts = [
'energies', 'ptype', 'intermediates', 'energy_body_dict', 'gradient_body_dict', 'hessian_body_dict', 'nbody',
'cp_energy_body_dict', 'nocp_energy_body_dict', 'vmfc_energy_body_dict'
]
if metadata['ptype'] == 'gradient':
wfn.set_gradient(nbody_results['ret_ptype'])
nbody_results['gradient_body_dict'] = nbody_results['ptype_body_dict']
elif metadata['ptype'] == 'hessian':
nbody_results['hessian_body_dict'] = nbody_results['ptype_body_dict']
wfn.set_hessian(nbody_results['ret_ptype'])
component_results_gradient = component_results.copy()
component_results_gradient['ptype'] = component_results_gradient['gradients']
metadata['ptype'] = 'gradient'
nbody_results_gradient = assemble_nbody_components(metadata, component_results_gradient)
wfn.set_gradient(nbody_results_gradient['ret_ptype'])
nbody_results['gradient_body_dict'] = nbody_results_gradient['ptype_body_dict']
for r in [component_results, nbody_results]:
for d in r:
if d in dicts:
for var, value in r[d].items():
try:
wfn.set_scalar_variable(str(var), value)
core.set_scalar_variable(str(var), value)
except:
wfn.set_array_variable(d.split('_')[0].upper() + ' ' + str(var), core.Matrix.from_array(value))
core.set_variable("CURRENT ENERGY", nbody_results['ret_energy'])
wfn.set_variable("CURRENT ENERGY", nbody_results['ret_energy'])
if metadata['return_wfn']:
return (nbody_results['ret_ptype'], wfn)
else:
return nbody_results['ret_ptype']
def build_nbody_compute_list(metadata):
"""Generates the list of N-Body computations to be performed for a given BSSE type.
Parameters
----------
metadata : dict of str
Dictionary containing N-body metadata.
Required ``'key': value`` pairs:
``'bsse_type_list'``: list of str
List of requested BSSE treatments. Possible values include lowercase ``'cp'``, ``'nocp'``,
and ``'vmfc'``.
``'max_nbody'``: int
Maximum number of bodies to include in the N-Body treatment.
Possible: `max_nbody` <= `max_frag`
Default: `max_nbody` = `max_frag`
``'max_frag'``: int
Number of distinct fragments comprising full molecular supersystem.
Returns
-------
metadata : dict of str
Dictionary containing N-body metadata.
New ``'key': value`` pair:
``'compute_dict'`` : dict of str: dict
Dictionary containing subdicts enumerating compute lists for each possible BSSE treatment.
Contents:
``'all'``: dict of int: set
Set containing full list of computations required
``'cp'``: dict of int: set
Set containing list of computations required for CP procedure
``'nocp'``: dict of int: set
Set containing list of computations required for non-CP procedure
``'vmfc_compute'``: dict of int: set
Set containing list of computations required for VMFC procedure
``'vmfc_levels'``: dict of int: set
Set containing list of levels required for VMFC procedure
"""
# What levels do we need?
nbody_range = range(1, metadata['max_nbody'] + 1)
fragment_range = range(1, metadata['max_frag'] + 1)
cp_compute_list = {x: set() for x in nbody_range}
nocp_compute_list = {x: set() for x in nbody_range}
vmfc_compute_list = {x: set() for x in nbody_range}
vmfc_level_list = {x: set() for x in nbody_range} # Need to sum something slightly different
# Verify proper passing of bsse_type_list
bsse_type_remainder = set(metadata['bsse_type_list']) - {'cp', 'nocp', 'vmfc'}
if bsse_type_remainder:
raise ValidationError("""Unrecognized BSSE type(s): %s
Possible values are 'cp', 'nocp', and 'vmfc'.""" % ', '.join(str(i) for i in bsse_type_remainder))
# Build up compute sets
if 'cp' in metadata['bsse_type_list']:
# Everything is in dimer basis
basis_tuple = tuple(fragment_range)
for nbody in nbody_range:
for x in itertools.combinations(fragment_range, nbody):
if metadata['max_nbody'] == 1: break
cp_compute_list[nbody].add((x, basis_tuple))
# Add monomers in monomer basis
for x in fragment_range:
cp_compute_list[1].add(((x, ), (x, )))
if 'nocp' in metadata['bsse_type_list']:
# Everything in monomer basis
for nbody in nbody_range:
for x in itertools.combinations(fragment_range, nbody):
nocp_compute_list[nbody].add((x, x))
if 'vmfc' in metadata['bsse_type_list']:
# Like a CP for all combinations of pairs or greater
for nbody in nbody_range:
for cp_combos in itertools.combinations(fragment_range, nbody):
basis_tuple = tuple(cp_combos)
for interior_nbody in nbody_range:
for x in itertools.combinations(cp_combos, interior_nbody):
combo_tuple = (x, basis_tuple)
vmfc_compute_list[interior_nbody].add(combo_tuple)
vmfc_level_list[len(basis_tuple)].add(combo_tuple)
# Build a comprehensive compute_range
compute_list = {x: set() for x in nbody_range}
for n in nbody_range:
compute_list[n] |= cp_compute_list[n]
compute_list[n] |= nocp_compute_list[n]
compute_list[n] |= vmfc_compute_list[n]
core.print_out(" Number of %d-body computations: %d\n" % (n, len(compute_list[n])))
metadata['compute_dict'] = {
'all': compute_list,
'cp': cp_compute_list,
'nocp': nocp_compute_list,
'vmfc_compute': vmfc_compute_list,
'vmfc_levels': vmfc_level_list
}
return metadata
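# Illustrative note (added for clarity, not part of the original source): for a
# two-fragment molecule with bsse_type=['cp'], compute_dict['cp'] comes out as
#   {1: {((1,), (1,)), ((2,), (2,)), ((1,), (1, 2)), ((2,), (1, 2))},
#    2: {((1, 2), (1, 2))}}
# i.e. monomers in the monomer and dimer bases plus the dimer in the dimer basis.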
def compute_nbody_components(func, method_string, metadata):
"""Computes requested N-body components.
Performs requested computations for psi4::Molecule object `molecule` according to
`compute_list` with function `func` at `method_string` level of theory.
Parameters
----------
func : {'energy', 'gradient', 'hessian'}
Function object to be called within N-Body procedure.
method_string : str
Indicates level of theory to be passed to function `func`.
metadata : dict of str
Dictionary of N-body metadata.
Required ``'key': value`` pairs:
``'compute_list'``: dict of int: set
List of computations to perform. Keys indicate body-levels, e.g., `compute_list[2]` is the
list of all 2-body computations required.
``'kwargs'``: dict
Arbitrary keyword arguments to be passed to function `func`.
Returns
-------
dict of str: dict
Dictionary containing computed N-body components.
Contents:
``'energies'``: dict of set: float64
Dictionary containing all energy components required for given N-body procedure.
``'ptype'``: dict of set: float64 or dict of set: psi4.Matrix
Dictionary of returned quantities from calls of function `func` during N-body computations
``'intermediates'``: dict of str: float64
Dictionary of psivars for intermediate N-body computations to be set at the end of the
N-body procedure.
"""
# Get required metadata
kwargs = metadata['kwargs']
molecule = metadata['molecule']
#molecule = core.get_active_molecule()
compute_list = metadata['compute_dict']['all']
# Now compute the energies
energies_dict = {}
gradients_dict = {}
ptype_dict = {}
intermediates_dict = {}
if kwargs.get('charge_method', False) and not metadata['embedding_charges']:
metadata['embedding_charges'] = driver_nbody_helper.compute_charges(kwargs['charge_method'],
kwargs.get('charge_type', 'MULLIKEN_CHARGES').upper(), molecule)
for count, n in enumerate(compute_list.keys()):
core.print_out("\n ==> N-Body: Now computing %d-body complexes <==\n\n" % n)
total = len(compute_list[n])
for num, pair in enumerate(compute_list[n]):
core.print_out(
"\n N-Body: Computing complex (%d/%d) with fragments %s in the basis of fragments %s.\n\n" %
(num + 1, total, str(pair[0]), str(pair[1])))
ghost = list(set(pair[1]) - set(pair[0]))
current_mol = molecule.extract_subsets(list(pair[0]), ghost)
current_mol.set_name("%s_%i_%i" % (current_mol.name(), count, num))
if metadata['embedding_charges']: driver_nbody_helper.electrostatic_embedding(metadata, pair=pair)
# Save energies info
ptype_dict[pair], wfn = func(method_string, molecule=current_mol, return_wfn=True, **kwargs)
core.set_global_option_python('EXTERN', None)
energies_dict[pair] = core.variable("CURRENT ENERGY")
gradients_dict[pair] = wfn.gradient()
var_key = "N-BODY (%s)@(%s) TOTAL ENERGY" % (', '.join([str(i) for i in pair[0]]), ', '.join(
[str(i) for i in pair[1]]))
intermediates_dict[var_key] = core.variable("CURRENT ENERGY")
core.print_out("\n N-Body: Complex Energy (fragments = %s, basis = %s: %20.14f)\n" % (str(
pair[0]), str(pair[1]), energies_dict[pair]))
# Flip this off for now, needs more testing
#if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
# core.set_global_option('DF_INTS_IO', 'LOAD')
core.clean()
return {
'energies': energies_dict,
'gradients': gradients_dict,
'ptype': ptype_dict,
'intermediates': intermediates_dict
}
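# Illustrative note (added for clarity, not part of the original source): the returned
# 'energies' and 'ptype' dicts are keyed by the same (fragments, basis) tuples as the
# compute lists, e.g. component_results['energies'][((1,), (1, 2))].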
def assemble_nbody_components(metadata, component_results):
"""Assembles N-body components into interaction quantities according to requested BSSE procedure(s).
Parameters
-----------
metadata : dict of str
Dictionary of N-body metadata.
Required ``'key': value`` pairs:
``'ptype'``: {'energy', 'gradient', 'hessian'}
Procedure which has generated the N-body components to be combined.
``'bsse_type_list'``: list of str
List of requested BSSE treatments. Possible values include lowercase ``'cp'``, ``'nocp'``,
and ``'vmfc'``.
``'max_nbody'``: int
Maximum number of bodies to include in the N-Body treatment.
Possible: `max_nbody` <= `max_frag`
Default: `max_nbody` = `max_frag`
``'max_frag'``: int
Number of distinct fragments comprising full molecular supersystem.
``'energies_dict'``: dict of set: float64
Dictionary containing all energy components required for given N-body procedure.
``'ptype_dict'``: dict of set: float64 or dict of set: psi4.Matrix
Dictionary of returned quantities from calls of function `func` during N-body computations
``'compute_dict'``: dict of str: dict
Dictionary containing {int: set} subdicts enumerating compute lists for each possible
BSSE treatment.
``'kwargs'``: dict
Arbitrary keyword arguments.
component_results : dict of str: dict
Dictionary containing computed N-body components.
Required ``'key': value`` pairs:
``'energies'``: dict of set: float64
Dictionary containing all energy components required for given N-body procedure.
``'ptype'``: dict of set: float64 or dict of set: psi4.Matrix
Dictionary of returned quantities from calls of function `func` during N-body computations
``'intermediates'``: dict of str: float64
Dictionary of psivars for intermediate N-body computations to be set at the end of the
N-body procedure.
Returns
-------
results : dict of str
Dictionary of all N-body results.
Contents:
``'ret_energy'``: float64
Interaction data requested. If multiple BSSE types requested in `bsse_type_list`, the
interaction data associated with the *first* BSSE type in the list is returned.
``'nbody_dict'``: dict of str: float64
Dictionary of relevant N-body psivars to be set
``'energy_body_dict'``: dict of int: float64
Dictionary of total energies at each N-body level, i.e., ``results['energy_body_dict'][2]``
is the sum of all 2-body total energies for the supersystem.
``'ptype_body_dict'``: dict or dict of int: array_like
Empty dictionary if `ptype` is ``'energy'``, or dictionary of total ptype arrays at each
N-body level; i.e., ``results['ptype_body_dict'][2]`` for `ptype` ``'gradient'`` is the
total 2-body gradient.
"""
# Unpack metadata
kwargs = metadata['kwargs']
nbody_range = range(1, metadata['max_nbody'] + 1)
# Unpack compute list metadata
compute_list = metadata['compute_dict']['all']
cp_compute_list = metadata['compute_dict']['cp']
nocp_compute_list = metadata['compute_dict']['nocp']
vmfc_compute_list = metadata['compute_dict']['vmfc_compute']
vmfc_level_list = metadata['compute_dict']['vmfc_levels']
# Build size and slices dictionaries
fragment_size_dict = {
frag: metadata['molecule'].extract_subsets(frag).natom()
for frag in range(1, metadata['max_frag'] + 1)
}
start = 0
fragment_slice_dict = {}
for k, v in fragment_size_dict.items():
fragment_slice_dict[k] = slice(start, start + v)
start += v
molecule_total_atoms = sum(fragment_size_dict.values())
# Final dictionaries
cp_energy_by_level = {n: 0.0 for n in nbody_range}
nocp_energy_by_level = {n: 0.0 for n in nbody_range}
cp_energy_body_dict = {n: 0.0 for n in nbody_range}
nocp_energy_body_dict = {n: 0.0 for n in nbody_range}
vmfc_energy_body_dict = {n: 0.0 for n in nbody_range}
# Build out ptype dictionaries if needed
if metadata['ptype'] != 'energy':
if metadata['ptype'] == 'gradient':
arr_shape = (molecule_total_atoms, 3)
elif metadata['ptype'] == 'hessian':
arr_shape = (molecule_total_atoms * 3, molecule_total_atoms * 3)
else:
raise KeyError("N-Body: ptype '%s' not recognized" % ptype)
cp_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}
nocp_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}
vmfc_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}
cp_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
nocp_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
vmfc_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
else:
cp_ptype_by_level, cp_ptype_body_dict = {}, {}
nocp_ptype_by_level, nocp_ptype_body_dict = {}, {}
vmfc_ptype_body_dict = {}
# Sum up all of the levels
nbody_dict = {}
for n in nbody_range:
# Energy
# Extract energies for monomers in monomer basis for CP total data
if n == 1:
cp_monomers_in_monomer_basis = [v for v in cp_compute_list[1] if len(v[1]) == 1]
cp_monomer_energies = 0.0
cp_monomer_energy_list = []
for i in cp_monomers_in_monomer_basis:
cp_monomer_energy_list.append(component_results['energies'][i])
cp_monomer_energies += component_results['energies'][i]
cp_compute_list[1].remove(i)
cp_energy_by_level[n] = sum(component_results['energies'][v] for v in cp_compute_list[n])
nocp_energy_by_level[n] = sum(component_results['energies'][v] for v in nocp_compute_list[n])
# Special vmfc case
if n > 1:
vmfc_energy_body_dict[n] = vmfc_energy_body_dict[n - 1]
for tup in vmfc_level_list[n]:
vmfc_energy_body_dict[n] += ((-1)**(n - len(tup[0]))) * component_results['energies'][tup]
# Do ptype
if metadata['ptype'] != 'energy':
_sum_cluster_ptype_data(metadata['ptype'], component_results['ptype'], cp_compute_list[n],
fragment_slice_dict, fragment_size_dict, cp_ptype_by_level[n])
_sum_cluster_ptype_data(metadata['ptype'], component_results['ptype'], nocp_compute_list[n],
fragment_slice_dict, fragment_size_dict, nocp_ptype_by_level[n])
_sum_cluster_ptype_data(
metadata['ptype'],
component_results['ptype'],
vmfc_level_list[n],
fragment_slice_dict,
fragment_size_dict,
vmfc_ptype_by_level[n],
vmfc=True,
n=n)
# Add extracted monomers back.
for i, j in enumerate(cp_monomers_in_monomer_basis):
cp_compute_list[1].add(j)
if metadata['ptype'] != 'energy':
# Extract ptype data for monomers in monomer basis for CP total data
cp_monomer_ptype = np.zeros(arr_shape)
_sum_cluster_ptype_data(metadata['ptype'], component_results['ptype'], cp_monomers_in_monomer_basis,
fragment_slice_dict, fragment_size_dict, cp_monomer_ptype)
# Compute cp energy and ptype
if 'cp' in metadata['bsse_type_list']:
for n in nbody_range:
if n == metadata['max_frag']:
cp_energy_body_dict[n] = cp_energy_by_level[n] - bsse
if metadata['ptype'] != 'energy':
cp_ptype_body_dict[n][:] = cp_ptype_by_level[n] - bsse_ptype
continue
for k in range(1, n + 1):
take_nk = nCr(metadata['max_frag'] - k - 1, n - k)
sign = ((-1)**(n - k))
value = cp_energy_by_level[k]
cp_energy_body_dict[n] += take_nk * sign * value
if metadata['ptype'] != 'energy':
value = cp_ptype_by_level[k]
cp_ptype_body_dict[n] += take_nk * sign * value
if n == 1:
bsse = cp_energy_body_dict[n] - cp_monomer_energies
cp_energy_body_dict[n] = cp_monomer_energies
if metadata['ptype'] != 'energy':
bsse_ptype = cp_ptype_body_dict[n] - cp_monomer_ptype
cp_ptype_body_dict[n] = cp_monomer_ptype.copy()
else:
cp_energy_body_dict[n] -= bsse
if metadata['ptype'] != 'energy':
cp_ptype_body_dict[n] -= bsse_ptype
cp_interaction_energy = cp_energy_body_dict[metadata['max_nbody']] - cp_energy_body_dict[1]
nbody_dict['Counterpoise Corrected Interaction Energy'] = cp_interaction_energy
for n in nbody_range[1:]:
var_key = 'CP-CORRECTED %d-BODY INTERACTION ENERGY' % n
nbody_dict[var_key] = cp_energy_body_dict[n] - cp_energy_body_dict[1]
_print_nbody_energy(cp_energy_body_dict, "Counterpoise Corrected (CP)", metadata['embedding_charges'])
cp_interaction_energy = cp_energy_body_dict[metadata['max_nbody']] - cp_energy_body_dict[1]
nbody_dict['Counterpoise Corrected Total Energy'] = cp_energy_body_dict[metadata['max_nbody']]
nbody_dict['Counterpoise Corrected Interaction Energy'] = cp_interaction_energy
# Compute nocp energy and ptype
if 'nocp' in metadata['bsse_type_list']:
for n in nbody_range:
if n == metadata['max_frag']:
nocp_energy_body_dict[n] = nocp_energy_by_level[n]
if metadata['ptype'] != 'energy':
nocp_ptype_body_dict[n][:] = nocp_ptype_by_level[n]
continue
for k in range(1, n + 1):
take_nk = nCr(metadata['max_frag'] - k - 1, n - k)
sign = ((-1)**(n - k))
value = nocp_energy_by_level[k]
nocp_energy_body_dict[n] += take_nk * sign * value
if metadata['ptype'] != 'energy':
value = nocp_ptype_by_level[k]
nocp_ptype_body_dict[n] += take_nk * sign * value
_print_nbody_energy(nocp_energy_body_dict, "Non-Counterpoise Corrected (NoCP)", metadata['embedding_charges'])
nocp_interaction_energy = nocp_energy_body_dict[metadata['max_nbody']] - nocp_energy_body_dict[1]
nbody_dict['Non-Counterpoise Corrected Total Energy'] = nocp_energy_body_dict[metadata['max_nbody']]
nbody_dict['Non-Counterpoise Corrected Interaction Energy'] = nocp_interaction_energy
for n in nbody_range[1:]:
var_key = 'NOCP-CORRECTED %d-BODY INTERACTION ENERGY' % n
nbody_dict[var_key] = nocp_energy_body_dict[n] - nocp_energy_body_dict[1]
# Compute vmfc ptype
if 'vmfc' in metadata['bsse_type_list']:
if metadata['ptype'] != 'energy':
for n in nbody_range:
if n > 1:
vmfc_ptype_body_dict[n] = vmfc_ptype_by_level[n - 1]
vmfc_ptype_body_dict[n] += vmfc_ptype_by_level[n]
_print_nbody_energy(vmfc_energy_body_dict, "Valiron-Mayer Function Couterpoise (VMFC)",
metadata['embedding_charges'])
vmfc_interaction_energy = vmfc_energy_body_dict[metadata['max_nbody']] - vmfc_energy_body_dict[1]
nbody_dict['Valiron-Mayer Function Couterpoise Total Energy'] = vmfc_energy_body_dict[metadata['max_nbody']]
nbody_dict['Valiron-Mayer Function Couterpoise Interaction Energy'] = vmfc_interaction_energy
for n in nbody_range[1:]:
var_key = 'VMFC-CORRECTED %d-BODY INTERACTION ENERGY' % n
nbody_dict[var_key] = vmfc_energy_body_dict[n] - vmfc_energy_body_dict[1]
# Returns
results = {}
results['nbody'] = nbody_dict
for b in ['cp', 'nocp', 'vmfc']:
results['%s_energy_body_dict' % b] = eval('%s_energy_body_dict' % b)
results['%s_energy_body_dict' % b] = {str(i) + b: j for i, j in results['%s_energy_body_dict' % b].items()}
# Figure out and build return types
return_method = metadata['bsse_type_list'][0]
if return_method == 'cp':
results['ptype_body_dict'] = cp_ptype_body_dict
results['energy_body_dict'] = cp_energy_body_dict
elif return_method == 'nocp':
results['ptype_body_dict'] = nocp_ptype_body_dict
results['energy_body_dict'] = nocp_energy_body_dict
elif return_method == 'vmfc':
results['ptype_body_dict'] = vmfc_ptype_body_dict
results['energy_body_dict'] = vmfc_energy_body_dict
else:
raise ValidationError(
"N-Body Wrapper: Invalid return type. Should never be here, please post this error on github.")
if metadata['return_total_data']:
results['ret_energy'] = results['energy_body_dict'][metadata['max_nbody']]
else:
results['ret_energy'] = results['energy_body_dict'][metadata['max_nbody']]
results['ret_energy'] -= results['energy_body_dict'][1]
if metadata['ptype'] != 'energy':
if metadata['return_total_data']:
np_final_ptype = results['ptype_body_dict'][metadata['max_nbody']].copy()
else:
np_final_ptype = results['ptype_body_dict'][metadata['max_nbody']].copy()
np_final_ptype -= results['ptype_body_dict'][1]
results['ret_ptype'] = core.Matrix.from_array(np_final_ptype)
else:
results['ret_ptype'] = results['ret_energy']
return results
|
ashutoshvt/psi4
|
psi4/driver/driver_nbody.py
|
Python
|
lgpl-3.0
| 34,436
|
import copy
from typing import Set, Any
from .node import Node
from .fol.node_types import is_variable_declaration
class Namer:
def __init__(self):
self._name_dict = {}
self._first_name_candidates = ['x', 'y', 'z', 'w']
def _new_name(self, taken_names: Set[str]) -> str:
def make_name(prefix, index):
return '{}_{}'.format(prefix, index)
for cand in self._first_name_candidates:
if cand not in taken_names:
return cand
for cand in self._first_name_candidates:
for index in range(1, 3):
name = make_name(cand, index)
if name not in taken_names:
return name
index = 4
prefix = self._first_name_candidates[0]
name = make_name(prefix, index)
while name in taken_names:
index = index + 1
name = make_name(prefix, index)
return name
def _name_node(self, node: Node, taken_names: Set[str]) -> str:
if isinstance(self.repr_node(node), int):
# We only need to name nodes that return the object id
# as their representation. This is a convention.
return self._new_name(taken_names)
else:
return None
def analyze(self, node: Node):
def recurse_analyze(node, taken_names: Set[str]):
if node is None:
return
new_node_name = self._name_node(node, taken_names)
if new_node_name is not None:
taken_names.add(new_node_name)
self._name_dict[node] = new_node_name
decl_children = [x for x in node.children()
if is_variable_declaration(x)]
nondecl_children = [x for x in node.children()
if not is_variable_declaration(x)]
# Variable declarations as our children affect the names
# of our other children, so we pass in taken_names by
# reference, so that the declared names are added to it
for child in decl_children:
recurse_analyze(child, taken_names)
for child in nondecl_children:
recurse_analyze(child, copy.copy(taken_names))
recurse_analyze(node, set())
def _repr_resolve(self, thing: Any) -> str:
if thing is None:
# Support None gracefully
return "None"
if not isinstance(thing, Node):
# If we have a string or an int return them
# This complication is needed to handle Node.repr_node
# returing a reference node.
return thing
else:
node = thing
return (self._name_dict[node]
if node in self._name_dict
else self._repr_resolve(node.repr_node()))
def repr_node(self, node: Node) -> str:
return self._repr_resolve(node)
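# Illustrative usage sketch (added for clarity, not part of the original source);
# `formula_root` stands for a hypothetical ddq Node instance:
#
#   namer = Namer()
#   namer.analyze(formula_root)
#   print(namer.repr_node(formula_root))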
|
jadnohra/connect
|
proto_1/ddq/namer.py
|
Python
|
unlicense
| 2,953
|
"""
Common functions for setup.
Usage:
from libtbngsetup import *
Do not relocate to a subfolder. Requires Python 3.x.
"""
import sys,logging,os
from pathlib import Path
from string import Template
import netifaces as ni
current_dir = os.path.dirname(os.path.abspath(__file__))
project_dir = Path(current_dir).parent
sys.path.insert(0,'{0}/engine'.format(project_dir))
from libraries import utility
prefix="#Added by TBNG setup - do not edit "
def check_interface(interface_name):
command="nmcli dev show {0}|grep unmanaged||true".format(interface_name)
if "unmanaged" not in utility.run_shell_command(command).decode("utf-8"):
raise Exception("""Interface {0} appears to be managed or not configured.
Configure it via /etc/network/interfaces to have static ip and restart Network Manager or reboot your device.""".format(interface_name))
return ni.ifaddresses(interface_name)[2][0]['addr']
def toSystemd(name,parameters,autostart=False):
systemd_folder="/lib/systemd/system"
filein = open( "{0}/setup/templates/{1}".format(project_dir,name) )
src = Template( filein.read() )
src.substitute(parameters)
with open("{0}/{1}".format(systemd_folder,name), "w") as text_file:
text_file.write(src.substitute(parameters))
logging.info("File {0}/{1} created".format(systemd_folder,name))
logging.debug(utility.run_shell_command("systemctl daemon-reload").decode("utf-8"))
if autostart:
logging.debug(utility.run_shell_command("systemctl enable {0}".format(name)).decode("utf-8"))
|
znoxx/tbng
|
setup/libtbngsetup.py
|
Python
|
unlicense
| 1,508
|
# -*- coding: utf-8 -*-
from unicodedata import normalize
from flask import Flask, render_template, request, abort, session, flash, redirect, url_for, Response, g
from flaskext.markdown import Markdown
from werkzeug.contrib.atom import AtomFeed
from werkzeug import secure_filename
from urlparse import urljoin
from pymongo import Connection
from bson import ObjectId
import ConfigParser
import datetime
import os, sys, logging
logging.basicConfig(stream=sys.stderr)
app = Flask(__name__)
Markdown(app)
con = Connection()
col = con.tinyblog
basedir = os.path.dirname(os.path.realpath(__file__))
config = ConfigParser.ConfigParser()
config.readfp(open(basedir+'/config.ini'))
@app.route('/')
def index():
data = {
'articles' : col.article.find({'published':"1"}).sort('updated', -1).limit(10),
'page' : 1,
'total' : col.article.count()
}
return render_template('index.html', data=data)
@app.route('/page/<num>')
def page(num) :
skip = 10 * int(num) -10
if skip < 0 :
skip = 0
data = {
'articles' : col.article.find({'published':"1"}).sort('updated', -1).limit(10).skip(skip),
'page' : int(num),
'total' : col.article.count()
}
return render_template('index.html', data=data)
UPLOAD_FOLDER = basedir+'/uploads'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/upload', methods=['POST'])
def upload() :
if 'auth' not in session :
return redirect(url_for('auth'))
if request.method == 'POST':
saved_files_urls = []
for key, file in request.files.iteritems():
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(UPLOAD_FOLDER, filename))
ident = ObjectId()
col.media.insert({'filename' : filename, '_id' : ident})
try :
saved_files_urls.append('/uploads/'+filename)
except :
pass
return saved_files_urls[0]
@app.route('/<slug>', methods=['GET','POST'])
def view(slug) :
if not slug :
return abort(404)
if request.method == 'POST' :
comment = request.form.get('comment')
author = request.form.get('author')
if not comment or not author :
flash('Failed to post comment due to missing fields', 'danger')
return redirect('/'+slug)
data = {
'comment': request.form.get('comment'),
'slug': request.form.get('slug'),
'author': request.form.get('author'),
'approved': 0,
'created' : datetime.datetime.utcnow()
}
col.comment.insert(data)
flash('Your comment is awaiting moderation', 'warning')
# probably send mod an email
return redirect('/'+slug)
criteria = {'slug':slug}
data = {
'article' : col.article.find_one(criteria),
'comments' : col.comment.find(criteria).sort('updated', -1)
}
return render_template('view.html', data=data)
@app.route('/delete/<comment_id>')
def remove(comment_id) :
if 'auth' not in session :
return redirect(url_for('auth'))
try :
cid = ObjectId(comment_id)
except :
return abort(404)
criteria = {
'_id' : cid
}
data = {
'$set' : {
'approved' : 0
}
}
col.comment.update(criteria, data)
flash('Comment Unapproved', 'success')
return redirect(request.referrer)
@app.route('/approve/<comment_id>')
def approve(comment_id) :
if 'auth' not in session :
return redirect(url_for('auth'))
try :
cid = ObjectId(comment_id)
except :
return abort(404)
criteria = {
'_id' : cid
}
data = {
'$set' : {
'approved' : 1
}
}
col.comment.update(criteria, data)
flash('Comment approved', 'success')
return redirect(request.referrer)
@app.route('/settings', methods=['GET','POST'])
def settings() :
if 'auth' not in session :
return redirect(url_for('auth'))
if request.method == 'POST':
title = request.form.get('title')
description = request.form.get('description')
author = request.form.get('author')
style = request.form.get('style')
criteria = {'settings':1}
data = {
'$set' : {
'title' : title,
'description' : description,
'style' : style,
'author' : author,
}
}
col.settings.update(criteria, data, True)
try :
set_settings()
except :
pass
flash('Settings Saved Successfully', 'success')
return redirect(url_for('settings'))
data = col.settings.find_one({'settings':1})
return render_template('settings.html', data=data)
@app.route('/edit/<slug>', methods=['GET','POST'])
def edit(slug) :
if 'auth' not in session :
return redirect(url_for('auth'))
if request.method == 'POST':
title = request.form.get('title')
if not title :
return redirect('/edit/new')
slug = request.form.get('slug')
if not slug :
slug = slugify(title)
criteria = {'slug':slug}
raw_categories = request.form.get('categories')
categories = [x.strip() for x in raw_categories.split(',')]
data = {
'$set' : {
'title' : title,
'slug' : slug,
'body' : request.form.get('body'),
'image' : request.form.get('image'),
'overview' : request.form.get('overview'),
'published' : request.form.get('published'),
'categories' : categories,
'updated' : datetime.datetime.utcnow()
}
}
col.article.update(criteria, data, True)
flash('Post Saved Successfully', 'success')
return redirect('/edit/'+slug)
data = col.article.find_one({'slug':slug})
return render_template('edit.html', data=data)
@app.route('/auth', methods=['GET','POST'])
def auth() :
if request.method == 'POST' :
username = request.form.get('username')
password = request.form.get('password')
if username != config.get('tinyblog', 'username') or password != config.get('tinyblog', 'password') :
flash('Incorrect username / password', 'danger')
else :
session['auth'] = True
flash('Welcome '+username, 'success')
return redirect(url_for('index'))
data = {}
return render_template('auth.html', data=data)
@app.route('/category/<category>')
def category(category) :
criteria = {
'categories' : category
}
data = {
'articles' : col.article.find(criteria),
'page' : 1,
'total' : col.article.count(),
'category' : category
}
return render_template('index.html', data=data)
@app.route('/style.css')
def stylesheet() :
try :
style = app.config.style
return Response(style, mimetype='text/css')
except :
return ''
@app.route('/sitemap.xml')
def sitemap() :
return ''
@app.route('/logout')
def logout() :
session.clear()
return redirect(url_for('index'))
def make_external(url):
return urljoin(request.url_root, url)
@app.route('/feed')
def recent_feed():
feed = AtomFeed('Recent Articles', feed_url=request.url, url=request.url_root)
articles = col.article.find().limit(20).sort('updated', -1)
for article in articles:
feed.add(
article['title'],
unicode(article['overview']),
content_type='text',
author=app.config.blog_author,
url=make_external(article['slug']),
updated=article['updated'],
published=article['updated']
)
return feed.get_response()
def slugify(text, encoding=None,
# @thanks https://gist.github.com/turicas/1428479
permitted_chars='abcdefghijklmnopqrstuvwxyz0123456789-'):
if isinstance(text, str):
text = text.decode(encoding or 'ascii')
clean_text = text.strip().replace(' ', '-').lower()
while '--' in clean_text:
clean_text = clean_text.replace('--', '-')
ascii_text = normalize('NFKD', clean_text).encode('ascii', 'ignore')
strict_text = map(lambda x: x if x in permitted_chars else '', ascii_text)
return ''.join(strict_text)
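# Illustrative examples (added for clarity, not part of the original source):
#   slugify(u'Hello, World!') -> 'hello-world'
#   slugify(u'Crème brûlée') -> 'creme-brulee'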
def set_settings() :
try :
settings = col.settings.find_one()
app.config.blog_title = settings['title']
app.config.blog_author = settings['author']
app.config.blog_description = settings['description']
app.config.style = settings['style']
except :
return
set_settings()
if __name__ == '__main__' :
app.secret_key = 'abc'
app.run(debug=True, host="0.0.0.0", port=5001)
|
aster1sk/tinyblog
|
__init__.py
|
Python
|
unlicense
| 9,109
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A simple Python module for validating BagIt profiles. See
https://github.com/bagit-profiles/bagit-profiles
for more information.
This module is intended for use with https://github.com/edsu/bagit but does not extend it.
Usage:
import bagit
import bagit_profile
# Instantiate an existing Bag using https://github.com/edsu/bagit.
bag = bagit.Bag('mydir')
# Instantiate a profile, supplying its URI.
my_profile = bagit_profile.Profile('http://example.com/bagitprofile.json')
# Validate 'Serialization' and 'Accept-Serialization'. This must be done
# before .validate(bag) is called. 'mydir' is the path to the Bag.
if my_profile.validate_serialization('mydir'):
print "Serialization validates"
else:
print "Serialization does not validate"
# Validate the rest of the profile.
if my_profile.validate(bag):
print "Validates"
else:
print "Does not validate"
"""
import json
import logging
import mimetypes
import sys
from fnmatch import fnmatch
from os import listdir, walk
from os.path import basename, exists, isdir, isfile, join, relpath, split
if sys.version_info > (3,):
basestring = str
from urllib.request import urlopen # pylint: disable=no-name-in-module
else:
basestring = basestring
from urllib import urlopen # pylint: disable=no-name-in-module
# Define an exception class for use within this module.
class ProfileValidationError(Exception):
# TODO: or just 'pass' instead of __init__ and __str__
def __init__(self, value):
super(ProfileValidationError, self).__init__(value)
self.value = value
def __str__(self):
return repr(self.value)
class ProfileValidationReport(object): # pylint: disable=useless-object-inheritance
def __init__(self):
self.errors = []
@property
def is_valid(self):
return not self.errors
def __str__(self):
if self.is_valid:
return "VALID"
return "INVALID: %s" % "\n ".join(["%s" % e for e in self.errors])
# Define the Profile class.
class Profile(object): # pylint: disable=useless-object-inheritance
_baginfo_profile_id_tag = "BagIt-Profile-Identifier"
def __init__(self, url, profile=None, ignore_baginfo_tag_case=False):
self.url = url
if profile is None:
profile = self.get_profile()
else:
if isinstance(profile, dict):
profile = profile
else:
profile = json.loads(profile)
self.validate_bagit_profile(profile)
# Report of the errors in the last run of validate
self.report = None
self.profile = profile
self.ignore_baginfo_tag_case = ignore_baginfo_tag_case
def _fail(self, msg):
logging.error(msg)
raise ProfileValidationError(msg)
def _warn(self, msg):
logging.error(msg)
def get_profile(self):
try:
f = urlopen(self.url)
profile = f.read()
if sys.version_info > (3,):
profile = profile.decode("utf-8")
profile = json.loads(profile)
except Exception as e: # pylint: disable=broad-except
print("Cannot retrieve profile from %s: %s", self.url, e)
logging.error("Cannot retrieve profile from %s: %s", self.url, e)
# This is a fatal error.
sys.exit(1)
return profile
# Call all the validate functions other than validate_bagit_profile(),
# which we've already called. 'Serialization' and 'Accept-Serialization'
# are validated in validate_serialization().
def validate(self, bag):
self.report = ProfileValidationReport()
for (fn, msg, min_version) in [
(self.validate_bag_info, "Error in bag-info.txt", None),
(self.validate_manifests_required, "Required manifests not found", None),
(
self.validate_tag_manifests_required,
"Required tag manifests not found",
None,
),
(self.validate_payload_manifests_allowed, "Disallowed payload manifests present", (1, 3, 0)),
(self.validate_tag_manifests_allowed, "Disallowed tag manifests present", (1, 3, 0)),
(self.validate_tag_files_required, "Required tag files not found", None),
(
self.validate_allow_fetch,
"fetch.txt is present but is not allowed",
None,
),
(
self.validate_accept_bagit_version,
"Required BagIt version not found",
None,
),
(self.validate_tag_files_allowed, "Tag files not allowed", (1, 2, 0)),
]:
try:
if min_version and self.profile_version_info < min_version:
logging.info(
"Skipping %s introduced in version %s (version validated: %s)",
fn,
min_version,
self.profile_version_info,
)
continue
fn(bag)
except ProfileValidationError as e:
# self._warn("%s: %s" % (msg, e))
self.report.errors.append(e)
return self.report.is_valid
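# Illustrative usage sketch (added for clarity, not part of the original source);
# `bag` is a bagit.Bag as in the module docstring:
#
#   if not my_profile.validate(bag):
#       print(my_profile.report)  # "INVALID: ..." listing each collected error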
def validate_bagit_profile(self, profile):
"""
Set default values for unspecified tags and validate the profile itself.
"""
if "Serialization" not in profile:
profile["Serialization"] = "optional"
if "Allow-Fetch.txt" not in profile:
profile["Allow-Fetch.txt"] = True
if (
"BagIt-Profile-Info" in profile
and "BagIt-Profile-Version" in profile["BagIt-Profile-Info"]
):
profile_version = profile["BagIt-Profile-Info"]["BagIt-Profile-Version"]
else:
profile_version = "1.1.0"
self.profile_version_info = tuple(int(i) for i in profile_version.split("."))
self.validate_bagit_profile_info(profile)
self.validate_bagit_profile_accept_bagit_versions(profile)
self.validate_bagit_profile_bag_info(profile)
# Check self.profile['bag-profile-info'] to see if "Source-Organization",
# "External-Description", "Version" and "BagIt-Profile-Identifier" are present.
def validate_bagit_profile_info(self, profile):
if "BagIt-Profile-Info" not in profile:
self._fail("%s: Required 'BagIt-Profile-Info' dict is missing." % profile)
if "Source-Organization" not in profile["BagIt-Profile-Info"]:
self._fail(
"%s: Required 'Source-Organization' tag is not in 'BagIt-Profile-Info'."
% profile
)
if "Version" not in profile["BagIt-Profile-Info"]:
self._warn(
"%s: Required 'Version' tag is not in 'BagIt-Profile-Info'." % profile
)
return False
if "BagIt-Profile-Identifier" not in profile["BagIt-Profile-Info"]:
self._fail(
"%s: Required 'BagIt-Profile-Identifier' tag is not in 'BagIt-Profile-Info'."
% profile
)
return True
def validate_bagit_profile_accept_bagit_versions(self, profile):
"""
Ensure all versions in 'Accept-BagIt-Version' are strings
"""
if "Accept-BagIt-Version" in profile:
for version_number in profile["Accept-BagIt-Version"]:
# pylint: disable=undefined-variable
if not isinstance(version_number, basestring):
raise ProfileValidationError(
'Version number "%s" in "Accept-BagIt-Version" is not a string!'
% version_number
)
return True
def validate_bagit_profile_bag_info(self, profile):
if 'Bag-Info' in profile:
for tag in profile['Bag-Info']:
config = profile['Bag-Info'][tag]
if self.profile_version_info >= (1, 3, 0) and \
'description' in config and not isinstance(config['description'], basestring):
self._fail("%s: Profile Bag-Info '%s' tag 'description' property, when present, must be a string." %
(profile, tag))
return True
# Validate tags in self.profile['Bag-Info'].
def validate_bag_info(self, bag):
# First, check to see if bag-info.txt exists.
path_to_baginfotxt = join(bag.path, "bag-info.txt")
if not exists(path_to_baginfotxt):
self._fail("%s: bag-info.txt is not present." % bag)
# Then check for the required 'BagIt-Profile-Identifier' tag and ensure it has the same value
# as self.url.
if self.ignore_baginfo_tag_case:
bag_info = {self.normalize_tag(k): v for k, v in bag.info.items()}
ignore_tag_case_help = ""
else:
bag_info = bag.info
ignore_tag_case_help = " Set 'ignore_baginfo_tag_case' to True if you wish to ignore tag case."
profile_id_tag = self.normalize_tag(self._baginfo_profile_id_tag)
if profile_id_tag not in bag_info:
self._fail(
("%s: Required '%s' tag is not in bag-info.txt." + ignore_tag_case_help)
% (bag, self._baginfo_profile_id_tag)
)
else:
if bag_info[profile_id_tag] != self.url:
self._fail(
"%s: '%s' tag does not contain this profile's URI: <%s> != <%s>"
% (bag, profile_id_tag, bag_info[profile_id_tag], self.url)
)
# Then, iterate through self.profile['Bag-Info'] and if a key has a dict containing a 'required' key that is
# True, check to see if that key exists in bag.info.
for tag in self.profile["Bag-Info"]:
normalized_tag = self.normalize_tag(tag)
config = self.profile["Bag-Info"][tag]
if "required" in config and config["required"] is True:
if normalized_tag not in bag_info:
self._fail(
("%s: Required tag '%s' is not present in bag-info.txt." + ignore_tag_case_help)
% (bag, tag)
)
# If the tag is in bag-info.txt, check to see if the value is constrained.
if "values" in config and normalized_tag in bag_info:
if bag_info[normalized_tag] not in config["values"]:
self._fail(
"%s: Required tag '%s' is present in bag-info.txt but does not have an allowed value ('%s')."
% (bag, tag, bag_info[normalized_tag])
)
# If the tag is nonrepeatable, make sure it only exists once. We do this by checking to see if the value for the key is a list.
if "repeatable" in config and config["repeatable"] is False:
value = bag_info.get(normalized_tag)
if isinstance(value, list):
self._fail(
"%s: Nonrepeatable tag '%s' occurs %s times in bag-info.txt."
% (bag, tag, len(value))
)
return True
# Normalize to canonical lowercase, if profile is ignoring bag-info.txt tag case.
def normalize_tag(self, tag):
return tag if not self.ignore_baginfo_tag_case else tag.lower()
# For each member of self.profile['manifests_required'], throw an exception if
# the manifest file is not present.
def validate_manifests_required(self, bag):
for manifest_type in self.profile["Manifests-Required"]:
path_to_manifest = join(bag.path, "manifest-" + manifest_type + ".txt")
if not exists(path_to_manifest):
self._fail(
"%s: Required manifest type '%s' is not present in Bag."
% (bag, manifest_type)
)
return True
# For each member of self.profile['tag_manifests_required'], throw an exception if
# the tag manifest file is not present.
def validate_tag_manifests_required(self, bag):
# Tag manifests are optional, so we return True if none are defined in the profile.
if "Tag-Manifests-Required" not in self.profile:
return True
for tag_manifest_type in self.profile["Tag-Manifests-Required"]:
path_to_tag_manifest = join(
bag.path, "tagmanifest-" + tag_manifest_type + ".txt"
)
if not exists(path_to_tag_manifest):
self._fail(
"%s: Required tag manifest type '%s' is not present in Bag."
% (bag, tag_manifest_type)
)
return True
@staticmethod
def manifest_algorithms(manifest_files):
for filepath in manifest_files:
filename = basename(filepath)
if filename.startswith("tagmanifest-"):
prefix = "tagmanifest-"
else:
prefix = "manifest-"
algorithm = filename.replace(prefix, "").replace(".txt", "")
yield algorithm
def validate_tag_manifests_allowed(self, bag):
return self._validate_allowed_manifests(bag, manifest_type="tag",
manifests_present=self.manifest_algorithms(bag.tagmanifest_files()),
allowed_attribute="Tag-Manifests-Allowed",
required_attribute="Tag-Manifests-Required")
def validate_payload_manifests_allowed(self, bag):
return self._validate_allowed_manifests(bag, manifest_type="payload",
manifests_present=self.manifest_algorithms(bag.manifest_files()),
allowed_attribute="Manifests-Allowed",
required_attribute="Manifests-Required")
def _validate_allowed_manifests(self, bag, manifest_type=None, manifests_present=None,
allowed_attribute=None, required_attribute=None):
if allowed_attribute not in self.profile:
return True
allowed = self.profile[allowed_attribute]
required = self.profile[required_attribute] if required_attribute in self.profile else []
required_but_not_allowed = [alg for alg in required if alg not in allowed]
if required_but_not_allowed:
self._fail("%s: Required %s manifest type(s) %s not allowed by %s" %
(bag, manifest_type, [str(a) for a in required_but_not_allowed], allowed_attribute))
present_but_not_allowed = [alg for alg in manifests_present if alg not in allowed]
if present_but_not_allowed:
self._fail("%s: Unexpected %s manifest type(s) '%s' present, but not allowed by %s" %
(bag, manifest_type, [str(a) for a in present_but_not_allowed], allowed_attribute))
return True
def validate_tag_files_allowed(self, bag):
"""
Validate the ``Tag-Files-Allowed`` tag.
"""
allowed = (
self.profile["Tag-Files-Allowed"]
if "Tag-Files-Allowed" in self.profile
else ["*"]
)
required = (
self.profile["Tag-Files-Required"]
if "Tag-Files-Required" in self.profile
else []
)
# For each member of 'Tag-Files-Required' ensure it is also in 'Tag-Files-Allowed'.
required_but_not_allowed = [f for f in required if not fnmatch_any(f, allowed)]
if required_but_not_allowed:
self._fail(
"%s: Required tag files '%s' not listed in Tag-Files-Allowed"
% (bag, required_but_not_allowed)
)
# For each tag file in the bag base directory, ensure it is also in 'Tag-Files-Allowed'.
for tag_file in find_tag_files(bag.path):
tag_file = relpath(tag_file, bag.path)
if not fnmatch_any(tag_file, allowed):
self._fail(
"%s: Existing tag file '%s' is not listed in Tag-Files-Allowed."
% (bag, tag_file)
)
# For each member of self.profile['Tag-Files-Required'], throw an exception if
# the path does not exist.
def validate_tag_files_required(self, bag):
# Tag files are optional, so we return True if none are defined in the profile.
if "Tag-Files-Required" not in self.profile:
return True
for tag_file in self.profile["Tag-Files-Required"]:
path_to_tag_file = join(bag.path, tag_file)
if not exists(path_to_tag_file):
self._fail(
"%s: Required tag file '%s' is not present in Bag."
% (bag, path_to_tag_file)
)
return True
# Check to see if this constraint is False, and if it is, then check to see
# if the fetch.txt file exists. If it does, throw an exception.
def validate_allow_fetch(self, bag):
if self.profile["Allow-Fetch.txt"] is False:
path_to_fetchtxt = join(bag.path, "fetch.txt")
if exists(path_to_fetchtxt):
self._fail("%s: Fetch.txt is present but is not allowed." % bag)
return True
# Check the Bag's version, and if it's not in the list of allowed versions,
# throw an exception.
def validate_accept_bagit_version(self, bag):
actual = bag.tags["BagIt-Version"]
allowed = self.profile["Accept-BagIt-Version"]
if actual not in allowed:
self._fail(
"%s: Bag version '%s' is not in list of allowed values: %s"
% (bag, actual, allowed)
)
return True
# Perform tests on 'Serialization' and 'Accept-Serialization', in one function.
# Since https://github.com/edsu/bagit can't tell us if a Bag is serialized or
# not, we need to pass this function the path to the Bag, not the object. Also,
# this method needs to be called before .validate().
def validate_serialization(self, path_to_bag):
# First, perform the two negative tests.
if not exists(path_to_bag):
raise IOError("Can't find file %s" % path_to_bag)
if self.profile["Serialization"] == "required" and isdir(path_to_bag):
self._fail(
"%s: Bag serialization is required but Bag is a directory."
% path_to_bag
)
if self.profile["Serialization"] == "forbidden" and isfile(path_to_bag):
self._fail(
"%s: Bag serialization is forbidden but Bag appears is a file."
% path_to_bag
)
# Then test to see whether the Bag is serialized (is a file) and whether the mimetype is one
# of the allowed types.
        if self.profile["Serialization"] == "required" or (
            self.profile["Serialization"] == "optional" and isfile(path_to_bag)
        ):
_, bag_file = split(path_to_bag)
mtype = mimetypes.guess_type(bag_file)
if mtype[0] not in self.profile["Accept-Serialization"]:
self._fail(
"%s: Bag serialization is forbidden but Bag appears is a file."
% path_to_bag
)
# If we have passed the serialization tests, return True.
return True
# Return True if the file path fnmatches any of the given patterns
def fnmatch_any(f, pats):
for pat in pats:
if fnmatch(f, pat):
return True
return False
# Find tag files
def find_tag_files(bag_dir):
for root, _, basenames in walk(bag_dir):
reldir = relpath(root, bag_dir)
for basename in basenames:
if fnmatch(reldir, "data*") or (
reldir == "."
and fnmatch_any(
basename,
[
"manifest-*.txt",
"bag-info.txt",
"tagmanifest-*.txt",
"bagit.txt",
"fetch.txt",
],
)
):
continue
fpath = join(root, basename)
if isfile(fpath):
yield fpath
def _configure_logging(args):
import time
log_format = "%(asctime)s - %(levelname)s - %(message)s"
if args.quiet:
args.loglevel = "ERROR"
level = logging.getLevelName(args.loglevel)
if args.no_logfile:
logging.basicConfig(level=level, format=log_format)
else:
if args.logdir:
filename = join(
args.log + "/logs", "BagitProfile_" + time.strftime("%y_%m_%d") + ".log"
)
else:
filename = "BagitProfile%s.log" % time.strftime("%y_%m_%d")
logging.basicConfig(filename=filename, level=level, format=log_format)
def _main():
# Command-line version.
import bagit
from argparse import ArgumentParser
from pkg_resources import get_distribution
parser = ArgumentParser(description="Validate BagIt bags against BagIt profiles")
parser.add_argument(
"--version",
action="version",
version="%(prog)s, v" + get_distribution("bagit_profile").version,
)
parser.add_argument(
"--quiet",
action="store_true",
help="Suppress all output except errors. Default: %(default)s",
)
parser.add_argument(
"-i", "--ignore-baginfo-tag-case",
dest="ignore_baginfo_tag_case",
action="store_true",
help="Ignore capitalization for Bag-Info tag names. Default: %(default)s",
)
parser.add_argument(
"--log", dest="logdir", help="Log directory. Default: %(default)s"
)
parser.add_argument(
"--no-logfile",
action="store_true",
help="Do not log to a log file. Default: %(default)s",
)
parser.add_argument(
"--loglevel",
default="INFO",
choices=("DEBUG", "INFO", "ERROR"),
help="Log level. Default: %(default)s",
)
parser.add_argument(
"--file", help="Load profile from FILE, not by URL. Default: %(default)s."
)
parser.add_argument(
"--report",
action="store_true",
help="Print validation report. Default: %(default)s",
)
parser.add_argument(
"--skip",
action="append",
default=[],
help="Skip validation steps. Default: %(default)s",
choices=("serialization", "profile"),
)
parser.add_argument("profile_url", nargs=1)
parser.add_argument("bagit_path", nargs=1)
args = parser.parse_args()
profile_url = args.profile_url[0]
bagit_path = args.bagit_path[0]
_configure_logging(args)
# Instantiate a profile, supplying its URI.
if args.file:
with open(args.file, "r") as local_file:
profile = Profile(profile_url, profile=local_file.read(),
ignore_baginfo_tag_case=args.ignore_baginfo_tag_case)
else:
profile = Profile(profile_url, ignore_baginfo_tag_case=args.ignore_baginfo_tag_case)
# Instantiate an existing Bag.
bag = bagit.Bag(bagit_path) # pylint: disable=no-member
# Validate 'Serialization' and 'Accept-Serialization', then perform general validation.
if "serialization" not in args.skip:
if profile.validate_serialization(bagit_path):
print(u"✓ Serialization validates")
else:
print(u"✗ Serialization does not validate")
sys.exit(1)
# Validate the rest of the profile.
if "profile" not in args.skip:
if profile.validate(bag):
print(u"✓ Validates against %s" % profile_url)
else:
print(u"✗ Does not validate against %s" % profile_url)
if args.report:
print(profile.report)
sys.exit(2)
if __name__ == "__main__":
_main()
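# A minimal, hedged sketch of using the Profile class programmatically instead
# of through _main(); it assumes a bag at "/path/to/bag" and a reachable
# profile URL, and mirrors the call order required above (validate_serialization
# must be called on the path before validate() is called on the Bag object):
#
#   import bagit
#   profile = Profile("http://example.org/bagit-profile.json")
#   profile.validate_serialization("/path/to/bag")
#   bag = bagit.Bag("/path/to/bag")
#   if not profile.validate(bag):
#       print(profile.report)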
|
ruebot/bagit-profiles-validator
|
bagit_profile.py
|
Python
|
unlicense
| 24,238
|
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
import random
from utils.lib import O
from genesis.gen_utils import Statement, Vocabulary, choice
__author__ = "bigfatnoob"
def sample(collection, size):
size = min(size, len(collection))
return random.sample(collection, size)
class Oracle(O):
def __init__(self):
O.__init__(self)
def validate(self, query):
raise NotImplemented("Has to be implemented in sub class.")
def sample_positive_examples(self, size):
raise NotImplemented("Has to be implemented in sub class.")
def sample_negative_examples(self, size):
raise NotImplemented("Has to be implemented in sub class.")
class StatementOracle(Oracle):
def __init__(self, statements):
Oracle.__init__(self)
self.statements = set()
self.vocabulary = None
nodes = set()
edges = set()
for statement in statements:
stmt = statement
if isinstance(stmt, str):
stmt = Statement.from_string(stmt)
nodes.add(stmt.source)
edges.add(stmt.relation)
nodes.add(stmt.target)
self.statements.add(stmt)
self.vocabulary = Vocabulary(nodes, edges)
def validate(self, query):
return query in self.statements
def sample_positive_examples(self, size):
return list(sample(self.statements, size))
def sample_negative_examples(self, size):
samples = set()
while len(samples) < size:
target = choice(self.vocabulary.nodes)
relation = choice(self.vocabulary.edges)
source = target
while source == target:
source = choice(self.vocabulary.nodes)
stmt = Statement(source, relation, target)
if stmt in self.statements or stmt in samples:
continue
samples.add(stmt)
return list(samples)
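# A minimal, hedged usage sketch of StatementOracle; the node and relation
# names below are made-up examples, and only the Statement/StatementOracle API
# shown above is assumed:
#
#   stmts = [Statement("rain", "causes", "wet_ground"),
#            Statement("wet_ground", "causes", "slippery_road")]
#   oracle = StatementOracle(stmts)
#   assert oracle.validate(stmts[0])
#   negatives = oracle.sample_negative_examples(2)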
|
dr-bigfatnoob/quirk
|
genesis/oracle.py
|
Python
|
unlicense
| 1,842
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'simplerest.api.views.home'),
url(r'^list/', 'simplerest.api.views.api_list'),
)
|
Bluegear/django-rest-heroku
|
simplerest/api/urls.py
|
Python
|
unlicense
| 181
|
# coding: utf-8
from __future__ import unicode_literals
import random
import time
import re
from .common import InfoExtractor
from ..utils import (
strip_jsonp,
unescapeHTML,
clean_html,
)
from ..compat import compat_urllib_request
class QQMusicIE(InfoExtractor):
IE_NAME = 'qqmusic'
_VALID_URL = r'http://y.qq.com/#type=song&mid=(?P<id>[0-9A-Za-z]+)'
_TESTS = [{
'url': 'http://y.qq.com/#type=song&mid=004295Et37taLD',
'md5': '9ce1c1c8445f561506d2e3cfb0255705',
'info_dict': {
'id': '004295Et37taLD',
'ext': 'mp3',
'title': '可惜没如果',
'upload_date': '20141227',
'creator': '林俊杰',
'description': 'md5:d327722d0361576fde558f1ac68a7065',
'thumbnail': 're:^https?://.*\.jpg$',
}
}, {
'note': 'There is no mp3-320 version of this song.',
'url': 'http://y.qq.com/#type=song&mid=004MsGEo3DdNxV',
'md5': 'fa3926f0c585cda0af8fa4f796482e3e',
'info_dict': {
'id': '004MsGEo3DdNxV',
'ext': 'mp3',
'title': '如果',
'upload_date': '20050626',
'creator': '李季美',
'description': 'md5:46857d5ed62bc4ba84607a805dccf437',
'thumbnail': 're:^https?://.*\.jpg$',
}
}]
_FORMATS = {
'mp3-320': {'prefix': 'M800', 'ext': 'mp3', 'preference': 40, 'abr': 320},
'mp3-128': {'prefix': 'M500', 'ext': 'mp3', 'preference': 30, 'abr': 128},
'm4a': {'prefix': 'C200', 'ext': 'm4a', 'preference': 10}
}
# Reference: m_r_GetRUin() in top_player.js
# http://imgcache.gtimg.cn/music/portal_v3/y/top_player.js
@staticmethod
def m_r_get_ruin():
curMs = int(time.time() * 1000) % 1000
return int(round(random.random() * 2147483647) * curMs % 1E10)
def _real_extract(self, url):
mid = self._match_id(url)
detail_info_page = self._download_webpage(
'http://s.plcloud.music.qq.com/fcgi-bin/fcg_yqq_song_detail_info.fcg?songmid=%s&play=0' % mid,
mid, note='Download song detail info',
errnote='Unable to get song detail info', encoding='gbk')
song_name = self._html_search_regex(
r"songname:\s*'([^']+)'", detail_info_page, 'song name')
publish_time = self._html_search_regex(
r'发行时间:(\d{4}-\d{2}-\d{2})', detail_info_page,
'publish time', default=None)
if publish_time:
publish_time = publish_time.replace('-', '')
singer = self._html_search_regex(
r"singer:\s*'([^']+)", detail_info_page, 'singer', default=None)
lrc_content = self._html_search_regex(
r'<div class="content" id="lrc_content"[^<>]*>([^<>]+)</div>',
detail_info_page, 'LRC lyrics', default=None)
if lrc_content:
lrc_content = lrc_content.replace('\\n', '\n')
thumbnail_url = None
albummid = self._search_regex(
[r'albummid:\'([0-9a-zA-Z]+)\'', r'"albummid":"([0-9a-zA-Z]+)"'],
detail_info_page, 'album mid', default=None)
if albummid:
thumbnail_url = "http://i.gtimg.cn/music/photo/mid_album_500/%s/%s/%s.jpg" \
% (albummid[-2:-1], albummid[-1], albummid)
guid = self.m_r_get_ruin()
vkey = self._download_json(
'http://base.music.qq.com/fcgi-bin/fcg_musicexpress.fcg?json=3&guid=%s' % guid,
mid, note='Retrieve vkey', errnote='Unable to get vkey',
transform_source=strip_jsonp)['key']
formats = []
for format_id, details in self._FORMATS.items():
formats.append({
'url': 'http://cc.stream.qqmusic.qq.com/%s%s.%s?vkey=%s&guid=%s&fromtag=0'
% (details['prefix'], mid, details['ext'], vkey, guid),
'format': format_id,
'format_id': format_id,
'preference': details['preference'],
'abr': details.get('abr'),
})
self._check_formats(formats, mid)
self._sort_formats(formats)
return {
'id': mid,
'formats': formats,
'title': song_name,
'upload_date': publish_time,
'creator': singer,
'description': lrc_content,
'thumbnail': thumbnail_url,
}
class QQPlaylistBaseIE(InfoExtractor):
@staticmethod
def qq_static_url(category, mid):
return 'http://y.qq.com/y/static/%s/%s/%s/%s.html' % (category, mid[-2], mid[-1], mid)
@classmethod
def get_entries_from_page(cls, page):
entries = []
for item in re.findall(r'class="data"[^<>]*>([^<>]+)</', page):
song_mid = unescapeHTML(item).split('|')[-5]
entries.append(cls.url_result(
'http://y.qq.com/#type=song&mid=' + song_mid, 'QQMusic',
song_mid))
return entries
class QQMusicSingerIE(QQPlaylistBaseIE):
IE_NAME = 'qqmusic:singer'
_VALID_URL = r'http://y.qq.com/#type=singer&mid=(?P<id>[0-9A-Za-z]+)'
_TEST = {
'url': 'http://y.qq.com/#type=singer&mid=001BLpXF2DyJe2',
'info_dict': {
'id': '001BLpXF2DyJe2',
'title': '林俊杰',
'description': 'md5:2a222d89ba4455a3af19940c0481bb78',
},
'playlist_count': 12,
}
def _real_extract(self, url):
mid = self._match_id(url)
singer_page = self._download_webpage(
self.qq_static_url('singer', mid), mid, 'Download singer page')
entries = self.get_entries_from_page(singer_page)
singer_name = self._html_search_regex(
r"singername\s*:\s*'([^']+)'", singer_page, 'singer name',
default=None)
singer_id = self._html_search_regex(
r"singerid\s*:\s*'([0-9]+)'", singer_page, 'singer id',
default=None)
singer_desc = None
if singer_id:
req = compat_urllib_request.Request(
'http://s.plcloud.music.qq.com/fcgi-bin/fcg_get_singer_desc.fcg?utf8=1&outCharset=utf-8&format=xml&singerid=%s' % singer_id)
req.add_header(
'Referer', 'http://s.plcloud.music.qq.com/xhr_proxy_utf8.html')
singer_desc_page = self._download_xml(
                req, mid, 'Download singer description XML')
singer_desc = singer_desc_page.find('./data/info/desc').text
return self.playlist_result(entries, mid, singer_name, singer_desc)
class QQMusicAlbumIE(QQPlaylistBaseIE):
IE_NAME = 'qqmusic:album'
_VALID_URL = r'http://y.qq.com/#type=album&mid=(?P<id>[0-9A-Za-z]+)'
_TESTS = [{
'url': 'http://y.qq.com/#type=album&mid=000gXCTb2AhRR1',
'info_dict': {
'id': '000gXCTb2AhRR1',
'title': '我们都是这样长大的',
'description': 'md5:179c5dce203a5931970d306aa9607ea6',
},
'playlist_count': 4,
}, {
'url': 'http://y.qq.com/#type=album&mid=002Y5a3b3AlCu3',
'info_dict': {
'id': '002Y5a3b3AlCu3',
'title': '그리고...',
'description': 'md5:a48823755615508a95080e81b51ba729',
},
'playlist_count': 8,
}]
def _real_extract(self, url):
mid = self._match_id(url)
album = self._download_json(
'http://i.y.qq.com/v8/fcg-bin/fcg_v8_album_info_cp.fcg?albummid=%s&format=json' % mid,
mid, 'Download album page')['data']
entries = [
self.url_result(
'http://y.qq.com/#type=song&mid=' + song['songmid'], 'QQMusic', song['songmid']
) for song in album['list']
]
album_name = album.get('name')
album_detail = album.get('desc')
if album_detail is not None:
album_detail = album_detail.strip()
return self.playlist_result(entries, mid, album_name, album_detail)
class QQMusicToplistIE(QQPlaylistBaseIE):
IE_NAME = 'qqmusic:toplist'
_VALID_URL = r'http://y\.qq\.com/#type=toplist&p=(?P<id>(top|global)_[0-9]+)'
_TESTS = [{
'url': 'http://y.qq.com/#type=toplist&p=global_123',
'info_dict': {
'id': 'global_123',
'title': '美国iTunes榜',
},
'playlist_count': 10,
}, {
'url': 'http://y.qq.com/#type=toplist&p=top_3',
'info_dict': {
'id': 'top_3',
'title': 'QQ音乐巅峰榜·欧美',
'description': 'QQ音乐巅峰榜·欧美根据用户收听行为自动生成,集结当下最流行的欧美新歌!:更新时间:每周四22点|统'
'计周期:一周(上周四至本周三)|统计对象:三个月内发行的欧美歌曲|统计数量:100首|统计算法:根据'
'歌曲在一周内的有效播放次数,由高到低取前100名(同一歌手最多允许5首歌曲同时上榜)|有效播放次数:'
'登录用户完整播放一首歌曲,记为一次有效播放;同一用户收听同一首歌曲,每天记录为1次有效播放'
},
'playlist_count': 100,
}, {
'url': 'http://y.qq.com/#type=toplist&p=global_106',
'info_dict': {
'id': 'global_106',
'title': '韩国Mnet榜',
},
'playlist_count': 50,
}]
def _real_extract(self, url):
list_id = self._match_id(url)
list_type, num_id = list_id.split("_")
toplist_json = self._download_json(
'http://i.y.qq.com/v8/fcg-bin/fcg_v8_toplist_cp.fcg?type=%s&topid=%s&format=json'
% (list_type, num_id),
list_id, 'Download toplist page')
entries = [
self.url_result(
'http://y.qq.com/#type=song&mid=' + song['data']['songmid'], 'QQMusic', song['data']['songmid']
) for song in toplist_json['songlist']
]
topinfo = toplist_json.get('topinfo', {})
list_name = topinfo.get('ListName')
list_description = topinfo.get('info')
return self.playlist_result(entries, list_id, list_name, list_description)
class QQMusicPlaylistIE(QQPlaylistBaseIE):
IE_NAME = 'qqmusic:playlist'
_VALID_URL = r'http://y\.qq\.com/#type=taoge&id=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://y.qq.com/#type=taoge&id=3462654915',
'info_dict': {
'id': '3462654915',
'title': '韩国5月新歌精选下旬',
'description': 'md5:d2c9d758a96b9888cf4fe82f603121d4',
},
'playlist_count': 40,
}
def _real_extract(self, url):
list_id = self._match_id(url)
list_json = self._download_json(
'http://i.y.qq.com/qzone-music/fcg-bin/fcg_ucc_getcdinfo_byids_cp.fcg?type=1&json=1&utf8=1&onlysong=0&disstid=%s'
% list_id, list_id, 'Download list page',
transform_source=strip_jsonp)['cdlist'][0]
entries = [
self.url_result(
'http://y.qq.com/#type=song&mid=' + song['songmid'], 'QQMusic', song['songmid']
) for song in list_json['songlist']
]
list_name = list_json.get('dissname')
list_description = clean_html(unescapeHTML(list_json.get('desc')))
return self.playlist_result(entries, list_id, list_name, list_description)
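# A minimal, hedged sketch of exercising these extractors through the public
# youtube-dl API; the song URL reuses the test mid from QQMusicIE above:
#
#   import youtube_dl
#   with youtube_dl.YoutubeDL() as ydl:
#       info = ydl.extract_info('http://y.qq.com/#type=song&mid=004295Et37taLD',
#                               download=False)
#       print(info.get('title'), info.get('creator'))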
|
pridemusvaire/youtube-dl
|
youtube_dl/extractor/qqmusic.py
|
Python
|
unlicense
| 11,465
|
# coding: utf-8
# In[ ]:
# Vector representations of words (word2vec) trained with an NCE loss.
# Note: the two imports below were missing from this notebook export; the
# hyperparameters (vocabulary_size, embedding_size, batch_size, num_sampled),
# generate_batch() and session are assumed to be defined in earlier cells.
import math
import tensorflow as tf
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# In[ ]:
# Placeholders for inputs
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
# In[ ]:
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# In[ ]:
# Compute the NCE loss, using a sample of the negative labels each time.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# In[ ]:
# We use the SGD optimizer.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0).minimize(loss)
# In[ ]:
for inputs, labels in generate_batch(...):
feed_dict = {train_inputs: inputs, train_labels: labels}
_, cur_loss = session.run([optimizer, loss], feed_dict=feed_dict)
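# In[ ]:
# A hedged sketch of the definitions the cells above assume (illustrative
# values only; generate_batch() and session would come from earlier cells of
# the original notebook):
#
#   vocabulary_size = 50000
#   embedding_size = 128
#   batch_size = 128
#   num_sampled = 64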
|
zlpmichelle/crackingtensorflow
|
crackingcode/day10/cc_tf_day10_1.py
|
Python
|
apache-2.0
| 1,238
|
"""
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
action_instance_slice
Revision ID: 30b4676b2f86
Revises: 3da597dde134
Create Date: 2016-01-27 09:03:18.577289
"""
# revision identifiers, used by Alembic.
revision = '30b4676b2f86'
down_revision = '3da597dde134'
branch_labels = ()
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
if 'sqlite' != op.get_context().dialect.name:
op.alter_column('action_instances', 'slice',
existing_type=mysql.VARCHAR(length=25),
nullable=True)
def downgrade():
### commands auto generated by Alembic - please adjust! ###
if 'sqlite' != op.get_context().dialect.name:
op.alter_column('action_instances', 'slice',
existing_type=mysql.VARCHAR(length=25),
nullable=False)
### end Alembic commands ###
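# A hedged sketch of applying or reverting this revision from the command line,
# assuming an alembic.ini configured for the target (non-SQLite) database:
#
#   alembic upgrade 30b4676b2f86
#   alembic downgrade 3da597dde134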
|
BambooHR/rapid
|
rapid/master/data/migrations/versions/30b4676b2f86_action_instance_slice.py
|
Python
|
apache-2.0
| 1,456
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/
"""
import mock
from oslo_config import fixture as fixture_config
from oslotest import base
import retrying
from ceilometer.alarm.storage import impl_log as impl_log_alarm
from ceilometer.alarm.storage import impl_sqlalchemy as impl_sqlalchemy_alarm
from ceilometer.event.storage import impl_hbase as impl_hbase_event
from ceilometer import storage
from ceilometer.storage import impl_log
from ceilometer.storage import impl_sqlalchemy
import six
class EngineTest(base.BaseTestCase):
def test_get_connection(self):
engine = storage.get_connection('log://localhost',
'ceilometer.metering.storage')
self.assertIsInstance(engine, impl_log.Connection)
def test_get_connection_no_such_engine(self):
        try:
            storage.get_connection('no-such-engine://localhost',
                                   'ceilometer.metering.storage')
        except RuntimeError as err:
            self.assertIn('no-such-engine', six.text_type(err))
        else:
            self.fail('Expected RuntimeError for unknown storage engine')
class ConnectionRetryTest(base.BaseTestCase):
def setUp(self):
super(ConnectionRetryTest, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
def test_retries(self):
with mock.patch.object(retrying.time, 'sleep') as retry_sleep:
try:
self.CONF.set_override("connection", "no-such-engine://",
group="database")
storage.get_connection_from_config(self.CONF)
except RuntimeError as err:
self.assertIn('no-such-engine', six.text_type(err))
self.assertEqual(9, retry_sleep.call_count)
retry_sleep.assert_called_with(10.0)
class ConnectionConfigTest(base.BaseTestCase):
def setUp(self):
super(ConnectionConfigTest, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
def test_only_default_url(self):
self.CONF.set_override("connection", "log://", group="database")
conn = storage.get_connection_from_config(self.CONF)
self.assertIsInstance(conn, impl_log.Connection)
conn = storage.get_connection_from_config(self.CONF, 'metering')
self.assertIsInstance(conn, impl_log.Connection)
conn = storage.get_connection_from_config(self.CONF, 'alarm')
self.assertIsInstance(conn, impl_log_alarm.Connection)
def test_two_urls(self):
self.CONF.set_override("connection", "log://", group="database")
self.CONF.set_override("alarm_connection", "sqlite://",
group="database")
conn = storage.get_connection_from_config(self.CONF)
self.assertIsInstance(conn, impl_log.Connection)
conn = storage.get_connection_from_config(self.CONF, 'metering')
self.assertIsInstance(conn, impl_log.Connection)
conn = storage.get_connection_from_config(self.CONF, 'alarm')
self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection)
def test_three_urls(self):
self.CONF.set_override("connection", "log://", group="database")
self.CONF.set_override("alarm_connection", "sqlite://",
group="database")
self.CONF.set_override("event_connection", "hbase://__test__",
group="database")
conn = storage.get_connection_from_config(self.CONF)
self.assertIsInstance(conn, impl_log.Connection)
conn = storage.get_connection_from_config(self.CONF, 'metering')
self.assertIsInstance(conn, impl_log.Connection)
conn = storage.get_connection_from_config(self.CONF, 'alarm')
self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection)
conn = storage.get_connection_from_config(self.CONF, 'event')
self.assertIsInstance(conn, impl_hbase_event.Connection)
def test_sqlalchemy_driver(self):
self.CONF.set_override("connection", "sqlite+pysqlite://",
group="database")
conn = storage.get_connection_from_config(self.CONF)
self.assertIsInstance(conn, impl_sqlalchemy.Connection)
conn = storage.get_connection_from_config(self.CONF, 'metering')
self.assertIsInstance(conn, impl_sqlalchemy.Connection)
conn = storage.get_connection_from_config(self.CONF, 'alarm')
self.assertIsInstance(conn, impl_sqlalchemy_alarm.Connection)
|
Juniper/ceilometer
|
ceilometer/tests/storage/test_get_connection.py
|
Python
|
apache-2.0
| 5,056
|
# Copyright (c) 2015 Mirantis inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import ddt
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
import webob
from manila.api import common
from manila.api.openstack import api_version_request as api_version
from manila.api.v2 import share_replicas
from manila.api.v2 import shares
from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila import policy
from manila.share import api as share_api
from manila.share import share_types
from manila import test
from manila.tests.api.contrib import stubs
from manila.tests.api import fakes
from manila.tests import db_utils
from manila import utils
CONF = cfg.CONF
@ddt.ddt
class ShareAPITest(test.TestCase):
"""Share API Test."""
def setUp(self):
        super(ShareAPITest, self).setUp()
self.controller = shares.ShareController()
self.mock_object(db, 'availability_zone_get')
self.mock_object(share_api.API, 'get_all',
stubs.stub_get_all_shares)
self.mock_object(share_api.API, 'get',
stubs.stub_share_get)
self.mock_object(share_api.API, 'update', stubs.stub_share_update)
self.mock_object(share_api.API, 'delete', stubs.stub_share_delete)
self.mock_object(share_api.API, 'get_snapshot',
stubs.stub_snapshot_get)
self.maxDiff = None
self.share = {
"size": 100,
"display_name": "Share Test Name",
"display_description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"is_public": False,
}
self.create_mock = mock.Mock(
return_value=stubs.stub_share(
'1',
display_name=self.share['display_name'],
display_description=self.share['display_description'],
size=100,
share_proto=self.share['share_proto'].upper(),
instance={
'availability_zone': self.share['availability_zone'],
})
)
self.vt = {
'id': 'fake_volume_type_id',
'name': 'fake_volume_type_name',
}
CONF.set_default("default_share_type", None)
def _get_expected_share_detailed_response(self, values=None, admin=False):
share = {
'id': '1',
'name': 'displayname',
'availability_zone': 'fakeaz',
'description': 'displaydesc',
'export_location': 'fake_location',
'export_locations': ['fake_location', 'fake_location2'],
'project_id': 'fakeproject',
'host': 'fakehost',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'share_proto': 'FAKEPROTO',
'metadata': {},
'size': 1,
'snapshot_id': '2',
'share_network_id': None,
'status': 'fakestatus',
'share_type': '1',
'volume_type': '1',
'snapshot_support': True,
'is_public': False,
'consistency_group_id': None,
'source_cgsnapshot_member_id': None,
'task_state': None,
'share_type_name': None,
'links': [
{
'href': 'http://localhost/v1/fake/shares/1',
'rel': 'self'
},
{
'href': 'http://localhost/fake/shares/1',
'rel': 'bookmark'
}
],
}
if values:
if 'display_name' in values:
values['name'] = values.pop('display_name')
if 'display_description' in values:
values['description'] = values.pop('display_description')
share.update(values)
if share.get('share_proto'):
share['share_proto'] = share['share_proto'].upper()
if admin:
share['share_server_id'] = 'fake_share_server_id'
return {'share': share}
@ddt.data("2.0", "2.1")
def test_share_create_original(self, microversion):
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version=microversion)
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
expected['share'].pop('snapshot_support')
expected['share'].pop('share_type_name')
expected['share'].pop('task_state')
expected['share'].pop('consistency_group_id')
expected['share'].pop('source_cgsnapshot_member_id')
self.assertEqual(expected, res_dict)
@ddt.data("2.2", "2.3")
def test_share_create_with_snapshot_support_without_cg(self, microversion):
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version=microversion)
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
expected['share'].pop('share_type_name')
expected['share'].pop('task_state')
expected['share'].pop('consistency_group_id')
expected['share'].pop('source_cgsnapshot_member_id')
self.assertEqual(expected, res_dict)
@ddt.data("2.4", "2.5")
def test_share_create_with_consistency_group(self, microversion):
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version=microversion)
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
expected['share'].pop('share_type_name')
if (api_version.APIVersionRequest(microversion) ==
api_version.APIVersionRequest('2.4')):
expected['share'].pop('task_state')
self.assertEqual(expected, res_dict)
def test_share_create_with_valid_default_share_type(self):
self.mock_object(share_types, 'get_share_type_by_name',
mock.Mock(return_value=self.vt))
CONF.set_default("default_share_type", self.vt['name'])
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
share_types.get_share_type_by_name.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), self.vt['name'])
self.assertEqual(expected, res_dict)
def test_share_create_with_invalid_default_share_type(self):
self.mock_object(
share_types, 'get_default_share_type',
mock.Mock(side_effect=exception.ShareTypeNotFoundByName(
self.vt['name'])),
)
CONF.set_default("default_share_type", self.vt['name'])
req = fakes.HTTPRequest.blank('/shares', version='2.7')
self.assertRaises(exception.ShareTypeNotFoundByName,
self.controller.create, req, {'share': self.share})
share_types.get_default_share_type.assert_called_once_with()
def test_share_create_with_replication(self):
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank(
'/shares', version=share_replicas.MIN_SUPPORTED_API_VERSION)
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
expected['share']['task_state'] = None
expected['share']['consistency_group_id'] = None
expected['share']['source_cgsnapshot_member_id'] = None
expected['share']['replication_type'] = None
expected['share']['share_type_name'] = None
expected['share']['has_replicas'] = False
expected['share']['access_rules_status'] = 'active'
expected['share'].pop('export_location')
expected['share'].pop('export_locations')
self.assertEqual(expected, res_dict)
def test_share_create_with_share_net(self):
shr = {
"size": 100,
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"share_network_id": "fakenetid"
}
create_mock = mock.Mock(return_value=stubs.stub_share('1',
display_name=shr['name'],
display_description=shr['description'],
size=shr['size'],
share_proto=shr['share_proto'].upper(),
availability_zone=shr['availability_zone'],
share_network_id=shr['share_network_id']))
self.mock_object(share_api.API, 'create', create_mock)
self.mock_object(share_api.API, 'get_share_network', mock.Mock(
return_value={'id': 'fakenetid'}))
body = {"share": copy.deepcopy(shr)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(shr)
self.assertEqual(expected, res_dict)
self.assertEqual("fakenetid",
create_mock.call_args[1]['share_network_id'])
@ddt.data("2.15", "2.16")
def test_share_create_original_with_user_id(self, microversion):
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version=microversion)
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
if api_version.APIVersionRequest(microversion) >= (
api_version.APIVersionRequest("2.16")):
expected['share']['user_id'] = 'fakeuser'
else:
self.assertNotIn('user_id', expected['share'])
expected['share']['task_state'] = None
expected['share']['consistency_group_id'] = None
expected['share']['source_cgsnapshot_member_id'] = None
expected['share']['replication_type'] = None
expected['share']['share_type_name'] = None
expected['share']['has_replicas'] = False
expected['share']['access_rules_status'] = 'active'
expected['share'].pop('export_location')
expected['share'].pop('export_locations')
self.assertEqual(expected, res_dict)
def test_migration_start(self):
share = db_utils.create_share()
share_network = db_utils.create_share_network()
share_type = {'share_type_id': 'fake_type_id'}
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
context = req.environ['manila.context']
self.mock_object(db, 'share_network_get', mock.Mock(
return_value=share_network))
self.mock_object(db, 'share_type_get', mock.Mock(
return_value=share_type))
body = {
'migration_start': {
'host': 'fake_host',
'new_share_network_id': 'fake_net_id',
'new_share_type_id': 'fake_type_id',
}
}
method = 'migration_start'
self.mock_object(share_api.API, 'migration_start')
self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
response = getattr(self.controller, method)(req, share['id'], body)
self.assertEqual(202, response.status_int)
share_api.API.get.assert_called_once_with(context, share['id'])
share_api.API.migration_start.assert_called_once_with(
context, share, 'fake_host', False, True, True, False,
new_share_network=share_network, new_share_type=share_type)
db.share_network_get.assert_called_once_with(
context, 'fake_net_id')
db.share_type_get.assert_called_once_with(
context, 'fake_type_id')
def test_migration_start_has_replicas(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request = api_version.APIVersionRequest('2.22')
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host'}}
self.mock_object(share_api.API, 'migration_start',
mock.Mock(side_effect=exception.Conflict(err='err')))
self.assertRaises(webob.exc.HTTPConflict,
self.controller.migration_start,
req, share['id'], body)
def test_migration_start_no_share_id(self):
req = fakes.HTTPRequest.blank('/shares/%s/action' % 'fake_id',
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host'}}
method = 'migration_start'
self.mock_object(share_api.API, 'get',
mock.Mock(side_effect=[exception.NotFound]))
self.assertRaises(webob.exc.HTTPNotFound,
getattr(self.controller, method),
req, 'fake_id', body)
def test_migration_start_no_host(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {}}
method = 'migration_start'
self.assertRaises(webob.exc.HTTPBadRequest,
getattr(self.controller, method),
req, share['id'], body)
def test_migration_start_new_share_network_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
context = req.environ['manila.context']
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host',
'new_share_network_id': 'nonexistent'}}
self.mock_object(db, 'share_network_get',
mock.Mock(side_effect=exception.NotFound()))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.migration_start,
req, share['id'], body)
db.share_network_get.assert_called_once_with(context, 'nonexistent')
def test_migration_start_new_share_type_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
context = req.environ['manila.context']
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host',
'new_share_type_id': 'nonexistent'}}
self.mock_object(db, 'share_type_get',
mock.Mock(side_effect=exception.NotFound()))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.migration_start,
req, share['id'], body)
db.share_type_get.assert_called_once_with(context, 'nonexistent')
def test_migration_start_invalid_force_host_assisted_migration(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host',
'force_host_assisted_migration': 'fake'}}
method = 'migration_start'
self.assertRaises(webob.exc.HTTPBadRequest,
getattr(self.controller, method),
req, share['id'], body)
@ddt.data('writable', 'preserve_metadata')
def test_migration_start_invalid_writable_preserve_metadata(
self, parameter):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host',
parameter: 'invalid'}}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.migration_start, req, share['id'],
body)
@ddt.data(constants.TASK_STATE_MIGRATION_ERROR, None)
def test_reset_task_state(self, task_state):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
update = {'task_state': task_state}
body = {'reset_task_state': update}
self.mock_object(db, 'share_update')
response = self.controller.reset_task_state(req, share['id'], body)
self.assertEqual(202, response.status_int)
db.share_update.assert_called_once_with(utils.IsAMatcher(
context.RequestContext), share['id'], update)
def test_reset_task_state_error_body(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
update = {'error': 'error'}
body = {'reset_task_state': update}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.reset_task_state, req, share['id'],
body)
def test_reset_task_state_error_invalid(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
update = {'task_state': 'error'}
body = {'reset_task_state': update}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.reset_task_state, req, share['id'],
body)
def test_reset_task_state_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR}
body = {'reset_task_state': update}
self.mock_object(db, 'share_update',
mock.Mock(side_effect=exception.NotFound()))
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.reset_task_state, req, share['id'],
body)
db.share_update.assert_called_once_with(utils.IsAMatcher(
context.RequestContext), share['id'], update)
def test_migration_complete(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_complete': None}
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=share))
self.mock_object(share_api.API, 'migration_complete')
response = self.controller.migration_complete(req, share['id'], body)
self.assertEqual(202, response.status_int)
share_api.API.migration_complete.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share)
def test_migration_complete_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_complete': None}
self.mock_object(share_api.API, 'get',
mock.Mock(side_effect=exception.NotFound()))
self.mock_object(share_api.API, 'migration_complete')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.migration_complete, req, share['id'],
body)
def test_migration_cancel(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_cancel': None}
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=share))
self.mock_object(share_api.API, 'migration_cancel')
response = self.controller.migration_cancel(req, share['id'], body)
self.assertEqual(202, response.status_int)
share_api.API.migration_cancel.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share)
def test_migration_cancel_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_cancel': None}
self.mock_object(share_api.API, 'get',
mock.Mock(side_effect=exception.NotFound()))
self.mock_object(share_api.API, 'migration_cancel')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.migration_cancel, req, share['id'],
body)
def test_migration_get_progress(self):
share = db_utils.create_share(
task_state=constants.TASK_STATE_MIGRATION_SUCCESS)
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_get_progress': None}
expected = {
'total_progress': 'fake',
'task_state': constants.TASK_STATE_MIGRATION_SUCCESS,
}
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=share))
self.mock_object(share_api.API, 'migration_get_progress',
mock.Mock(return_value=expected))
response = self.controller.migration_get_progress(req, share['id'],
body)
self.assertEqual(expected, response)
share_api.API.migration_get_progress.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share)
def test_migration_get_progress_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_get_progress': None}
self.mock_object(share_api.API, 'get',
mock.Mock(side_effect=exception.NotFound()))
self.mock_object(share_api.API, 'migration_get_progress')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.migration_get_progress, req,
share['id'], body)
def test_share_create_from_snapshot_without_share_net_no_parent(self):
shr = {
"size": 100,
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"snapshot_id": 333,
"share_network_id": None,
}
create_mock = mock.Mock(return_value=stubs.stub_share('1',
display_name=shr['name'],
display_description=shr['description'],
size=shr['size'],
share_proto=shr['share_proto'].upper(),
snapshot_id=shr['snapshot_id'],
instance=dict(
availability_zone=shr['availability_zone'],
share_network_id=shr['share_network_id'])))
self.mock_object(share_api.API, 'create', create_mock)
body = {"share": copy.deepcopy(shr)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(shr)
self.assertEqual(expected, res_dict)
def test_share_create_from_snapshot_without_share_net_parent_exists(self):
shr = {
"size": 100,
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"snapshot_id": 333,
"share_network_id": None,
}
parent_share_net = 444
create_mock = mock.Mock(return_value=stubs.stub_share('1',
display_name=shr['name'],
display_description=shr['description'],
size=shr['size'],
share_proto=shr['share_proto'].upper(),
snapshot_id=shr['snapshot_id'],
instance=dict(
availability_zone=shr['availability_zone'],
share_network_id=shr['share_network_id'])))
self.mock_object(share_api.API, 'create', create_mock)
self.mock_object(share_api.API, 'get_snapshot',
stubs.stub_snapshot_get)
self.mock_object(share_api.API, 'get', mock.Mock(
return_value=mock.Mock(
instance={'share_network_id': parent_share_net})))
self.mock_object(share_api.API, 'get_share_network', mock.Mock(
return_value={'id': parent_share_net}))
body = {"share": copy.deepcopy(shr)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(shr)
self.assertEqual(expected, res_dict)
self.assertEqual(parent_share_net,
create_mock.call_args[1]['share_network_id'])
def test_share_create_from_snapshot_with_share_net_equals_parent(self):
parent_share_net = 444
shr = {
"size": 100,
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"snapshot_id": 333,
"share_network_id": parent_share_net
}
create_mock = mock.Mock(return_value=stubs.stub_share('1',
display_name=shr['name'],
display_description=shr['description'],
size=shr['size'],
share_proto=shr['share_proto'].upper(),
snapshot_id=shr['snapshot_id'],
instance=dict(
availability_zone=shr['availability_zone'],
share_network_id=shr['share_network_id'])))
self.mock_object(share_api.API, 'create', create_mock)
self.mock_object(share_api.API, 'get_snapshot',
stubs.stub_snapshot_get)
self.mock_object(share_api.API, 'get', mock.Mock(
return_value=mock.Mock(
instance={'share_network_id': parent_share_net})))
self.mock_object(share_api.API, 'get_share_network', mock.Mock(
return_value={'id': parent_share_net}))
body = {"share": copy.deepcopy(shr)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(shr)
self.assertEqual(expected, res_dict)
self.assertEqual(parent_share_net,
create_mock.call_args[1]['share_network_id'])
def test_share_create_from_snapshot_invalid_share_net(self):
self.mock_object(share_api.API, 'create')
shr = {
"size": 100,
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"snapshot_id": 333,
"share_network_id": 1234
}
body = {"share": shr}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
def test_share_creation_fails_with_bad_size(self):
shr = {"size": '',
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1"}
body = {"share": shr}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
self.assertRaises(exception.InvalidInput,
self.controller.create, req, body)
def test_share_create_no_body(self):
req = fakes.HTTPRequest.blank('/shares', version='2.7')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, {})
def test_share_create_invalid_availability_zone(self):
self.mock_object(
db,
'availability_zone_get',
mock.Mock(side_effect=exception.AvailabilityZoneNotFound(id='id'))
)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create,
req,
body)
def test_share_show(self):
req = fakes.HTTPRequest.blank('/shares/1')
expected = self._get_expected_share_detailed_response()
expected['share'].pop('snapshot_support')
expected['share'].pop('share_type_name')
expected['share'].pop('task_state')
expected['share'].pop('consistency_group_id')
expected['share'].pop('source_cgsnapshot_member_id')
res_dict = self.controller.show(req, '1')
self.assertEqual(expected, res_dict)
def test_share_show_with_consistency_group(self):
req = fakes.HTTPRequest.blank('/shares/1', version='2.4')
expected = self._get_expected_share_detailed_response()
expected['share'].pop('share_type_name')
expected['share'].pop('task_state')
res_dict = self.controller.show(req, '1')
self.assertEqual(expected, res_dict)
def test_share_show_with_share_type_name(self):
req = fakes.HTTPRequest.blank('/shares/1', version='2.6')
res_dict = self.controller.show(req, '1')
expected = self._get_expected_share_detailed_response()
expected['share']['consistency_group_id'] = None
expected['share']['source_cgsnapshot_member_id'] = None
expected['share']['share_type_name'] = None
expected['share']['task_state'] = None
self.assertEqual(expected, res_dict)
@ddt.data("2.15", "2.16")
def test_share_show_with_user_id(self, microversion):
req = fakes.HTTPRequest.blank('/shares/1', version=microversion)
res_dict = self.controller.show(req, '1')
expected = self._get_expected_share_detailed_response()
if api_version.APIVersionRequest(microversion) >= (
api_version.APIVersionRequest("2.16")):
expected['share']['user_id'] = 'fakeuser'
else:
self.assertNotIn('user_id', expected['share'])
expected['share']['consistency_group_id'] = None
expected['share']['source_cgsnapshot_member_id'] = None
expected['share']['share_type_name'] = None
expected['share']['task_state'] = None
expected['share']['access_rules_status'] = 'active'
expected['share'].pop('export_location')
expected['share'].pop('export_locations')
expected['share']['replication_type'] = None
expected['share']['has_replicas'] = False
self.assertEqual(expected, res_dict)
def test_share_show_admin(self):
req = fakes.HTTPRequest.blank('/shares/1', use_admin_context=True)
expected = self._get_expected_share_detailed_response(admin=True)
expected['share'].pop('snapshot_support')
expected['share'].pop('share_type_name')
expected['share'].pop('task_state')
expected['share'].pop('consistency_group_id')
expected['share'].pop('source_cgsnapshot_member_id')
res_dict = self.controller.show(req, '1')
self.assertEqual(expected, res_dict)
def test_share_show_no_share(self):
self.mock_object(share_api.API, 'get',
stubs.stub_share_get_notfound)
req = fakes.HTTPRequest.blank('/shares/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
req, '1')
def test_share_show_with_replication_type(self):
req = fakes.HTTPRequest.blank(
'/shares/1', version=share_replicas.MIN_SUPPORTED_API_VERSION)
res_dict = self.controller.show(req, '1')
expected = self._get_expected_share_detailed_response()
expected['share']['task_state'] = None
expected['share']['consistency_group_id'] = None
expected['share']['source_cgsnapshot_member_id'] = None
expected['share']['access_rules_status'] = 'active'
expected['share']['share_type_name'] = None
expected['share']['replication_type'] = None
expected['share']['has_replicas'] = False
expected['share'].pop('export_location')
expected['share'].pop('export_locations')
self.assertEqual(expected, res_dict)
def test_share_delete(self):
req = fakes.HTTPRequest.blank('/shares/1')
resp = self.controller.delete(req, 1)
self.assertEqual(202, resp.status_int)
def test_share_delete_has_replicas(self):
req = fakes.HTTPRequest.blank('/shares/1')
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=self.share))
self.mock_object(share_api.API, 'delete',
mock.Mock(side_effect=exception.Conflict(err='err')))
self.assertRaises(
webob.exc.HTTPConflict, self.controller.delete, req, 1)
def test_share_delete_in_consistency_group_param_not_provided(self):
fake_share = stubs.stub_share('fake_share',
consistency_group_id='fake_cg_id')
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=fake_share))
req = fakes.HTTPRequest.blank('/shares/1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete, req, 1)
def test_share_delete_in_consistency_group(self):
fake_share = stubs.stub_share('fake_share',
consistency_group_id='fake_cg_id')
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=fake_share))
req = fakes.HTTPRequest.blank(
'/shares/1?consistency_group_id=fake_cg_id')
resp = self.controller.delete(req, 1)
self.assertEqual(202, resp.status_int)
def test_share_delete_in_consistency_group_wrong_id(self):
fake_share = stubs.stub_share('fake_share',
consistency_group_id='fake_cg_id')
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=fake_share))
req = fakes.HTTPRequest.blank(
'/shares/1?consistency_group_id=not_fake_cg_id')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete, req, 1)
def test_share_update(self):
shr = self.share
body = {"share": shr}
req = fakes.HTTPRequest.blank('/share/1')
res_dict = self.controller.update(req, 1, body)
self.assertEqual(shr["display_name"], res_dict['share']["name"])
self.assertEqual(shr["display_description"],
res_dict['share']["description"])
self.assertEqual(shr['is_public'],
res_dict['share']['is_public'])
def test_share_update_with_consistency_group(self):
shr = self.share
body = {"share": shr}
req = fakes.HTTPRequest.blank('/share/1', version="2.4")
res_dict = self.controller.update(req, 1, body)
self.assertIsNone(res_dict['share']["consistency_group_id"])
self.assertIsNone(res_dict['share']["source_cgsnapshot_member_id"])
def test_share_not_updates_size(self):
req = fakes.HTTPRequest.blank('/share/1')
res_dict = self.controller.update(req, 1, {"share": self.share})
self.assertNotEqual(res_dict['share']["size"], self.share["size"])
def test_share_delete_no_share(self):
self.mock_object(share_api.API, 'get',
stubs.stub_share_get_notfound)
req = fakes.HTTPRequest.blank('/shares/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete,
req,
1)
def _share_list_summary_with_search_opts(self, use_admin_context):
search_opts = {
'name': 'fake_name',
'status': constants.STATUS_AVAILABLE,
'share_server_id': 'fake_share_server_id',
'share_type_id': 'fake_share_type_id',
'snapshot_id': 'fake_snapshot_id',
'host': 'fake_host',
'share_network_id': 'fake_share_network_id',
'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized k1=v1
'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2
'sort_key': 'fake_sort_key',
'sort_dir': 'fake_sort_dir',
'limit': '1',
'offset': '1',
'is_public': 'False',
}
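        # Illustrative note (not part of the original test): the 'metadata'
        # and 'extra_specs' values above are URL-encoded dict literals, e.g.
        #   from six.moves.urllib import parse
        #   parse.unquote_plus('%7B%27k1%27%3A+%27v1%27%7D')  # "{'k1': 'v1'}"
        # which the API layer is expected to parse back into {'k1': 'v1'}.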
# fake_key should be filtered for non-admin
url = '/shares?fake_key=fake_value'
for k, v in search_opts.items():
url = url + '&' + k + '=' + v
req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context)
shares = [
{'id': 'id1', 'display_name': 'n1'},
{'id': 'id2', 'display_name': 'n2'},
{'id': 'id3', 'display_name': 'n3'},
]
self.mock_object(share_api.API, 'get_all',
mock.Mock(return_value=shares))
result = self.controller.index(req)
search_opts_expected = {
'display_name': search_opts['name'],
'status': search_opts['status'],
'share_server_id': search_opts['share_server_id'],
'share_type_id': search_opts['share_type_id'],
'snapshot_id': search_opts['snapshot_id'],
'host': search_opts['host'],
'share_network_id': search_opts['share_network_id'],
'metadata': {'k1': 'v1'},
'extra_specs': {'k2': 'v2'},
'is_public': 'False',
}
if use_admin_context:
search_opts_expected.update({'fake_key': 'fake_value'})
share_api.API.get_all.assert_called_once_with(
req.environ['manila.context'],
sort_key=search_opts['sort_key'],
sort_dir=search_opts['sort_dir'],
search_opts=search_opts_expected,
)
self.assertEqual(1, len(result['shares']))
self.assertEqual(shares[1]['id'], result['shares'][0]['id'])
self.assertEqual(
shares[1]['display_name'], result['shares'][0]['name'])
def test_share_list_summary_with_search_opts_by_non_admin(self):
self._share_list_summary_with_search_opts(use_admin_context=False)
def test_share_list_summary_with_search_opts_by_admin(self):
self._share_list_summary_with_search_opts(use_admin_context=True)
def test_share_list_summary(self):
self.mock_object(share_api.API, 'get_all',
stubs.stub_share_get_all_by_project)
req = fakes.HTTPRequest.blank('/shares')
res_dict = self.controller.index(req)
expected = {
'shares': [
{
'name': 'displayname',
'id': '1',
'links': [
{
'href': 'http://localhost/v1/fake/shares/1',
'rel': 'self'
},
{
'href': 'http://localhost/fake/shares/1',
'rel': 'bookmark'
}
],
}
]
}
self.assertEqual(expected, res_dict)
def _share_list_detail_with_search_opts(self, use_admin_context):
search_opts = {
'name': 'fake_name',
'status': constants.STATUS_AVAILABLE,
'share_server_id': 'fake_share_server_id',
'share_type_id': 'fake_share_type_id',
'snapshot_id': 'fake_snapshot_id',
'host': 'fake_host',
'share_network_id': 'fake_share_network_id',
'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized k1=v1
'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2
'sort_key': 'fake_sort_key',
'sort_dir': 'fake_sort_dir',
'limit': '1',
'offset': '1',
'is_public': 'False',
}
# fake_key should be filtered for non-admin
url = '/shares/detail?fake_key=fake_value'
for k, v in search_opts.items():
url = url + '&' + k + '=' + v
req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context)
shares = [
{'id': 'id1', 'display_name': 'n1'},
{
'id': 'id2',
'display_name': 'n2',
'status': constants.STATUS_AVAILABLE,
'snapshot_id': 'fake_snapshot_id',
'share_type_id': 'fake_share_type_id',
'instance': {
'host': 'fake_host',
'share_network_id': 'fake_share_network_id',
},
},
{'id': 'id3', 'display_name': 'n3'},
]
self.mock_object(share_api.API, 'get_all',
mock.Mock(return_value=shares))
result = self.controller.detail(req)
search_opts_expected = {
'display_name': search_opts['name'],
'status': search_opts['status'],
'share_server_id': search_opts['share_server_id'],
'share_type_id': search_opts['share_type_id'],
'snapshot_id': search_opts['snapshot_id'],
'host': search_opts['host'],
'share_network_id': search_opts['share_network_id'],
'metadata': {'k1': 'v1'},
'extra_specs': {'k2': 'v2'},
'is_public': 'False',
}
if use_admin_context:
search_opts_expected.update({'fake_key': 'fake_value'})
share_api.API.get_all.assert_called_once_with(
req.environ['manila.context'],
sort_key=search_opts['sort_key'],
sort_dir=search_opts['sort_dir'],
search_opts=search_opts_expected,
)
self.assertEqual(1, len(result['shares']))
self.assertEqual(shares[1]['id'], result['shares'][0]['id'])
self.assertEqual(
shares[1]['display_name'], result['shares'][0]['name'])
self.assertEqual(
shares[1]['snapshot_id'], result['shares'][0]['snapshot_id'])
self.assertEqual(
shares[1]['status'], result['shares'][0]['status'])
self.assertEqual(
shares[1]['share_type_id'], result['shares'][0]['share_type'])
self.assertEqual(
shares[1]['snapshot_id'], result['shares'][0]['snapshot_id'])
self.assertEqual(
shares[1]['instance']['host'], result['shares'][0]['host'])
self.assertEqual(
shares[1]['instance']['share_network_id'],
result['shares'][0]['share_network_id'])
def test_share_list_detail_with_search_opts_by_non_admin(self):
self._share_list_detail_with_search_opts(use_admin_context=False)
def test_share_list_detail_with_search_opts_by_admin(self):
self._share_list_detail_with_search_opts(use_admin_context=True)
def _list_detail_common_expected(self):
return {
'shares': [
{
'status': 'fakestatus',
'description': 'displaydesc',
'export_location': 'fake_location',
'export_locations': ['fake_location', 'fake_location2'],
'availability_zone': 'fakeaz',
'name': 'displayname',
'share_proto': 'FAKEPROTO',
'metadata': {},
'project_id': 'fakeproject',
'host': 'fakehost',
'id': '1',
'snapshot_id': '2',
'snapshot_support': True,
'share_network_id': None,
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
'share_type': '1',
'volume_type': '1',
'is_public': False,
'links': [
{
'href': 'http://localhost/v1/fake/shares/1',
'rel': 'self'
},
{
'href': 'http://localhost/fake/shares/1',
'rel': 'bookmark'
}
],
}
]
}
def _list_detail_test_common(self, req, expected):
self.mock_object(share_api.API, 'get_all',
stubs.stub_share_get_all_by_project)
res_dict = self.controller.detail(req)
self.assertEqual(expected, res_dict)
self.assertEqual(res_dict['shares'][0]['volume_type'],
res_dict['shares'][0]['share_type'])
def test_share_list_detail(self):
env = {'QUERY_STRING': 'name=Share+Test+Name'}
req = fakes.HTTPRequest.blank('/shares/detail', environ=env)
expected = self._list_detail_common_expected()
expected['shares'][0].pop('snapshot_support')
self._list_detail_test_common(req, expected)
def test_share_list_detail_with_consistency_group(self):
env = {'QUERY_STRING': 'name=Share+Test+Name'}
req = fakes.HTTPRequest.blank('/shares/detail', environ=env,
version="2.4")
expected = self._list_detail_common_expected()
expected['shares'][0]['consistency_group_id'] = None
expected['shares'][0]['source_cgsnapshot_member_id'] = None
self._list_detail_test_common(req, expected)
def test_share_list_detail_with_task_state(self):
env = {'QUERY_STRING': 'name=Share+Test+Name'}
req = fakes.HTTPRequest.blank('/shares/detail', environ=env,
version="2.5")
expected = self._list_detail_common_expected()
expected['shares'][0]['consistency_group_id'] = None
expected['shares'][0]['source_cgsnapshot_member_id'] = None
expected['shares'][0]['task_state'] = None
self._list_detail_test_common(req, expected)
def test_share_list_detail_without_export_locations(self):
env = {'QUERY_STRING': 'name=Share+Test+Name'}
req = fakes.HTTPRequest.blank('/shares/detail', environ=env,
version="2.9")
expected = self._list_detail_common_expected()
expected['shares'][0]['consistency_group_id'] = None
expected['shares'][0]['source_cgsnapshot_member_id'] = None
expected['shares'][0]['task_state'] = None
expected['shares'][0]['share_type_name'] = None
expected['shares'][0].pop('export_location')
expected['shares'][0].pop('export_locations')
self._list_detail_test_common(req, expected)
def test_share_list_detail_with_replication_type(self):
self.mock_object(share_api.API, 'get_all',
stubs.stub_share_get_all_by_project)
env = {'QUERY_STRING': 'name=Share+Test+Name'}
req = fakes.HTTPRequest.blank(
'/shares/detail', environ=env,
version=share_replicas.MIN_SUPPORTED_API_VERSION)
res_dict = self.controller.detail(req)
expected = {
'shares': [
{
'status': 'fakestatus',
'description': 'displaydesc',
'availability_zone': 'fakeaz',
'name': 'displayname',
'share_proto': 'FAKEPROTO',
'metadata': {},
'project_id': 'fakeproject',
'access_rules_status': 'active',
'host': 'fakehost',
'id': '1',
'snapshot_id': '2',
'share_network_id': None,
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
'share_type_name': None,
'share_type': '1',
'volume_type': '1',
'is_public': False,
'consistency_group_id': None,
'source_cgsnapshot_member_id': None,
'snapshot_support': True,
'has_replicas': False,
'replication_type': None,
'task_state': None,
'links': [
{
'href': 'http://localhost/v1/fake/shares/1',
'rel': 'self'
},
{
'href': 'http://localhost/fake/shares/1',
'rel': 'bookmark'
}
],
}
]
}
self.assertEqual(expected, res_dict)
self.assertEqual(res_dict['shares'][0]['volume_type'],
res_dict['shares'][0]['share_type'])
def test_remove_invalid_options(self):
ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=False)
search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
expected_opts = {'a': 'a', 'c': 'c'}
allowed_opts = ['a', 'c']
common.remove_invalid_options(ctx, search_opts, allowed_opts)
self.assertEqual(expected_opts, search_opts)
def test_remove_invalid_options_admin(self):
ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=True)
search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
expected_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
allowed_opts = ['a', 'c']
common.remove_invalid_options(ctx, search_opts, allowed_opts)
self.assertEqual(expected_opts, search_opts)
def _fake_access_get(self, ctxt, access_id):
class Access(object):
def __init__(self, **kwargs):
self.STATE_NEW = 'fake_new'
self.STATE_ACTIVE = 'fake_active'
self.STATE_ERROR = 'fake_error'
self.params = kwargs
self.params['state'] = self.STATE_NEW
self.share_id = kwargs.get('share_id')
self.id = access_id
def __getitem__(self, item):
return self.params[item]
access = Access(access_id=access_id, share_id='fake_share_id')
return access
@ddt.ddt
class ShareActionsTest(test.TestCase):
def setUp(self):
super(self.__class__, self).setUp()
self.controller = shares.ShareController()
self.mock_object(share_api.API, 'get', stubs.stub_share_get)
@ddt.data(
{'access_type': 'ip', 'access_to': '127.0.0.1'},
{'access_type': 'user', 'access_to': '1' * 4},
{'access_type': 'user', 'access_to': '1' * 32},
{'access_type': 'user', 'access_to': 'fake\\]{.-_\'`;}['},
{'access_type': 'user', 'access_to': 'MYDOMAIN\\Administrator'},
{'access_type': 'cert', 'access_to': 'x'},
{'access_type': 'cert', 'access_to': 'tenant.example.com'},
{'access_type': 'cert', 'access_to': 'x' * 64},
)
def test_allow_access(self, access):
self.mock_object(share_api.API,
'allow_access',
mock.Mock(return_value={'fake': 'fake'}))
self.mock_object(self.controller._access_view_builder, 'view',
mock.Mock(return_value={'access':
{'fake': 'fake'}}))
id = 'fake_share_id'
body = {'allow_access': access}
expected = {'access': {'fake': 'fake'}}
req = fakes.HTTPRequest.blank(
'/v2/tenant1/shares/%s/action' % id, version="2.7")
res = self.controller.allow_access(req, id, body)
self.assertEqual(expected, res)
@ddt.data(
{'access_type': 'error_type', 'access_to': '127.0.0.1'},
{'access_type': 'ip', 'access_to': 'localhost'},
{'access_type': 'ip', 'access_to': '127.0.0.*'},
{'access_type': 'ip', 'access_to': '127.0.0.0/33'},
{'access_type': 'ip', 'access_to': '127.0.0.256'},
{'access_type': 'user', 'access_to': '1'},
{'access_type': 'user', 'access_to': '1' * 3},
{'access_type': 'user', 'access_to': '1' * 33},
{'access_type': 'user', 'access_to': 'root^'},
{'access_type': 'cert', 'access_to': ''},
{'access_type': 'cert', 'access_to': ' '},
{'access_type': 'cert', 'access_to': 'x' * 65},
)
def test_allow_access_error(self, access):
id = 'fake_share_id'
body = {'allow_access': access}
req = fakes.HTTPRequest.blank('/v2/tenant1/shares/%s/action' % id,
version="2.7")
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.allow_access, req, id, body)
@ddt.unpack
@ddt.data(
{'exc': None, 'access_to': 'alice', 'version': '2.13'},
{'exc': webob.exc.HTTPBadRequest, 'access_to': 'alice',
'version': '2.11'}
)
def test_allow_access_ceph(self, exc, access_to, version):
share_id = "fake_id"
self.mock_object(share_api.API,
'allow_access',
mock.Mock(return_value={'fake': 'fake'}))
self.mock_object(self.controller._access_view_builder, 'view',
mock.Mock(return_value={'access':
{'fake': 'fake'}}))
req = fakes.HTTPRequest.blank(
'/v2/shares/%s/action' % share_id, version=version)
body = {'allow_access':
{
'access_type': 'cephx',
'access_to': access_to,
'access_level': 'rw'
}}
if exc:
self.assertRaises(exc, self.controller.allow_access, req, share_id,
body)
else:
expected = {'access': {'fake': 'fake'}}
            res = self.controller.allow_access(req, share_id, body)
self.assertEqual(expected, res)
def test_deny_access(self):
def _stub_deny_access(*args, **kwargs):
pass
self.mock_object(share_api.API, "deny_access", _stub_deny_access)
self.mock_object(share_api.API, "access_get", _fake_access_get)
id = 'fake_share_id'
body = {"os-deny_access": {"access_id": 'fake_acces_id'}}
req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
res = self.controller._deny_access(req, id, body)
self.assertEqual(202, res.status_int)
def test_deny_access_not_found(self):
def _stub_deny_access(*args, **kwargs):
pass
self.mock_object(share_api.API, "deny_access", _stub_deny_access)
self.mock_object(share_api.API, "access_get", _fake_access_get)
id = 'super_fake_share_id'
body = {"os-deny_access": {"access_id": 'fake_acces_id'}}
req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._deny_access,
req,
id,
body)
def test_access_list(self):
fake_access_list = [
{
"state": "fakestatus",
"id": "fake_access_id",
"access_type": "fakeip",
"access_to": "127.0.0.1",
}
]
self.mock_object(self.controller._access_view_builder, 'list_view',
mock.Mock(return_value={'access_list':
fake_access_list}))
id = 'fake_share_id'
body = {"os-access_list": None}
req = fakes.HTTPRequest.blank('/v2/tenant1/shares/%s/action' % id)
res_dict = self.controller._access_list(req, id, body)
self.assertEqual({'access_list': fake_access_list}, res_dict)
@ddt.unpack
@ddt.data(
{'body': {'os-extend': {'new_size': 2}}, 'version': '2.6'},
{'body': {'extend': {'new_size': 2}}, 'version': '2.7'},
)
def test_extend(self, body, version):
id = 'fake_share_id'
share = stubs.stub_share_get(None, None, id)
self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
self.mock_object(share_api.API, "extend")
size = '2'
req = fakes.HTTPRequest.blank(
'/v2/shares/%s/action' % id, version=version)
actual_response = self.controller._extend(req, id, body)
share_api.API.get.assert_called_once_with(mock.ANY, id)
share_api.API.extend.assert_called_once_with(
mock.ANY, share, int(size))
self.assertEqual(202, actual_response.status_int)
@ddt.data({"os-extend": ""},
{"os-extend": {"new_size": "foo"}},
{"os-extend": {"new_size": {'foo': 'bar'}}})
def test_extend_invalid_body(self, body):
id = 'fake_share_id'
req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._extend, req, id, body)
@ddt.data({'source': exception.InvalidInput,
'target': webob.exc.HTTPBadRequest},
{'source': exception.InvalidShare,
'target': webob.exc.HTTPBadRequest},
{'source': exception.ShareSizeExceedsAvailableQuota,
'target': webob.exc.HTTPForbidden})
@ddt.unpack
def test_extend_exception(self, source, target):
id = 'fake_share_id'
req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)
body = {"os-extend": {'new_size': '123'}}
self.mock_object(share_api.API, "extend",
mock.Mock(side_effect=source('fake')))
self.assertRaises(target, self.controller._extend, req, id, body)
@ddt.unpack
@ddt.data(
{'body': {'os-shrink': {'new_size': 1}}, 'version': '2.6'},
{'body': {'shrink': {'new_size': 1}}, 'version': '2.7'},
)
def test_shrink(self, body, version):
id = 'fake_share_id'
share = stubs.stub_share_get(None, None, id)
self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
self.mock_object(share_api.API, "shrink")
size = '1'
req = fakes.HTTPRequest.blank(
'/v2/shares/%s/action' % id, version=version)
actual_response = self.controller._shrink(req, id, body)
share_api.API.get.assert_called_once_with(mock.ANY, id)
share_api.API.shrink.assert_called_once_with(
mock.ANY, share, int(size))
self.assertEqual(202, actual_response.status_int)
@ddt.data({"os-shrink": ""},
{"os-shrink": {"new_size": "foo"}},
{"os-shrink": {"new_size": {'foo': 'bar'}}})
def test_shrink_invalid_body(self, body):
id = 'fake_share_id'
req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._shrink, req, id, body)
@ddt.data({'source': exception.InvalidInput,
'target': webob.exc.HTTPBadRequest},
{'source': exception.InvalidShare,
'target': webob.exc.HTTPBadRequest})
@ddt.unpack
def test_shrink_exception(self, source, target):
id = 'fake_share_id'
req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)
body = {"os-shrink": {'new_size': '123'}}
self.mock_object(share_api.API, "shrink",
mock.Mock(side_effect=source('fake')))
self.assertRaises(target, self.controller._shrink, req, id, body)
@ddt.ddt
class ShareAdminActionsAPITest(test.TestCase):
def setUp(self):
super(self.__class__, self).setUp()
CONF.set_default("default_share_type", None)
self.flags(rpc_backend='manila.openstack.common.rpc.impl_fake')
self.share_api = share_api.API()
self.admin_context = context.RequestContext('admin', 'fake', True)
self.member_context = context.RequestContext('fake', 'fake')
def _get_context(self, role):
return getattr(self, '%s_context' % role)
def _setup_share_data(self, share=None, version='2.7'):
if share is None:
share = db_utils.create_share(status=constants.STATUS_AVAILABLE,
size='1',
override_defaults=True)
req = fakes.HTTPRequest.blank(
'/v2/fake/shares/%s/action' % share['id'], version=version)
return share, req
def _reset_status(self, ctxt, model, req, db_access_method,
valid_code, valid_status=None, body=None, version='2.7'):
if float(version) > 2.6:
action_name = 'reset_status'
else:
action_name = 'os-reset_status'
if body is None:
body = {action_name: {'status': constants.STATUS_ERROR}}
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.headers['X-Openstack-Manila-Api-Version'] = version
req.body = six.b(jsonutils.dumps(body))
req.environ['manila.context'] = ctxt
resp = req.get_response(fakes.app())
# validate response code and model status
self.assertEqual(valid_code, resp.status_int)
if valid_code == 404:
self.assertRaises(exception.NotFound,
db_access_method,
ctxt,
model['id'])
else:
actual_model = db_access_method(ctxt, model['id'])
self.assertEqual(valid_status, actual_model['status'])
@ddt.data(*fakes.fixture_reset_status_with_different_roles)
@ddt.unpack
def test_share_reset_status_with_different_roles(self, role, valid_code,
valid_status, version):
share, req = self._setup_share_data(version=version)
ctxt = self._get_context(role)
self._reset_status(ctxt, share, req, db.share_get, valid_code,
valid_status, version=version)
@ddt.data(*fakes.fixture_invalid_reset_status_body)
def test_share_invalid_reset_status_body(self, body):
share, req = self._setup_share_data(version='2.6')
ctxt = self.admin_context
self._reset_status(ctxt, share, req, db.share_get, 400,
constants.STATUS_AVAILABLE, body, version='2.6')
@ddt.data('2.6', '2.7')
def test_share_reset_status_for_missing(self, version):
fake_share = {'id': 'missing-share-id'}
req = fakes.HTTPRequest.blank(
'/v2/fake/shares/%s/action' % fake_share['id'], version=version)
        self._reset_status(self.admin_context, fake_share, req,
                           db.share_get, 404, version=version)
def _force_delete(self, ctxt, model, req, db_access_method, valid_code,
check_model_in_db=False, version='2.7'):
if float(version) > 2.6:
action_name = 'force_delete'
else:
action_name = 'os-force_delete'
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.headers['X-Openstack-Manila-Api-Version'] = version
req.body = six.b(jsonutils.dumps({action_name: {}}))
req.environ['manila.context'] = ctxt
resp = req.get_response(fakes.app())
# validate response
self.assertEqual(valid_code, resp.status_int)
if valid_code == 202 and check_model_in_db:
self.assertRaises(exception.NotFound,
db_access_method,
ctxt,
model['id'])
@ddt.data(*fakes.fixture_force_delete_with_different_roles)
@ddt.unpack
def test_share_force_delete_with_different_roles(self, role, resp_code,
version):
share, req = self._setup_share_data(version=version)
ctxt = self._get_context(role)
self._force_delete(ctxt, share, req, db.share_get, resp_code,
check_model_in_db=True, version=version)
@ddt.data('2.6', '2.7')
def test_share_force_delete_missing(self, version):
share, req = self._setup_share_data(
share={'id': 'fake'}, version=version)
ctxt = self._get_context('admin')
self._force_delete(
ctxt, share, req, db.share_get, 404, version=version)
@ddt.ddt
class ShareUnmanageTest(test.TestCase):
def setUp(self):
super(self.__class__, self).setUp()
self.controller = shares.ShareController()
self.mock_object(share_api.API, 'get_all',
stubs.stub_get_all_shares)
self.mock_object(share_api.API, 'get',
stubs.stub_share_get)
self.mock_object(share_api.API, 'update', stubs.stub_share_update)
self.mock_object(share_api.API, 'delete', stubs.stub_share_delete)
self.mock_object(share_api.API, 'get_snapshot',
stubs.stub_snapshot_get)
self.share_id = 'fake'
self.request = fakes.HTTPRequest.blank(
'/share/%s/unmanage' % self.share_id,
use_admin_context=True, version='2.7',
)
def test_unmanage_share(self):
share = dict(status=constants.STATUS_AVAILABLE, id='foo_id',
instance={})
self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
self.mock_object(share_api.API, 'unmanage', mock.Mock())
self.mock_object(
self.controller.share_api.db, 'share_snapshot_get_all_for_share',
mock.Mock(return_value=[]))
actual_result = self.controller.unmanage(self.request, share['id'])
self.assertEqual(202, actual_result.status_int)
self.controller.share_api.db.share_snapshot_get_all_for_share.\
assert_called_once_with(
self.request.environ['manila.context'], share['id'])
self.controller.share_api.get.assert_called_once_with(
self.request.environ['manila.context'], share['id'])
share_api.API.unmanage.assert_called_once_with(
self.request.environ['manila.context'], share)
def test_unmanage_share_that_has_snapshots(self):
share = dict(status=constants.STATUS_AVAILABLE, id='foo_id',
instance={})
snapshots = ['foo', 'bar']
self.mock_object(self.controller.share_api, 'unmanage')
self.mock_object(
self.controller.share_api.db, 'share_snapshot_get_all_for_share',
mock.Mock(return_value=snapshots))
self.mock_object(
self.controller.share_api, 'get',
mock.Mock(return_value=share))
self.assertRaises(
webob.exc.HTTPForbidden,
self.controller.unmanage, self.request, share['id'])
self.assertFalse(self.controller.share_api.unmanage.called)
self.controller.share_api.db.share_snapshot_get_all_for_share.\
assert_called_once_with(
self.request.environ['manila.context'], share['id'])
self.controller.share_api.get.assert_called_once_with(
self.request.environ['manila.context'], share['id'])
def test_unmanage_share_based_on_share_server(self):
share = dict(instance=dict(share_server_id='foo_id'), id='bar_id')
self.mock_object(
self.controller.share_api, 'get',
mock.Mock(return_value=share))
self.assertRaises(
webob.exc.HTTPForbidden,
self.controller.unmanage, self.request, share['id'])
self.controller.share_api.get.assert_called_once_with(
self.request.environ['manila.context'], share['id'])
@ddt.data(*constants.TRANSITIONAL_STATUSES)
def test_unmanage_share_with_transitional_state(self, share_status):
share = dict(status=share_status, id='foo_id', instance={})
self.mock_object(
self.controller.share_api, 'get',
mock.Mock(return_value=share))
self.assertRaises(
webob.exc.HTTPForbidden,
self.controller.unmanage, self.request, share['id'])
self.controller.share_api.get.assert_called_once_with(
self.request.environ['manila.context'], share['id'])
def test_unmanage_share_not_found(self):
self.mock_object(share_api.API, 'get', mock.Mock(
side_effect=exception.NotFound))
self.mock_object(share_api.API, 'unmanage', mock.Mock())
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.unmanage,
self.request, self.share_id)
@ddt.data(exception.InvalidShare(reason="fake"),
exception.PolicyNotAuthorized(action="fake"),)
def test_unmanage_share_invalid(self, side_effect):
share = dict(status=constants.STATUS_AVAILABLE, id='foo_id',
instance={})
self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
self.mock_object(share_api.API, 'unmanage', mock.Mock(
side_effect=side_effect))
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.unmanage,
self.request, self.share_id)
def test_wrong_permissions(self):
share_id = 'fake'
req = fakes.HTTPRequest.blank('/share/%s/unmanage' % share_id,
use_admin_context=False, version='2.7')
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.unmanage,
req,
share_id)
def test_unsupported_version(self):
share_id = 'fake'
req = fakes.HTTPRequest.blank('/share/%s/unmanage' % share_id,
use_admin_context=False, version='2.6')
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.unmanage,
req,
share_id)
def get_fake_manage_body(export_path='/fake', service_host='fake@host#POOL',
protocol='fake', share_type='fake', **kwargs):
fake_share = {
'export_path': export_path,
'service_host': service_host,
'protocol': protocol,
'share_type': share_type,
}
fake_share.update(kwargs)
return {'share': fake_share}
@ddt.ddt
class ShareManageTest(test.TestCase):
def setUp(self):
super(self.__class__, self).setUp()
self.controller = shares.ShareController()
self.resource_name = self.controller.resource_name
self.request = fakes.HTTPRequest.blank(
'/v2/shares/manage', use_admin_context=True, version='2.7')
self.mock_policy_check = self.mock_object(
policy, 'check_policy', mock.Mock(return_value=True))
def _setup_manage_mocks(self, service_is_up=True):
self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock(
return_value={'host': 'fake'}))
self.mock_object(share_types, 'get_share_type_by_name_or_id',
mock.Mock(return_value={'id': 'fake'}))
self.mock_object(utils, 'service_is_up', mock.Mock(
return_value=service_is_up))
if service_is_up:
self.mock_object(utils, 'validate_service_host')
else:
self.mock_object(
utils,
'validate_service_host',
mock.Mock(side_effect=exception.ServiceIsDown(service='fake')))
@ddt.data({},
{'shares': {}},
{'share': get_fake_manage_body('', None, None)})
def test_share_manage_invalid_body(self, body):
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.manage,
self.request,
body)
def test_share_manage_service_not_found(self):
body = get_fake_manage_body()
self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock(
side_effect=exception.ServiceNotFound(service_id='fake')))
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.manage,
self.request,
body)
def test_share_manage_share_type_not_found(self):
body = get_fake_manage_body()
self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock())
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(db, 'share_type_get_by_name', mock.Mock(
side_effect=exception.ShareTypeNotFoundByName(
share_type_name='fake')))
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.manage,
self.request,
body)
@ddt.data({'service_is_up': False, 'service_host': 'fake@host#POOL'},
{'service_is_up': True, 'service_host': 'fake@host'})
def test_share_manage_bad_request(self, settings):
body = get_fake_manage_body(service_host=settings.pop('service_host'))
self._setup_manage_mocks(**settings)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.manage,
self.request,
body)
def test_share_manage_duplicate_share(self):
body = get_fake_manage_body()
exc = exception.InvalidShare(reason="fake")
self._setup_manage_mocks()
self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=exc))
self.assertRaises(webob.exc.HTTPConflict,
self.controller.manage,
self.request,
body)
def test_share_manage_forbidden_manage(self):
body = get_fake_manage_body()
self._setup_manage_mocks()
error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action=''))
self.mock_object(share_api.API, 'manage', error)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.manage,
self.request,
body)
def test_share_manage_forbidden_validate_service_host(self):
body = get_fake_manage_body()
self._setup_manage_mocks()
error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action=''))
self.mock_object(
utils, 'validate_service_host', mock.Mock(side_effect=error))
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.manage,
self.request,
body)
@ddt.data(
get_fake_manage_body(name='foo', description='bar'),
get_fake_manage_body(display_name='foo', description='bar'),
get_fake_manage_body(name='foo', display_description='bar'),
get_fake_manage_body(display_name='foo', display_description='bar'),
get_fake_manage_body(display_name='foo', display_description='bar',
driver_options=dict(volume_id='quuz')),
)
def test_share_manage(self, data):
self._test_share_manage(data, "2.7")
@ddt.data(
get_fake_manage_body(name='foo', description='bar', is_public=True),
get_fake_manage_body(name='foo', description='bar', is_public=False)
)
def test_share_manage_with_is_public(self, data):
self._test_share_manage(data, "2.8")
def test_share_manage_with_user_id(self):
self._test_share_manage(get_fake_manage_body(
name='foo', description='bar', is_public=True), "2.16")
def _test_share_manage(self, data, version):
expected = {
'share': {
'status': 'fakestatus',
'description': 'displaydesc',
'availability_zone': 'fakeaz',
'name': 'displayname',
'share_proto': 'FAKEPROTO',
'metadata': {},
'project_id': 'fakeproject',
'host': 'fakehost',
'id': 'fake',
'snapshot_id': '2',
'share_network_id': None,
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
'share_type_name': None,
'share_server_id': 'fake_share_server_id',
'share_type': '1',
'volume_type': '1',
'is_public': False,
'consistency_group_id': None,
'source_cgsnapshot_member_id': None,
'snapshot_support': True,
'task_state': None,
'links': [
{
'href': 'http://localhost/v1/fake/shares/fake',
'rel': 'self'
},
{
'href': 'http://localhost/fake/shares/fake',
'rel': 'bookmark'
}
],
}
}
self._setup_manage_mocks()
return_share = mock.Mock(
return_value=stubs.stub_share('fake', instance={}))
self.mock_object(
share_api.API, 'manage', return_share)
share = {
'host': data['share']['service_host'],
'export_location': data['share']['export_path'],
'share_proto': data['share']['protocol'].upper(),
'share_type_id': 'fake',
'display_name': 'foo',
'display_description': 'bar',
}
driver_options = data['share'].get('driver_options', {})
if (api_version.APIVersionRequest(version) <=
api_version.APIVersionRequest('2.8')):
expected['share']['export_location'] = 'fake_location'
expected['share']['export_locations'] = (
['fake_location', 'fake_location2'])
if (api_version.APIVersionRequest(version) >=
api_version.APIVersionRequest('2.10')):
expected['share']['access_rules_status'] = (
constants.STATUS_ACTIVE)
if (api_version.APIVersionRequest(version) >=
api_version.APIVersionRequest('2.11')):
expected['share']['has_replicas'] = False
expected['share']['replication_type'] = None
if (api_version.APIVersionRequest(version) >=
api_version.APIVersionRequest('2.16')):
expected['share']['user_id'] = 'fakeuser'
if (api_version.APIVersionRequest(version) >=
api_version.APIVersionRequest('2.8')):
share['is_public'] = data['share']['is_public']
req = fakes.HTTPRequest.blank('/v2/shares/manage', version=version,
use_admin_context=True)
actual_result = self.controller.manage(req, data)
share_api.API.manage.assert_called_once_with(
mock.ANY, share, driver_options)
self.assertIsNotNone(actual_result)
self.assertEqual(expected, actual_result)
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'manage')
def test_wrong_permissions(self):
body = get_fake_manage_body()
self.assertRaises(
webob.exc.HTTPForbidden,
self.controller.manage,
fakes.HTTPRequest.blank(
'/share/manage', use_admin_context=False, version='2.7'),
body,
)
def test_unsupported_version(self):
share_id = 'fake'
req = fakes.HTTPRequest.blank(
'/share/manage', use_admin_context=False, version='2.6')
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.manage,
req,
share_id)
|
NetApp/manila
|
manila/tests/api/v2/test_shares.py
|
Python
|
apache-2.0
| 85,903
|
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves.urllib import parse as urlparse
import requests
from oslo_log import log as logging
from tricircle.common import client
from tricircle.common import constants as cons
from tricircle.db import api as db_api
LOG = logging.getLogger(__name__)
# The url could be an endpoint registered in keystone, or a url sent to the
# tricircle service, which is stored in pecan.request.url.
def get_version_from_url(url):
components = urlparse.urlsplit(url)
path = components.path
pos = path.find('/')
ver = ''
if pos == 0:
path = path[1:]
i = path.find('/')
if i >= 0:
ver = path[:i]
else:
ver = path
elif pos > 0:
ver = path[:pos]
else:
ver = path
return ver
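# Illustrative sketch (not part of the original module): how the parsing above
# behaves for a couple of made-up URLs. Wrapped in a function so it has no
# import-time side effects.
def _illustrate_get_version_from_url():
    # Full endpoint URL: the path is '/v2.0/networks', so the first path
    # segment, 'v2.0', is returned.
    assert get_version_from_url('http://127.0.0.1:9696/v2.0/networks') == 'v2.0'
    # A relative path without a leading slash still yields its first segment.
    assert get_version_from_url('v2.1/servers/detail') == 'v2.1'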
def get_bottom_url(t_ver, t_url, b_ver, b_endpoint):
"""get_bottom_url
    convert a url received by the Tricircle service to a bottom OpenStack
    request url, using the endpoint configured in keystone
:param t_ver: version of top service
:param t_url: request url to the top service
:param b_ver: version of bottom service
:param b_endpoint: endpoint registered in keystone for bottom service
:return: request url to bottom service
"""
t_parse = urlparse.urlsplit(t_url)
after_ver = t_parse.path
remove_ver = '/' + t_ver + '/'
pos = after_ver.find(remove_ver)
if pos == 0:
after_ver = after_ver[len(remove_ver):]
else:
remove_ver = t_ver + '/'
pos = after_ver.find(remove_ver)
if pos == 0:
after_ver = after_ver[len(remove_ver):]
if after_ver == t_parse.path:
# wrong t_url
return ''
b_parse = urlparse.urlsplit(b_endpoint)
scheme = b_parse.scheme
netloc = b_parse.netloc
path = '/' + b_ver + '/' + after_ver
if b_ver == '':
path = '/' + after_ver
# Remove availability_zone filter since it is handled by VolumeController.
# VolumeController will send GET request only to bottom pods whose AZ
# is specified in availability_zone filter.
query_filters = []
for k, v in urlparse.parse_qsl(t_parse.query):
if k == 'availability_zone':
continue
query_filters.append((k, v))
query = urlparse.urlencode(query_filters)
fragment = t_parse.fragment
b_url = urlparse.urlunsplit((scheme,
netloc,
path,
query,
fragment))
return b_url
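# Illustrative sketch (not part of the original module): converting a made-up
# top-service URL into a bottom-pod URL with get_bottom_url. The host names
# and endpoint below are hypothetical.
def _illustrate_get_bottom_url():
    t_url = 'http://top-host:8776/v2/volumes/detail?availability_zone=az1'
    b_url = get_bottom_url('v2', t_url, 'v2', 'http://bottom-host:8776/v2')
    # The version segment is rewritten against the bottom endpoint and the
    # availability_zone filter, handled by the top service, is stripped.
    assert b_url == 'http://bottom-host:8776/v2/volumes/detail'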
def get_pod_service_endpoint(context, region_name, st):
pod = db_api.get_pod_by_name(context, region_name)
if pod:
c = client.Client()
return c.get_endpoint(context, pod['pod_id'], st)
return ''
def get_pod_service_ctx(context, t_url, region_name, s_type=cons.ST_NEUTRON):
t_ver = get_version_from_url(t_url)
b_endpoint = get_pod_service_endpoint(context,
region_name,
s_type)
b_ver = get_version_from_url(b_endpoint)
b_url = ''
if b_endpoint != '':
b_url = get_bottom_url(t_ver, t_url, b_ver, b_endpoint)
return {'t_ver': t_ver, 'b_ver': b_ver,
't_url': t_url, 'b_url': b_url}
def forward_req(context, action, b_headers, b_url, b_body):
s = requests.Session()
req = requests.Request(action, b_url,
data=b_body,
headers=b_headers)
prepped = req.prepare()
# do something with prepped.body
# do something with prepped.headers
resp = s.send(prepped,
timeout=60)
return resp
def get_res_routing_ref(context, _id, t_url, s_type):
"""Get the service context according to resource routing.
:param _id: the top id of resource
:param t_url: request url
:param s_type: service type
:returns: service context
"""
pod = db_api.get_pod_by_top_id(context, _id)
if not pod:
return None
region_name = pod['region_name']
s_ctx = get_pod_service_ctx(context, t_url, region_name,
s_type=s_type)
if s_ctx['b_url'] == '':
LOG.error("bottom pod endpoint incorrect %s" %
region_name)
return s_ctx
def convert_header(from_release, to_release, header):
b_header = {}
    # Remove invalid header items: the requests lib strictly checks headers
    # for security purposes, so a non-string or non-bytes value leads to an
    # exception, and leading spaces are also handled by the
    # requests.utils.check_header_validity function.
for k, v in header.items():
if v:
b_header[k] = v
return b_header
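# Illustrative sketch (not part of the original module): convert_header drops
# items with falsy values so the requests library will not reject the header.
def _illustrate_convert_header():
    header = {'X-Auth-Token': 'fake-token', 'X-Empty': '', 'X-None': None}
    assert convert_header('from', 'to', header) == {'X-Auth-Token': 'fake-token'}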
def convert_object(from_release, to_release, res_object,
res_type=cons.RT_NETWORK):
return res_object
|
stackforge/tricircle
|
tricircle/common/httpclient.py
|
Python
|
apache-2.0
| 5,505
|
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GradingRecord represents a cluster (mentor/student) of SurveyRecords
for an evaluation period.
"""
from google.appengine.ext import db
from django.utils.translation import ugettext
from soc.modules.gsoc.models.grading_project_survey_record \
import GSoCGradingProjectSurveyRecord
from soc.modules.gsoc.models.grading_project_survey_record \
import GradingProjectSurveyRecord
from soc.modules.gsoc.models.grading_survey_group import GSoCGradingSurveyGroup
from soc.modules.gsoc.models.grading_survey_group import GradingSurveyGroup
from soc.modules.gsoc.models.project_survey_record import GSoCProjectSurveyRecord
from soc.modules.gsoc.models.project_survey_record import ProjectSurveyRecord
from soc.modules.gsoc.models.student_project import StudentProject
class GradingRecord(db.Model):
"""Explicitly group SurveyRecords with a common project.
Because Mentors and Students take different surveys,
we cannot simply link survey records by a common project and survey.
Instead, we establish a GradingRecord.
A GradingRecord links a group of survey records with a common
project, and links back to its records.
This entity can be edited by Program Administrators to edit the outcome
  of the Grading surveys without touching the real survey's answers.
  Also, if a ProjectSurvey has been coupled to the GradingSurveyGroup, this must
be on record as well for the GradingRecord to state a pass, even if the
Mentor has filled in a passing grade.
"""
#: The GradingSurveyGroup to which this record belongs
grading_survey_group = db.ReferenceProperty(
reference_class=GradingSurveyGroup, required=True,
collection_name='grading_records')
#: Mentor's GradingProjectSurveyRecord for this evaluation. Iff exists.
mentor_record = db.ReferenceProperty(
reference_class=GradingProjectSurveyRecord, required=False,
collection_name='mentor_grading_records')
#: Student's ProjectSurveyRecord for this evaluation. Iff exists.
student_record = db.ReferenceProperty(
reference_class=ProjectSurveyRecord, required=False,
collection_name='student_grading_records')
#: Project for this evaluation.
project = db.ReferenceProperty(
reference_class=StudentProject, required=True,
collection_name='grading_records')
#: Grade decision set for this grading record.
#: pass: Iff the mentor_record states that the student has passed.
#: And if a ProjectSurvey has been set in the GradingSurveyGroup
#: then the student_record must be set as well.
#: fail: If the mentor_record states that the student has failed. The
#: student_record does not matter in this case. However if the mentor
#: states that the student has passed, a ProjectSurvey has been
#: set in the GradingSurveyGroup and the student_record property is not
#: set the decision will be fail.
#: undecided: If no mentor_record has been set.
grade_decision = db.StringProperty(required=True, default='undecided',
choices=['pass', 'fail', 'undecided'])
#: Boolean that states if the grade_decision property has been locked
#: This is to prevent an automatic update from a GradingSurveyGroup to
#: overwrite the decision made by for example a Program Administrator.
locked = db.BooleanProperty(required=False, default=False,
verbose_name=ugettext('Grade Decision locked'))
#: Property containing the date that this GradingRecord was created.
created = db.DateTimeProperty(auto_now_add=True)
#: Property containing the last date that this GradingRecord was modified.
modified = db.DateTimeProperty(auto_now=True)
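# Illustrative sketch (not part of the original model): the grade_decision
# rule documented above, expressed over plain booleans so it assumes nothing
# about the survey record APIs.
def _illustrative_grade_decision(mentor_passed, survey_required,
                                 student_record_present):
  """Return 'pass', 'fail' or 'undecided' per the rule in GradingRecord.

  mentor_passed is None when no mentor_record has been set.
  """
  if mentor_passed is None:
    return 'undecided'
  if not mentor_passed:
    return 'fail'
  # The mentor passed the student, but a ProjectSurvey coupled to the
  # GradingSurveyGroup without a matching student_record still means fail.
  if survey_required and not student_record_present:
    return 'fail'
  return 'pass'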
class GSoCGradingRecord(db.Model):
"""Explicitly group SurveyRecords with a common project.
Because Mentors and Students take different surveys,
we cannot simply link survey records by a common project and survey.
Instead, we establish a GradingRecord.
A GradingRecord links a group of survey records with a common
project, and links back to its records.
This entity can be edited by Program Administrators to edit the outcome
  of the Grading surveys without touching the real survey's answers.
  Also, if a ProjectSurvey has been coupled to the GradingSurveyGroup, this must
be on record as well for the GradingRecord to state a pass, even if the
Mentor has filled in a passing grade.
Parent:
soc.modules.gsoc.models.project.GSoCProject
"""
#: The GradingSurveyGroup to which this record belongs
grading_survey_group = db.ReferenceProperty(
reference_class=GSoCGradingSurveyGroup, required=True,
collection_name='gsoc_grading_records')
#: Mentor's GradingProjectSurveyRecord for this evaluation. Iff exists.
mentor_record = db.ReferenceProperty(
reference_class=GSoCGradingProjectSurveyRecord, required=False,
collection_name='gsoc_mentor_grading_records')
#: Student's ProjectSurveyRecord for this evaluation. Iff exists.
student_record = db.ReferenceProperty(
reference_class=GSoCProjectSurveyRecord, required=False,
collection_name='gsoc_student_grading_records')
#: Grade decision set for this grading record.
#: pass: Iff the mentor_record states that the student has passed.
#: And if a ProjectSurvey has been set in the GradingSurveyGroup
#: then the student_record must be set as well.
#: fail: If the mentor_record states that the student has failed. The
#: student_record does not matter in this case. However if the mentor
#: states that the student has passed, a ProjectSurvey has been
#: set in the GradingSurveyGroup and the student_record property is not
#: set the decision will be fail.
#: undecided: If no mentor_record has been set.
grade_decision = db.StringProperty(required=True, default='undecided',
choices=['pass', 'fail', 'undecided'],
verbose_name=ugettext('Grade'))
#: Boolean that states if the grade_decision property has been locked
#: This is to prevent an automatic update from a GradingSurveyGroup to
#: overwrite the decision made by for example a Program Administrator.
locked = db.BooleanProperty(required=False, default=False,
verbose_name=ugettext('Grade locked'))
locked.help_text = ugettext('When locked the grade can only be changed manually.')
#: Property containing the date that this GradingRecord was created.
created = db.DateTimeProperty(auto_now_add=True)
#: Property containing the last date that this GradingRecord was modified.
modified = db.DateTimeProperty(auto_now=True)
|
adviti/melange
|
app/soc/modules/gsoc/models/grading_record.py
|
Python
|
apache-2.0
| 7,310
|
from django.conf.urls.defaults import *
from views import thumbnail_create, thumbnail_get, share_link_thumbnail_get, \
share_link_thumbnail_create
urlpatterns = patterns('',
url(r'^(?P<repo_id>[-0-9a-f]{36})/create/$', thumbnail_create, name='thumbnail_create'),
url(r'^(?P<repo_id>[-0-9a-f]{36})/(?P<size>[0-9]+)/(?P<path>.*)$', thumbnail_get, name='thumbnail_get'),
url(r'^(?P<token>[a-f0-9]{10})/create/$', share_link_thumbnail_create, name='share_link_thumbnail_create'),
url(r'^(?P<token>[a-f0-9]{10})/(?P<size>[0-9]+)/(?P<path>.*)$', share_link_thumbnail_get, name='share_link_thumbnail_get'),
)
|
madflow/seahub
|
seahub/thumbnail/urls.py
|
Python
|
apache-2.0
| 624
|
""" Cisco_IOS_XR_ha_eem_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ha\-eem package configuration.
This module contains definitions
for the following management objects\:
event\-manager\: Event manager configuration
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class EventManagerChecksumEnum(Enum):
"""
EventManagerChecksumEnum
Event manager checksum
.. data:: SHA_1 = 1
Use SHA-1 checksum
.. data:: MD5 = 2
Use MD5 checksum
"""
SHA_1 = 1
MD5 = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManagerChecksumEnum']
class EventManagerPolicyEnum(Enum):
"""
EventManagerPolicyEnum
Event manager policy
.. data:: SYSTEM = 0
Event manager system policy
.. data:: USER = 1
Event manager user policy
"""
SYSTEM = 0
USER = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManagerPolicyEnum']
class EventManagerPolicyModeEnum(Enum):
"""
EventManagerPolicyModeEnum
Event manager policy mode
.. data:: CISCO = 1
Cisco Signature
.. data:: TRUST = 2
Trust Signature
"""
CISCO = 1
TRUST = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManagerPolicyModeEnum']
class EventManagerPolicySecEnum(Enum):
"""
EventManagerPolicySecEnum
Event manager policy sec
.. data:: RSA_2048 = 2
Cisco Signature
.. data:: TRUST = 3
Trust Signature
"""
RSA_2048 = 2
TRUST = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManagerPolicySecEnum']
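# Illustrative sketch (not part of the generated bindings): building an
# EventManager configuration object with one registered policy, using only
# classes and leaves defined in this module. All values are made up, and
# pushing the object to a device (e.g. via a CRUD service) is out of scope.
def _illustrate_event_manager_config():
    event_manager = EventManager()
    event_manager.directory_user_policy = '/harddisk:/eem_policies'
    policy = EventManager.Policies.Policy()
    policy.policy_name = 'sample_policy.tcl'
    policy.username = 'fake-user'
    policy.persist_time = 3600
    policy.policy_type = EventManagerPolicyEnum.USER
    policy.checksum_type = EventManagerChecksumEnum.MD5
    event_manager.policies.policy.append(policy)
    return event_manager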
class EventManager(object):
"""
Event manager configuration
.. attribute:: directory_user_library
Path of the user policy library directory
**type**\: str
.. attribute:: directory_user_policy
Set event manager user policy directory
**type**\: str
.. attribute:: environments
Set an event manager global variable for event manager policies
**type**\: :py:class:`Environments <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_cfg.EventManager.Environments>`
.. attribute:: policies
Register an event manager policy
**type**\: :py:class:`Policies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_cfg.EventManager.Policies>`
.. attribute:: refresh_time
Set refresh time (in seconds) for policy username's AAA taskmap
**type**\: int
**range:** 10..4294967295
.. attribute:: schedule_suspend
Enable suspend policy scheduling
**type**\: bool
.. attribute:: scheduler_script
        scheduler class type
**type**\: :py:class:`SchedulerScript <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_cfg.EventManager.SchedulerScript>`
"""
_prefix = 'ha-eem-cfg'
_revision = '2015-07-30'
def __init__(self):
self.directory_user_library = None
self.directory_user_policy = None
self.environments = EventManager.Environments()
self.environments.parent = self
self.policies = EventManager.Policies()
self.policies.parent = self
self.refresh_time = None
self.schedule_suspend = None
self.scheduler_script = EventManager.SchedulerScript()
self.scheduler_script.parent = self
class Policies(object):
"""
Register an event manager policy
.. attribute:: policy
Name of the policy file
**type**\: list of :py:class:`Policy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_cfg.EventManager.Policies.Policy>`
"""
_prefix = 'ha-eem-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.policy = YList()
self.policy.parent = self
self.policy.name = 'policy'
class Policy(object):
"""
Name of the policy file
.. attribute:: policy_name <key>
Name of the policy file
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: check_sum_value
CheckSum Value
**type**\: str
.. attribute:: checksum_type
Specify Embedded Event Manager policy checksum
**type**\: :py:class:`EventManagerChecksumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_cfg.EventManagerChecksumEnum>`
.. attribute:: persist_time
Time of validity (in seconds) for cached AAA taskmap of username (default is 3600)
**type**\: int
**range:** 0..4294967295
**mandatory**\: True
.. attribute:: policy_security_level
Event Manager policy security Level
**type**\: :py:class:`EventManagerPolicySecEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_cfg.EventManagerPolicySecEnum>`
.. attribute:: policy_security_mode
Specify Embedded Event Manager policy security mode
**type**\: :py:class:`EventManagerPolicyModeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_cfg.EventManagerPolicyModeEnum>`
.. attribute:: policy_type
Event manager type of this policy
**type**\: :py:class:`EventManagerPolicyEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_cfg.EventManagerPolicyEnum>`
.. attribute:: username
A configured username
**type**\: str
**mandatory**\: True
"""
_prefix = 'ha-eem-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.policy_name = None
self.check_sum_value = None
self.checksum_type = None
self.persist_time = None
self.policy_security_level = None
self.policy_security_mode = None
self.policy_type = None
self.username = None
@property
def _common_path(self):
if self.policy_name is None:
raise YPYModelError('Key property policy_name is None')
return '/Cisco-IOS-XR-ha-eem-cfg:event-manager/Cisco-IOS-XR-ha-eem-cfg:policies/Cisco-IOS-XR-ha-eem-cfg:policy[Cisco-IOS-XR-ha-eem-cfg:policy-name = ' + str(self.policy_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.policy_name is not None:
return True
if self.check_sum_value is not None:
return True
if self.checksum_type is not None:
return True
if self.persist_time is not None:
return True
if self.policy_security_level is not None:
return True
if self.policy_security_mode is not None:
return True
if self.policy_type is not None:
return True
if self.username is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManager.Policies.Policy']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ha-eem-cfg:event-manager/Cisco-IOS-XR-ha-eem-cfg:policies'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.policy is not None:
for child_ref in self.policy:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManager.Policies']['meta_info']
class SchedulerScript(object):
"""
        scheduler class type
.. attribute:: thread_classes
        	scheduler thread class
**type**\: :py:class:`ThreadClasses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_cfg.EventManager.SchedulerScript.ThreadClasses>`
"""
_prefix = 'ha-eem-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.thread_classes = EventManager.SchedulerScript.ThreadClasses()
self.thread_classes.parent = self
class ThreadClasses(object):
"""
            scheduler thread class
.. attribute:: thread_class
            	scheduler class type argument
**type**\: list of :py:class:`ThreadClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_cfg.EventManager.SchedulerScript.ThreadClasses.ThreadClass>`
"""
_prefix = 'ha-eem-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.thread_class = YList()
self.thread_class.parent = self
self.thread_class.name = 'thread_class'
class ThreadClass(object):
"""
                scheduler class type argument
.. attribute:: thread_class_name <key>
                	Name of the scheduler thread class
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: num_threads
number of scheduler threads
**type**\: int
**range:** 1..5
**mandatory**\: True
"""
_prefix = 'ha-eem-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.thread_class_name = None
self.num_threads = None
@property
def _common_path(self):
if self.thread_class_name is None:
raise YPYModelError('Key property thread_class_name is None')
return '/Cisco-IOS-XR-ha-eem-cfg:event-manager/Cisco-IOS-XR-ha-eem-cfg:scheduler-script/Cisco-IOS-XR-ha-eem-cfg:thread-classes/Cisco-IOS-XR-ha-eem-cfg:thread-class[Cisco-IOS-XR-ha-eem-cfg:thread-class-name = ' + str(self.thread_class_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.thread_class_name is not None:
return True
if self.num_threads is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManager.SchedulerScript.ThreadClasses.ThreadClass']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ha-eem-cfg:event-manager/Cisco-IOS-XR-ha-eem-cfg:scheduler-script/Cisco-IOS-XR-ha-eem-cfg:thread-classes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.thread_class is not None:
for child_ref in self.thread_class:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManager.SchedulerScript.ThreadClasses']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ha-eem-cfg:event-manager/Cisco-IOS-XR-ha-eem-cfg:scheduler-script'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.thread_classes is not None and self.thread_classes._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManager.SchedulerScript']['meta_info']
class Environments(object):
"""
Set an event manager global variable for event
manager policies
.. attribute:: environment
Name of the global variable
**type**\: list of :py:class:`Environment <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_cfg.EventManager.Environments.Environment>`
"""
_prefix = 'ha-eem-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.environment = YList()
self.environment.parent = self
self.environment.name = 'environment'
class Environment(object):
"""
Name of the global variable
.. attribute:: environment_name <key>
Name of the global variable
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: environment_value
Value of the global variable
**type**\: str
**mandatory**\: True
"""
_prefix = 'ha-eem-cfg'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.environment_name = None
self.environment_value = None
@property
def _common_path(self):
if self.environment_name is None:
raise YPYModelError('Key property environment_name is None')
return '/Cisco-IOS-XR-ha-eem-cfg:event-manager/Cisco-IOS-XR-ha-eem-cfg:environments/Cisco-IOS-XR-ha-eem-cfg:environment[Cisco-IOS-XR-ha-eem-cfg:environment-name = ' + str(self.environment_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.environment_name is not None:
return True
if self.environment_value is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManager.Environments.Environment']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ha-eem-cfg:event-manager/Cisco-IOS-XR-ha-eem-cfg:environments'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.environment is not None:
for child_ref in self.environment:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManager.Environments']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ha-eem-cfg:event-manager'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.directory_user_library is not None:
return True
if self.directory_user_policy is not None:
return True
if self.environments is not None and self.environments._has_data():
return True
if self.policies is not None and self.policies._has_data():
return True
if self.refresh_time is not None:
return True
if self.schedule_suspend is not None:
return True
if self.scheduler_script is not None and self.scheduler_script._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ha_eem_cfg as meta
return meta._meta_table['EventManager']['meta_info']
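# --- Illustrative usage sketch (editor's addition, not part of the generated model) ---
# A minimal example of pushing the EventManager configuration above to a device
# with ydk-py's CRUD service. The device address, credentials and policy values
# are placeholders, and the provider/service imports are assumed from ydk-py
# conventions; exact signatures may differ between YDK releases.
def _example_configure_event_manager():
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider

    provider = NetconfServiceProvider(address='192.0.2.1', port=830,
                                      username='admin', password='admin')
    crud = CRUDService()

    event_manager = EventManager()
    event_manager.refresh_time = 1800                 # seconds, range 10..4294967295

    policy = EventManager.Policies.Policy()
    policy.policy_name = 'sl_sample.tcl'              # hypothetical policy file
    policy.username = 'eem-user'                      # mandatory per the schema above
    policy.persist_time = 3600                        # mandatory per the schema above
    event_manager.policies.policy.append(policy)

    crud.create(provider, event_manager)              # push the configuration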
|
abhikeshav/ydk-py
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ha_eem_cfg.py
|
Python
|
apache-2.0
| 18,997
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Gandi driver for compute
"""
import sys
from datetime import datetime
from libcloud.common.gandi import BaseGandiDriver, GandiException,\
NetworkInterface, IPAddress, Disk
from libcloud.compute.base import StorageVolume
from libcloud.compute.types import NodeState, Provider
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
NODE_STATE_MAP = {
'running': NodeState.RUNNING,
'halted': NodeState.TERMINATED,
'paused': NodeState.TERMINATED,
'locked': NodeState.TERMINATED,
'being_created': NodeState.PENDING,
'invalid': NodeState.UNKNOWN,
'legally_locked': NodeState.PENDING,
'deleted': NodeState.TERMINATED
}
NODE_PRICE_HOURLY_USD = 0.02
INSTANCE_TYPES = {
'small': {
'id': 'small',
'name': 'Small instance',
'cpu': 1,
'memory': 256,
'disk': 3,
'bandwidth': 10240,
},
'medium': {
'id': 'medium',
'name': 'Medium instance',
'cpu': 1,
'memory': 1024,
'disk': 20,
'bandwidth': 10240,
},
'large': {
'id': 'large',
'name': 'Large instance',
'cpu': 2,
'memory': 2048,
'disk': 50,
'bandwidth': 10240,
},
'x-large': {
'id': 'x-large',
'name': 'Extra Large instance',
'cpu': 4,
'memory': 4096,
'disk': 100,
'bandwidth': 10240,
},
}
class GandiNodeDriver(BaseGandiDriver, NodeDriver):
"""
Gandi node driver
"""
api_name = 'gandi'
friendly_name = 'Gandi.net'
website = 'http://www.gandi.net/'
country = 'FR'
type = Provider.GANDI
# TODO : which features to enable ?
features = {}
def __init__(self, *args, **kwargs):
"""
@inherits: L{NodeDriver.__init__}
"""
super(BaseGandiDriver, self).__init__(*args, **kwargs)
def _resource_info(self, type, id):
try:
obj = self.connection.request('hosting.%s.info' % type, int(id))
return obj.object
except Exception:
e = sys.exc_info()[1]
raise GandiException(1003, e)
return None
def _node_info(self, id):
return self._resource_info('vm', id)
def _volume_info(self, id):
return self._resource_info('disk', id)
# Generic methods for driver
def _to_node(self, vm):
return Node(
id=vm['id'],
name=vm['hostname'],
state=NODE_STATE_MAP.get(
vm['state'],
NodeState.UNKNOWN
),
public_ips=vm.get('ips', []),
private_ips=[],
driver=self,
extra={
'ai_active': vm.get('ai_active'),
'datacenter_id': vm.get('datacenter_id'),
'description': vm.get('description')
}
)
def _to_nodes(self, vms):
return [self._to_node(v) for v in vms]
def _to_volume(self, disk):
extra = {'can_snapshot': disk['can_snapshot']}
return StorageVolume(
id=disk['id'],
name=disk['name'],
size=int(disk['size']),
driver=self,
extra=extra)
def _to_volumes(self, disks):
return [self._to_volume(d) for d in disks]
def list_nodes(self):
vms = self.connection.request('hosting.vm.list').object
ips = self.connection.request('hosting.ip.list').object
for vm in vms:
vm['ips'] = []
for ip in ips:
if vm['ifaces_id'][0] == ip['iface_id']:
ip = ip.get('ip', None)
if ip:
vm['ips'].append(ip)
nodes = self._to_nodes(vms)
return nodes
def reboot_node(self, node):
op = self.connection.request('hosting.vm.reboot', int(node.id))
self._wait_operation(op.object['id'])
vm = self._node_info(int(node.id))
if vm['state'] == 'running':
return True
return False
def destroy_node(self, node):
vm = self._node_info(node.id)
if vm['state'] == 'running':
# Send vm_stop and wait for accomplish
op_stop = self.connection.request('hosting.vm.stop', int(node.id))
if not self._wait_operation(op_stop.object['id']):
raise GandiException(1010, 'vm.stop failed')
# Delete
op = self.connection.request('hosting.vm.delete', int(node.id))
if self._wait_operation(op.object['id']):
return True
return False
def deploy_node(self, **kwargs):
"""
deploy_node is not implemented for gandi driver
@rtype: C{bool}
"""
raise NotImplementedError(
'deploy_node not implemented for gandi driver')
def create_node(self, **kwargs):
"""
Create a new Gandi node
@keyword name: String with a name for this new node (required)
@type name: C{str}
@keyword image: OS Image to boot on node. (required)
@type image: L{NodeImage}
@keyword location: Which data center to create a node in. If empty,
undefined behavior will be selected. (optional)
@type location: L{NodeLocation}
@keyword size: The size of resources allocated to this node.
(required)
@type size: L{NodeSize}
@keyword login: user name to create for login on machine (required)
@type login: C{str}
@keyword password: password for user that'll be created (required)
@type password: C{str}
@keyword inet_family: version of ip to use, default 4 (optional)
@type inet_family: C{int}
@rtype: L{Node}
"""
if kwargs.get('login') is None or kwargs.get('password') is None:
raise GandiException(
1020, 'login and password must be defined for node creation')
location = kwargs.get('location')
if location and isinstance(location, NodeLocation):
dc_id = int(location.id)
else:
raise GandiException(
1021, 'location must be a subclass of NodeLocation')
size = kwargs.get('size')
if not size and not isinstance(size, NodeSize):
raise GandiException(
1022, 'size must be a subclass of NodeSize')
# If size name is in INSTANCE_TYPE we use new rating model
instance = INSTANCE_TYPES.get(size.id)
cores = instance['cpu'] if instance else int(size.id)
src_disk_id = int(kwargs['image'].id)
disk_spec = {
'datacenter_id': dc_id,
'name': 'disk_%s' % kwargs['name']
}
vm_spec = {
'datacenter_id': dc_id,
'hostname': kwargs['name'],
'login': kwargs['login'],
'password': kwargs['password'], # TODO : use NodeAuthPassword
'memory': int(size.ram),
'cores': cores,
'bandwidth': int(size.bandwidth),
'ip_version': kwargs.get('inet_family', 4),
}
# Call create_from helper api. Return 3 operations : disk_create,
# iface_create,vm_create
(op_disk, op_iface, op_vm) = self.connection.request(
'hosting.vm.create_from',
vm_spec, disk_spec, src_disk_id
).object
# We wait for vm_create to finish
if self._wait_operation(op_vm['id']):
# after successful operation, get ip information
# thru first interface
node = self._node_info(op_vm['vm_id'])
ifaces = node.get('ifaces')
if len(ifaces) > 0:
ips = ifaces[0].get('ips')
if len(ips) > 0:
node['ip'] = ips[0]['ip']
return self._to_node(node)
return None
def _to_image(self, img):
return NodeImage(
id=img['disk_id'],
name=img['label'],
driver=self.connection.driver
)
def list_images(self, location=None):
try:
if location:
filtering = {'datacenter_id': int(location.id)}
else:
filtering = {}
images = self.connection.request('hosting.image.list', filtering)
return [self._to_image(i) for i in images.object]
except Exception:
e = sys.exc_info()[1]
raise GandiException(1011, e)
def _to_size(self, id, size):
return NodeSize(
id=id,
name='%s cores' % id,
ram=size['memory'],
disk=size['disk'],
bandwidth=size['bandwidth'],
price=(self._get_size_price(size_id='1') * id),
driver=self.connection.driver,
)
def _instance_type_to_size(self, instance):
return NodeSize(
id=instance['id'],
name=instance['name'],
ram=instance['memory'],
disk=instance['disk'],
bandwidth=instance['bandwidth'],
price=self._get_size_price(size_id=instance['id']),
driver=self.connection.driver,
)
def list_instance_type(self, location=None):
return [self._instance_type_to_size(instance)
for name, instance in INSTANCE_TYPES.items()]
def list_sizes(self, location=None):
account = self.connection.request('hosting.account.info').object
if account.get('rating_enabled'):
# This account use new rating model
return self.list_instance_type(location)
# Look for available shares, and return a list of share_definition
available_res = account['resources']['available']
if available_res['shares'] == 0:
return None
else:
share_def = account['share_definition']
available_cores = available_res['cores']
# 0.75 core given when creating a server
max_core = int(available_cores + 0.75)
shares = []
if available_res['servers'] < 1:
# No server quota, no way
return shares
for i in range(1, max_core + 1):
                share = {'id': i}
share_is_available = True
for k in ['memory', 'disk', 'bandwidth']:
if share_def[k] * i > available_res[k]:
# We run out for at least one resource inside
share_is_available = False
else:
share[k] = share_def[k] * i
if share_is_available:
nb_core = i
shares.append(self._to_size(nb_core, share))
return shares
def _to_loc(self, loc):
return NodeLocation(
id=loc['id'],
name=loc['name'],
country=loc['country'],
driver=self
)
def list_locations(self):
res = self.connection.request('hosting.datacenter.list')
return [self._to_loc(l) for l in res.object]
def list_volumes(self):
"""
@rtype: C{list} of L{StorageVolume}
"""
res = self.connection.request('hosting.disk.list', {})
return self._to_volumes(res.object)
def create_volume(self, size, name, location=None, snapshot=None):
disk_param = {
'name': name,
'size': int(size),
'datacenter_id': int(location.id)
}
if snapshot:
op = self.connection.request('hosting.disk.create_from',
disk_param, int(snapshot.id))
else:
op = self.connection.request('hosting.disk.create', disk_param)
if self._wait_operation(op.object['id']):
disk = self._volume_info(op.object['disk_id'])
return self._to_volume(disk)
return None
def attach_volume(self, node, volume, device=None):
op = self.connection.request('hosting.vm.disk_attach',
int(node.id), int(volume.id))
if self._wait_operation(op.object['id']):
return True
return False
def detach_volume(self, node, volume):
"""
Detaches a volume from a node.
@param node: Node which should be used
@type node: L{Node}
@param volume: Volume to be detached
@type volume: L{StorageVolume}
@rtype: C{bool}
"""
op = self.connection.request('hosting.vm.disk_detach',
int(node.id), int(volume.id))
if self._wait_operation(op.object['id']):
return True
return False
def destroy_volume(self, volume):
op = self.connection.request('hosting.disk.delete', int(volume.id))
if self._wait_operation(op.object['id']):
return True
return False
def _to_iface(self, iface):
ips = []
for ip in iface.get('ips', []):
new_ip = IPAddress(
ip['id'],
NODE_STATE_MAP.get(
ip['state'],
NodeState.UNKNOWN
),
ip['ip'],
self.connection.driver,
version=ip.get('version'),
extra={'reverse': ip['reverse']}
)
ips.append(new_ip)
return NetworkInterface(
iface['id'],
NODE_STATE_MAP.get(
iface['state'],
NodeState.UNKNOWN
),
mac_address=None,
driver=self.connection.driver,
ips=ips,
node_id=iface.get('vm_id'),
extra={'bandwidth': iface['bandwidth']},
)
def _to_ifaces(self, ifaces):
return [self._to_iface(i) for i in ifaces]
def ex_list_interfaces(self):
"""
Specific method to list network interfaces
@rtype: C{list} of L{GandiNetworkInterface}
"""
ifaces = self.connection.request('hosting.iface.list').object
ips = self.connection.request('hosting.ip.list').object
for iface in ifaces:
iface['ips'] = list(
filter(lambda i: i['iface_id'] == iface['id'], ips))
return self._to_ifaces(ifaces)
def _to_disk(self, element):
disk = Disk(
id=element['id'],
state=NODE_STATE_MAP.get(
element['state'],
NodeState.UNKNOWN
),
name=element['name'],
driver=self.connection.driver,
size=element['size'],
extra={'can_snapshot': element['can_snapshot']}
)
return disk
def _to_disks(self, elements):
return [self._to_disk(el) for el in elements]
def ex_list_disks(self):
"""
Specific method to list all disk
@rtype: C{list} of L{GandiDisk}
"""
res = self.connection.request('hosting.disk.list', {})
return self._to_disks(res.object)
def ex_node_attach_disk(self, node, disk):
"""
Specific method to attach a disk to a node
@param node: Node which should be used
@type node: L{Node}
@param disk: Disk which should be used
@type disk: L{GandiDisk}
@rtype: C{bool}
"""
op = self.connection.request('hosting.vm.disk_attach',
int(node.id), int(disk.id))
if self._wait_operation(op.object['id']):
return True
return False
def ex_node_detach_disk(self, node, disk):
"""
Specific method to detach a disk from a node
@param node: Node which should be used
@type node: L{Node}
@param disk: Disk which should be used
@type disk: L{GandiDisk}
@rtype: C{bool}
"""
op = self.connection.request('hosting.vm.disk_detach',
int(node.id), int(disk.id))
if self._wait_operation(op.object['id']):
return True
return False
def ex_node_attach_interface(self, node, iface):
"""
Specific method to attach an interface to a node
@param node: Node which should be used
@type node: L{Node}
@param iface: Network interface which should be used
@type iface: L{GandiNetworkInterface}
@rtype: C{bool}
"""
op = self.connection.request('hosting.vm.iface_attach',
int(node.id), int(iface.id))
if self._wait_operation(op.object['id']):
return True
return False
def ex_node_detach_interface(self, node, iface):
"""
Specific method to detach an interface from a node
@param node: Node which should be used
@type node: L{Node}
@param iface: Network interface which should be used
@type iface: L{GandiNetworkInterface}
@rtype: C{bool}
"""
op = self.connection.request('hosting.vm.iface_detach',
int(node.id), int(iface.id))
if self._wait_operation(op.object['id']):
return True
return False
def ex_snapshot_disk(self, disk, name=None):
"""
Specific method to make a snapshot of a disk
@param disk: Disk which should be used
@type disk: L{GandiDisk}
@param name: Name which should be used
@type name: C{str}
@rtype: C{bool}
"""
if not disk.extra.get('can_snapshot'):
raise GandiException(1021, 'Disk %s can\'t snapshot' % disk.id)
if not name:
suffix = datetime.today().strftime('%Y%m%d')
name = 'snap_%s' % (suffix)
op = self.connection.request(
'hosting.disk.create_from',
{'name': name, 'type': 'snapshot', },
int(disk.id),
)
if self._wait_operation(op.object['id']):
return True
return False
def ex_update_disk(self, disk, new_size=None, new_name=None):
"""Specific method to update size or name of a disk
WARNING: if a server is attached it'll be rebooted
@param disk: Disk which should be used
@type disk: L{GandiDisk}
@param new_size: New size
@type new_size: C{int}
@param new_name: New name
@type new_name: C{str}
@rtype: C{bool}
"""
params = {}
if new_size:
params.update({'size': new_size})
if new_name:
params.update({'name': new_name})
op = self.connection.request('hosting.disk.update',
int(disk.id),
params)
if self._wait_operation(op.object['id']):
return True
return False
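# --- Illustrative usage sketch (editor's addition, not part of the original driver) ---
# A minimal example of driving this class directly; the API key is a placeholder.
# In practice the driver is usually obtained via
# libcloud.compute.providers.get_driver(Provider.GANDI).
def _example_list_gandi_resources():
    driver = GandiNodeDriver('my-gandi-api-key')
    for node in driver.list_nodes():
        print('%s (%s): %s' % (node.name, node.id, node.state))
    # list_sizes() can return None when the account has no shares left
    for size in driver.list_sizes() or []:
        print('%s: %s MB RAM, %s GB disk' % (size.id, size.ram, size.disk))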
|
Jc2k/libcloud
|
libcloud/compute/drivers/gandi.py
|
Python
|
apache-2.0
| 20,033
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.neutron import neutron
from heat.engine import scheduler
from heat.engine import support
class HealthMonitor(neutron.NeutronResource):
"""
A resource for managing health monitors for load balancers in Neutron.
"""
PROPERTIES = (
DELAY, TYPE, MAX_RETRIES, TIMEOUT, ADMIN_STATE_UP,
HTTP_METHOD, EXPECTED_CODES, URL_PATH,
) = (
'delay', 'type', 'max_retries', 'timeout', 'admin_state_up',
'http_method', 'expected_codes', 'url_path',
)
ATTRIBUTES = (
ADMIN_STATE_UP_ATTR, DELAY_ATTR, EXPECTED_CODES_ATTR, HTTP_METHOD_ATTR,
MAX_RETRIES_ATTR, TIMEOUT_ATTR, TYPE_ATTR, URL_PATH_ATTR, TENANT_ID,
SHOW,
) = (
'admin_state_up', 'delay', 'expected_codes', 'http_method',
'max_retries', 'timeout', 'type', 'url_path', 'tenant_id',
'show',
)
properties_schema = {
DELAY: properties.Schema(
properties.Schema.INTEGER,
_('The minimum time in seconds between regular connections of '
'the member.'),
required=True,
update_allowed=True
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('One of predefined health monitor types.'),
required=True,
constraints=[
constraints.AllowedValues(['PING', 'TCP', 'HTTP', 'HTTPS']),
]
),
MAX_RETRIES: properties.Schema(
properties.Schema.INTEGER,
_('Number of permissible connection failures before changing the '
'member status to INACTIVE.'),
required=True,
update_allowed=True
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Maximum number of seconds for a monitor to wait for a '
'connection to be established before it times out.'),
required=True,
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of the health monitor.'),
default=True,
update_allowed=True
),
HTTP_METHOD: properties.Schema(
properties.Schema.STRING,
_('The HTTP method used for requests by the monitor of type '
'HTTP.'),
update_allowed=True
),
EXPECTED_CODES: properties.Schema(
properties.Schema.STRING,
_('The list of HTTP status codes expected in response from the '
'member to declare it healthy.'),
update_allowed=True
),
URL_PATH: properties.Schema(
properties.Schema.STRING,
_('The HTTP path used in the HTTP request used by the monitor to '
'test a member health.'),
update_allowed=True
),
}
attributes_schema = {
ADMIN_STATE_UP_ATTR: attributes.Schema(
_('The administrative state of this health monitor.')
),
DELAY_ATTR: attributes.Schema(
_('The minimum time in seconds between regular connections '
'of the member.')
),
EXPECTED_CODES_ATTR: attributes.Schema(
_('The list of HTTP status codes expected in response '
'from the member to declare it healthy.')
),
HTTP_METHOD_ATTR: attributes.Schema(
_('The HTTP method used for requests by the monitor of type HTTP.')
),
MAX_RETRIES_ATTR: attributes.Schema(
_('Number of permissible connection failures before changing '
'the member status to INACTIVE.')
),
TIMEOUT_ATTR: attributes.Schema(
_('Maximum number of seconds for a monitor to wait for a '
'connection to be established before it times out.')
),
TYPE_ATTR: attributes.Schema(
_('One of predefined health monitor types.')
),
URL_PATH_ATTR: attributes.Schema(
_('The HTTP path used in the HTTP request used by the monitor '
'to test a member health.')
),
TENANT_ID: attributes.Schema(
_('Tenant owning the health monitor.')
),
SHOW: attributes.Schema(
_('All attributes.')
),
}
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
health_monitor = self.neutron().create_health_monitor(
{'health_monitor': properties})['health_monitor']
self.resource_id_set(health_monitor['id'])
def _show_resource(self):
return self.neutron().show_health_monitor(
self.resource_id)['health_monitor']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.neutron().update_health_monitor(
self.resource_id, {'health_monitor': prop_diff})
def handle_delete(self):
try:
self.neutron().delete_health_monitor(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
class Pool(neutron.NeutronResource):
"""
A resource for managing load balancer pools in Neutron.
"""
PROPERTIES = (
PROTOCOL, SUBNET_ID, SUBNET, LB_METHOD, NAME, DESCRIPTION,
ADMIN_STATE_UP, VIP, MONITORS,
) = (
'protocol', 'subnet_id', 'subnet', 'lb_method', 'name', 'description',
'admin_state_up', 'vip', 'monitors',
)
_VIP_KEYS = (
VIP_NAME, VIP_DESCRIPTION, VIP_SUBNET, VIP_ADDRESS,
VIP_CONNECTION_LIMIT, VIP_PROTOCOL_PORT,
VIP_SESSION_PERSISTENCE, VIP_ADMIN_STATE_UP,
) = (
'name', 'description', 'subnet', 'address',
'connection_limit', 'protocol_port',
'session_persistence', 'admin_state_up',
)
_VIP_SESSION_PERSISTENCE_KEYS = (
VIP_SESSION_PERSISTENCE_TYPE, VIP_SESSION_PERSISTENCE_COOKIE_NAME,
) = (
'type', 'cookie_name',
)
ATTRIBUTES = (
ADMIN_STATE_UP_ATTR, NAME_ATTR, PROTOCOL_ATTR, SUBNET_ID_ATTR,
LB_METHOD_ATTR, DESCRIPTION_ATTR, TENANT_ID, VIP_ATTR,
) = (
'admin_state_up', 'name', 'protocol', 'subnet_id',
'lb_method', 'description', 'tenant_id', 'vip',
)
properties_schema = {
PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('Protocol for balancing.'),
required=True,
constraints=[
constraints.AllowedValues(['TCP', 'HTTP', 'HTTPS']),
]
),
SUBNET_ID: properties.Schema(
properties.Schema.STRING,
support_status=support.SupportStatus(
support.DEPRECATED,
_('Use property %s.') % SUBNET),
required=False
),
SUBNET: properties.Schema(
properties.Schema.STRING,
_('The subnet for the port on which the members '
'of the pool will be connected.'),
required=False
),
LB_METHOD: properties.Schema(
properties.Schema.STRING,
_('The algorithm used to distribute load between the members of '
'the pool.'),
required=True,
constraints=[
constraints.AllowedValues(['ROUND_ROBIN',
'LEAST_CONNECTIONS', 'SOURCE_IP']),
],
update_allowed=True
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the pool.')
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the pool.'),
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of this pool.'),
default=True,
update_allowed=True
),
VIP: properties.Schema(
properties.Schema.MAP,
_('IP address and port of the pool.'),
schema={
VIP_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the vip.')
),
VIP_DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the vip.')
),
VIP_SUBNET: properties.Schema(
properties.Schema.STRING,
_('Subnet of the vip.')
),
VIP_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('IP address of the vip.')
),
VIP_CONNECTION_LIMIT: properties.Schema(
properties.Schema.INTEGER,
_('The maximum number of connections per second '
'allowed for the vip.')
),
VIP_PROTOCOL_PORT: properties.Schema(
properties.Schema.INTEGER,
_('TCP port on which to listen for client traffic '
'that is associated with the vip address.'),
required=True
),
VIP_SESSION_PERSISTENCE: properties.Schema(
properties.Schema.MAP,
_('Configuration of session persistence.'),
schema={
VIP_SESSION_PERSISTENCE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Method of implementation of session '
'persistence feature.'),
required=True,
constraints=[constraints.AllowedValues(
['SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE']
)]
),
VIP_SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the cookie, '
'required if type is APP_COOKIE.')
)
}
),
VIP_ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of this vip.'),
default=True
),
},
required=True
),
MONITORS: properties.Schema(
properties.Schema.LIST,
_('List of health monitors associated with the pool.'),
default=[],
update_allowed=True
),
}
attributes_schema = {
ADMIN_STATE_UP_ATTR: attributes.Schema(
_('The administrative state of this pool.')
),
NAME_ATTR: attributes.Schema(
_('Name of the pool.')
),
PROTOCOL_ATTR: attributes.Schema(
_('Protocol to balance.')
),
SUBNET_ID_ATTR: attributes.Schema(
_('The subnet for the port on which the members of the pool '
'will be connected.')
),
LB_METHOD_ATTR: attributes.Schema(
_('The algorithm used to distribute load between the members '
'of the pool.')
),
DESCRIPTION_ATTR: attributes.Schema(
_('Description of the pool.')
),
TENANT_ID: attributes.Schema(
_('Tenant owning the pool.')
),
VIP_ATTR: attributes.Schema(
_('Vip associated with the pool.')
),
}
def validate(self):
res = super(Pool, self).validate()
if res:
return res
self._validate_depr_property_required(
self.properties, self.SUBNET, self.SUBNET_ID)
session_p = self.properties[self.VIP].get(self.VIP_SESSION_PERSISTENCE)
if session_p is None:
# session persistence is not configured, skip validation
return
persistence_type = session_p[self.VIP_SESSION_PERSISTENCE_TYPE]
if persistence_type == 'APP_COOKIE':
if session_p.get(self.VIP_SESSION_PERSISTENCE_COOKIE_NAME):
return
msg = _('Property cookie_name is required, when '
'session_persistence type is set to APP_COOKIE.')
raise exception.StackValidationFailed(message=msg)
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
self.client_plugin().resolve_subnet(
properties, self.SUBNET, 'subnet_id')
vip_properties = properties.pop(self.VIP)
monitors = properties.pop(self.MONITORS)
client = self.neutron()
pool = client.create_pool({'pool': properties})['pool']
self.resource_id_set(pool['id'])
for monitor in monitors:
client.associate_health_monitor(
pool['id'], {'health_monitor': {'id': monitor}})
vip_arguments = self.prepare_properties(
vip_properties,
'%s.vip' % (self.name,))
session_p = vip_arguments.get(self.VIP_SESSION_PERSISTENCE)
if session_p is not None:
prepared_props = self.prepare_properties(session_p, None)
vip_arguments['session_persistence'] = prepared_props
vip_arguments['protocol'] = self.properties[self.PROTOCOL]
if vip_arguments.get(self.VIP_SUBNET) is None:
vip_arguments['subnet_id'] = properties[self.SUBNET_ID]
else:
vip_arguments['subnet_id'] = self.client_plugin().resolve_subnet(
vip_arguments, self.VIP_SUBNET, 'subnet_id')
vip_arguments['pool_id'] = pool['id']
vip = client.create_vip({'vip': vip_arguments})['vip']
self.metadata_set({'vip': vip['id']})
def _show_resource(self):
return self.neutron().show_pool(self.resource_id)['pool']
def check_create_complete(self, data):
attributes = self._show_resource()
status = attributes['status']
if status == 'PENDING_CREATE':
return False
elif status == 'ACTIVE':
vip_attributes = self.neutron().show_vip(
self.metadata_get()['vip'])['vip']
vip_status = vip_attributes['status']
if vip_status == 'PENDING_CREATE':
return False
if vip_status == 'ACTIVE':
return True
if vip_status == 'ERROR':
raise resource.ResourceInError(
resource_status=vip_status,
status_reason=_('error in vip'))
raise resource.ResourceUnknownStatus(
resource_status=vip_status,
result=_('Pool creation failed due to vip'))
elif status == 'ERROR':
raise resource.ResourceInError(
resource_status=status,
status_reason=_('error in pool'))
else:
raise resource.ResourceUnknownStatus(
resource_status=status,
result=_('Pool creation failed'))
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
client = self.neutron()
if self.MONITORS in prop_diff:
monitors = set(prop_diff.pop(self.MONITORS))
old_monitors = set(self.properties[self.MONITORS])
for monitor in old_monitors - monitors:
client.disassociate_health_monitor(self.resource_id,
monitor)
for monitor in monitors - old_monitors:
client.associate_health_monitor(
self.resource_id, {'health_monitor': {'id': monitor}})
if prop_diff:
client.update_pool(self.resource_id, {'pool': prop_diff})
def _resolve_attribute(self, name):
if name == self.VIP_ATTR:
return self.neutron().show_vip(self.metadata_get()['vip'])['vip']
return super(Pool, self)._resolve_attribute(name)
def _confirm_vip_delete(self):
client = self.neutron()
while True:
try:
yield
client.show_vip(self.metadata_get()['vip'])
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
break
def handle_delete(self):
checkers = []
if self.metadata_get():
try:
self.neutron().delete_vip(self.metadata_get()['vip'])
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
checkers.append(scheduler.TaskRunner(self._confirm_vip_delete))
try:
self.neutron().delete_pool(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
checkers.append(scheduler.TaskRunner(self._confirm_delete))
return checkers
def check_delete_complete(self, checkers):
'''Push all checkers to completion in list order.'''
for checker in checkers:
if not checker.started():
checker.start()
if not checker.step():
return False
return True
class PoolMember(neutron.NeutronResource):
"""
A resource to handle load balancer members.
"""
PROPERTIES = (
POOL_ID, ADDRESS, PROTOCOL_PORT, WEIGHT, ADMIN_STATE_UP,
) = (
'pool_id', 'address', 'protocol_port', 'weight', 'admin_state_up',
)
ATTRIBUTES = (
ADMIN_STATE_UP_ATTR, TENANT_ID, WEIGHT_ATTR, ADDRESS_ATTR,
POOL_ID_ATTR, PROTOCOL_PORT_ATTR, SHOW,
) = (
'admin_state_up', 'tenant_id', 'weight', 'address',
'pool_id', 'protocol_port', 'show',
)
properties_schema = {
POOL_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the load balancing pool.'),
required=True,
update_allowed=True
),
ADDRESS: properties.Schema(
properties.Schema.STRING,
_('IP address of the pool member on the pool network.'),
required=True
),
PROTOCOL_PORT: properties.Schema(
properties.Schema.INTEGER,
_('TCP port on which the pool member listens for requests or '
'connections.'),
required=True,
constraints=[
constraints.Range(0, 65535),
]
),
WEIGHT: properties.Schema(
properties.Schema.INTEGER,
_('Weight of pool member in the pool (default to 1).'),
constraints=[
constraints.Range(0, 256),
],
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of the pool member.'),
default=True
),
}
attributes_schema = {
ADMIN_STATE_UP_ATTR: attributes.Schema(
_('The administrative state of this pool member.')
),
TENANT_ID: attributes.Schema(
_('Tenant owning the pool member.')
),
WEIGHT_ATTR: attributes.Schema(
_('Weight of the pool member in the pool.')
),
ADDRESS_ATTR: attributes.Schema(
_('IP address of the pool member.')
),
POOL_ID_ATTR: attributes.Schema(
_('The ID of the load balancing pool.')
),
PROTOCOL_PORT_ATTR: attributes.Schema(
_('TCP port on which the pool member listens for requests or '
'connections.')
),
SHOW: attributes.Schema(
_('All attributes.')
),
}
def handle_create(self):
pool = self.properties[self.POOL_ID]
client = self.neutron()
protocol_port = self.properties[self.PROTOCOL_PORT]
address = self.properties[self.ADDRESS]
admin_state_up = self.properties[self.ADMIN_STATE_UP]
weight = self.properties.get(self.WEIGHT)
params = {
'pool_id': pool,
'address': address,
'protocol_port': protocol_port,
'admin_state_up': admin_state_up
}
if weight is not None:
params['weight'] = weight
member = client.create_member({'member': params})['member']
self.resource_id_set(member['id'])
def _show_resource(self):
return self.neutron().show_member(self.resource_id)['member']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.neutron().update_member(
self.resource_id, {'member': prop_diff})
def handle_delete(self):
client = self.neutron()
try:
client.delete_member(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return self._delete_task()
class LoadBalancer(resource.Resource):
"""
A resource to link a neutron pool with servers.
"""
PROPERTIES = (
POOL_ID, PROTOCOL_PORT, MEMBERS,
) = (
'pool_id', 'protocol_port', 'members',
)
properties_schema = {
POOL_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the load balancing pool.'),
required=True,
update_allowed=True
),
PROTOCOL_PORT: properties.Schema(
properties.Schema.INTEGER,
_('Port number on which the servers are running on the members.'),
required=True
),
MEMBERS: properties.Schema(
properties.Schema.LIST,
_('The list of Nova server IDs load balanced.'),
update_allowed=True
),
}
default_client_name = 'neutron'
def handle_create(self):
pool = self.properties[self.POOL_ID]
client = self.neutron()
protocol_port = self.properties[self.PROTOCOL_PORT]
for member in self.properties[self.MEMBERS] or []:
address = self.client_plugin('nova').server_to_ipaddress(member)
lb_member = client.create_member({
'member': {
'pool_id': pool,
'address': address,
'protocol_port': protocol_port}})['member']
self.data_set(member, lb_member['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
new_props = json_snippet.properties(self.properties_schema,
self.context)
# Valid use cases are:
# - Membership controlled by members property in template
# - Empty members property in template; membership controlled by
# "updates" triggered from autoscaling group.
# Mixing the two will lead to undefined behaviour.
if (self.MEMBERS in prop_diff and
(self.properties[self.MEMBERS] is not None or
new_props[self.MEMBERS] is not None)):
members = set(new_props[self.MEMBERS] or [])
rd_members = self.data()
old_members = set(rd_members.keys())
client = self.neutron()
for member in old_members - members:
member_id = rd_members[member]
try:
client.delete_member(member_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
self.data_delete(member)
pool = self.properties[self.POOL_ID]
protocol_port = self.properties[self.PROTOCOL_PORT]
for member in members - old_members:
address = self.client_plugin('nova').server_to_ipaddress(
member)
lb_member = client.create_member({
'member': {
'pool_id': pool,
'address': address,
'protocol_port': protocol_port}})['member']
self.data_set(member, lb_member['id'])
def handle_delete(self):
client = self.neutron()
for member, member_id in self.data().items():
try:
client.delete_member(member_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
self.data_delete(member)
def resource_mapping():
return {
'OS::Neutron::HealthMonitor': HealthMonitor,
'OS::Neutron::Pool': Pool,
'OS::Neutron::PoolMember': PoolMember,
'OS::Neutron::LoadBalancer': LoadBalancer,
}
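# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The nested structure an OS::Neutron::Pool resource's properties are expected
# to follow, based on the schemas above. All values are hypothetical; note that
# 'cookie_name' is mandatory when the session_persistence type is APP_COOKIE
# (enforced by Pool.validate()).
_EXAMPLE_POOL_PROPERTIES = {
    'protocol': 'HTTP',                     # one of TCP, HTTP, HTTPS
    'subnet': 'private-subnet',
    'lb_method': 'ROUND_ROBIN',             # or LEAST_CONNECTIONS, SOURCE_IP
    'monitors': ['<health-monitor-uuid>'],
    'vip': {
        'name': 'web-vip',
        'protocol_port': 80,                # required
        'session_persistence': {
            'type': 'APP_COOKIE',
            'cookie_name': 'JSESSIONID',    # required for APP_COOKIE
        },
    },
}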
|
redhat-openstack/heat
|
heat/engine/resources/neutron/loadbalancer.py
|
Python
|
apache-2.0
| 25,830
|
################################################################################
##
## Verifying that Python can define features as categorical or continuous
##
################################################################################
import sys, os
sys.path.insert(1, "../../")
import h2o
def continuous_or_categorical(ip, port):
# connect to h2o
h2o.init(ip, port)
aa = {
'h1': [1, 8, 4, 3, 6],
'h2': ["fish", "cat", "fish", "dog", "bird"],
'h3': [0, 1, 0, 0, 1]
}
df_hex = h2o.H2OFrame(python_obj = aa)
df_hex.show()
df_hex.summary()
assert (not df_hex['h1'].isfactor())
assert (df_hex['h2'].isfactor())
assert (not df_hex['h3'].isfactor())
df_hex['h1'] = df_hex['h1'].asfactor()
df_hex['h2'] = df_hex['h2'].asfactor()
df_hex['h3'] = df_hex['h3'].asfactor()
df_hex.show()
df_hex.summary()
assert (df_hex['h1'].isfactor())
assert (df_hex['h2'].isfactor())
assert (df_hex['h3'].isfactor())
df_hex['h1'] = df_hex['h1'].asnumeric()
df_hex['h2'] = df_hex['h2'].asnumeric()
df_hex['h3'] = df_hex['h3'].asnumeric()
df_hex.show()
df_hex.summary()
assert (not df_hex['h1'].isfactor())
assert (not df_hex['h2'].isfactor())
assert (not df_hex['h3'].isfactor())
if __name__ == "__main__":
h2o.run_test(sys.argv, continuous_or_categorical)
|
ChristosChristofidis/h2o-3
|
h2o-py/tests/testdir_jira/pyunit_hexdev_29_categorical_continuous.py
|
Python
|
apache-2.0
| 1,318
|
import logging
import pytest
import sdk_auth
import sdk_cmd
import sdk_install
import sdk_utils
from security import transport_encryption
from tests import auth
from tests import client
from tests import config
log = logging.getLogger(__name__)
pytestmark = pytest.mark.skipif(
sdk_utils.is_open_dcos(), reason="Feature only supported in DC/OS EE"
)
@pytest.fixture(scope="module", autouse=True)
def service_account(configure_security):
"""
Sets up a service account for use with TLS.
"""
try:
name = config.SERVICE_NAME
service_account_info = transport_encryption.setup_service_account(name)
yield service_account_info
finally:
transport_encryption.cleanup_service_account(config.SERVICE_NAME, service_account_info)
@pytest.fixture(scope="module", autouse=True)
def kerberos(configure_security):
try:
kerberos_env = sdk_auth.KerberosEnvironment()
principals = auth.get_service_principals(config.SERVICE_NAME, kerberos_env.get_realm())
kerberos_env.add_principals(principals)
kerberos_env.finalize()
yield kerberos_env
finally:
kerberos_env.cleanup()
@pytest.fixture(scope="module", autouse=True)
def kafka_server(kerberos, service_account):
"""
A pytest fixture that installs a Kerberized kafka service.
On teardown, the service is uninstalled.
"""
service_kerberos_options = {
"service": {
"name": config.SERVICE_NAME,
"service_account": service_account["name"],
"service_account_secret": service_account["secret"],
"security": {
"kerberos": {
"enabled": True,
"kdc": {"hostname": kerberos.get_host(), "port": int(kerberos.get_port())},
"realm": kerberos.get_realm(),
"keytab_secret": kerberos.get_keytab_path(),
},
"transport_encryption": {
"enabled": True,
"ciphers": "TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_CBC_SHA256,TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384",
},
},
}
}
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
try:
sdk_install.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_BROKER_COUNT,
additional_options=service_kerberos_options,
timeout_seconds=30 * 60,
)
yield {**service_kerberos_options, **{"package_name": config.PACKAGE_NAME}}
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.fixture(scope="module", autouse=True)
def kafka_client(kerberos):
try:
kafka_client = client.KafkaClient(
"kafka-client", config.PACKAGE_NAME, config.SERVICE_NAME, kerberos
)
kafka_client.install()
# TODO: This flag should be set correctly.
kafka_client._is_tls = True
transport_encryption.create_tls_artifacts(cn="client", marathon_task=kafka_client.get_id())
yield kafka_client
finally:
kafka_client.uninstall()
@pytest.mark.dcos_min_version("1.10")
@sdk_utils.dcos_ee_only
@pytest.mark.sanity
def test_client_can_read_and_write(kafka_client: client.KafkaClient, kafka_server, kerberos):
topic_name = "tls.topic"
sdk_cmd.svc_cli(
kafka_server["package_name"],
kafka_server["service"]["name"],
"topic create {}".format(topic_name),
parse_json=True,
)
kafka_client.connect()
user = "client"
write_success, read_successes, _ = kafka_client.can_write_and_read(user, topic_name)
assert write_success, "Write failed (user={})".format(user)
assert read_successes, (
"Read failed (user={}): "
"MESSAGES={} "
"read_successes={}".format(user, kafka_client.MESSAGES, read_successes)
)
|
mesosphere/dcos-kafka-service
|
frameworks/kafka/tests/test_ssl_kerberos_auth.py
|
Python
|
apache-2.0
| 4,284
|
import json
import random
from datetime import date, datetime, time, timedelta
import requests
def get_config(config_category, config_name):
with open('/algernon', 'r') as secret_file:
json_string = secret_file.read()
config_dict = json.loads(json_string)
return config_dict[config_category][config_name]
def get_secret(secret_category, secret_name):
try:
with open('/run/secrets/alg_secrets', 'r') as secret_file:
json_string = secret_file.read()
try:
secret_dict = json.loads(json_string)
return secret_dict[secret_category][secret_name]
except ValueError:
return json_string
except IOError:
return None
def get_audit_dates():
today = date.today()
last_monday = today + timedelta(days=-today.weekday())
next_sunday = last_monday + timedelta(days=6)
return {'start_date': last_monday, 'end_date': next_sunday}
def select_unique_values(target_dict, target_field):
unique_values = set()
for key in target_dict:
try:
target_value = target_dict[key][target_field]
unique_values.add(target_value)
except KeyError:
print('specified field not in top level of dictionary')
return unique_values
return unique_values
def select_random_service(possible_targets):
target_id = random.choice(list(possible_targets.keys()))
client_id = possible_targets[target_id]['client_id']
return {'clientvisit_id': target_id, "client_id": client_id}
def py_date_to_credible_date(py_date):
credible_date = py_date.strftime('%m/%d/%Y')
return credible_date
def humanize_red_x_package(red_x_package):
human_readable = ''
for red_x_reason in red_x_package:
        if red_x_reason == 'hotwords':
human_readable += ' identified clinical hotwords: '
for field in red_x_package[red_x_reason]:
human_readable += ' in ' + field + '('
for hotword in red_x_package[red_x_reason][field]:
human_readable += hotword + ', '
human_readable = human_readable[0:len(human_readable) - 2]
human_readable += ')'
human_readable += '\n'
        elif red_x_reason == 'clones':
human_readable += ' note contains cloned elements in the following fields: '
for match_type in red_x_package[red_x_reason]:
human_readable += match_type + ' ('
clone_ids = red_x_package[red_x_reason][match_type]
if len(clone_ids) > 3:
human_readable += str(clone_ids[0:3]) + ' + ' + str(len(clone_ids) - 3) + ' others), '
else:
for clone_id in clone_ids:
human_readable += (str(clone_id) + ', ')
human_readable = human_readable[0:len(human_readable) - 2]
human_readable += ')'
human_readable = human_readable[0:len(human_readable) - 2]
human_readable += '\n'
        elif red_x_reason == 'ghosts':
human_readable += ' potential ghost note: '
for ghost_trigger in red_x_package[red_x_reason]:
                if ghost_trigger == 'character_results':
human_readable += ' very few characters in '
for field_name in red_x_package[red_x_reason][ghost_trigger]:
human_readable += (field_name + ', ')
human_readable = human_readable[0:len(human_readable) - 2]
                elif ghost_trigger == 'word_results':
human_readable += ' very few words in '
for field_name in red_x_package[red_x_reason][ghost_trigger]:
human_readable += (field_name + ', ')
human_readable = human_readable[0:len(human_readable) - 2]
                elif ghost_trigger == 'spelling':
human_readable += ' lots of non-standard words in '
for field_name in red_x_package[red_x_reason][ghost_trigger]:
human_readable += (field_name + ', ')
human_readable = human_readable[0:len(human_readable) - 2]
human_readable += '\n'
        elif red_x_reason == 'tx_plan':
if 'expired' in red_x_package[red_x_reason].keys():
human_readable += ' tx plan is expired '
else:
human_readable += ' something off with tx plan '
human_readable += '\n'
else:
human_readable += str(red_x_reason)
return human_readable.rstrip()
def requests_cookie_jar_to_json(cookie_jar):
cookie = cookie_jar._cookies['.crediblebh.com']['/']['cbh']
tidy_cookie = {
'domain': cookie.domain,
'name': cookie.name,
'path': cookie.path,
'value': cookie.value,
'version': cookie.version
}
return json.dumps(tidy_cookie)
def json_to_requests_cookie_jar(voorhees_cookies):
cookie_dict = json.loads(voorhees_cookies)
jar = requests.cookies.RequestsCookieJar()
jar.set(
domain='MBI',
name=cookie_dict['name'],
path=cookie_dict['path'],
value=cookie_dict['value'],
version=cookie_dict['version']
)
return jar
def credible_backend_datetime_to_credible_frontend_datetime(backend_datetime):
py_datetime = credible_datetime_to_py_date(backend_datetime)
return py_datetime_to_credible_human_datetime(py_datetime)
def py_timestamp_to_mysql_timestamp(py_timestamp):
mysql_timestamp = datetime.fromtimestamp(py_timestamp).strftime('%Y-%m-%d %H:%M:%S')
return mysql_timestamp
def py_datetime_to_credible_human_datetime(py_datetime):
return py_datetime.strftime('%m/%d/%y %I:%M:%S %p')
def credible_datetime_to_iso_date(credible_datetime):
py_date = credible_datetime_to_py_date(credible_datetime)
return py_date.strftime('%Y%m%d')
def credible_datetime_to_py_date(credible_datetime):
py_datetime = credible_datetime_to_py_datetime(credible_datetime)
return py_datetime.date()
def credible_datetime_to_py_datetime(credible_datetime):
credible_datetime = credible_datetime.replace('-04:00', '')
py_datetime = datetime.strptime(credible_datetime, '%Y-%m-%dT%H:%M:%S')
return py_datetime
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date, time)):
serial = obj.isoformat()
return serial
raise TypeError("Type %s not serializable" % type(obj))
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def timedelta_to_time(target):
return (datetime.min + target).time()
def json_time_to_credible_time(json_time):
py_time = datetime.strptime(json_time, '%H:%M:%S').time()
credible_time = py_time.strftime('%I:%M %p')
return credible_time
def json_date_to_credible_date(json_date):
py_date = datetime.strptime(json_date, '%Y-%m-%d')
credible_date = py_date.strftime('%m/%d/%y')
return credible_date
def calculate_missing_punch(time_string, hours):
py_time = datetime.strptime(time_string, '%H:%M:%S')
advanced_date = py_time + timedelta(hours=hours)
advanced_time = advanced_date.time()
credible_time = advanced_time.strftime('%I:%M %p')
return credible_time
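# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Demonstrates the JSON serializer hook and a few of the date/time helpers above;
# the input values are arbitrary examples.
if __name__ == '__main__':
    audit = get_audit_dates()
    # date objects are not JSON-serializable by default; json_serial handles them
    print(json.dumps(audit, default=json_serial))
    print(py_date_to_credible_date(audit['start_date']))   # e.g. '06/03/2019'
    print(json_time_to_credible_time('14:30:00'))          # '02:30 PM'
    print(calculate_missing_punch('09:15:00', hours=8))    # '05:15 PM'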
|
jkcubeta/algernon
|
src/overwatch/src/overwatch/alg_tasks/alg_utils.py
|
Python
|
apache-2.0
| 7,463
|
import numpy as np
import rospy
from std_msgs.msg import Float32
class FloatHandler(object):
"""
Handler for ROS topics of type: std_msgs/Float32
Args:
topic_name: Name of ROS topic to be subscribed
buffer_size: Variable buffer, depend on frame rate of topic, default: 500
queue_size: Subscriber queue_size
"""
def __init__(self, topic_name, buffer_size=500, queue_size=10):
self.data_msg = Float32()
self.data = 0.0
self.topic_float = topic_name
self.queue_size = queue_size
self.buffer_size = buffer_size
self.counter = 0
self.buffer = np.zeros([self.buffer_size, 1])
self.sub = rospy.Subscriber(self.topic_float, Float32, self.callback, queue_size=self.queue_size)
def callback(self, msg):
self.data_msg = msg
if self.counter < self.buffer_size:
self.buffer[self.counter] = [self.data_msg.data]
else:
rospy.loginfo("FloatHandler for: " + self.topic_float + " has reach buffer size.")
self.counter += 1
def get_value(self):
if self.counter > 0:
self.data = np.sum(self.buffer[:, 0]) / self.counter
self.buffer = np.zeros([self.buffer_size, 1])
self.counter = 0
return self.data
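# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Subscribes to a hypothetical Float32 topic and logs the averaged value once per
# second; the topic name and rates are placeholders.
if __name__ == '__main__':
    rospy.init_node('float_handler_example', anonymous=True)
    handler = FloatHandler('/bicycle/speed', buffer_size=500)
    rate = rospy.Rate(1)  # 1 Hz
    while not rospy.is_shutdown():
        # get_value() averages everything buffered since the last call
        rospy.loginfo('mean value: %.3f' % handler.get_value())
        rate.sleep()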
|
francisc0garcia/autonomous_bicycle
|
src/classes/handlers/FloatHandler.py
|
Python
|
apache-2.0
| 1,308
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
from ironicclient.tests.functional.osc.v1 import base
@ddt.ddt
class BaremetalAllocationTests(base.TestCase):
"""Functional tests for baremetal allocation commands."""
def test_create(self):
"""Check baremetal allocation create command.
Test steps:
1) Create baremetal allocation in setUp.
2) Check that allocation successfully created.
"""
allocation_info = self.allocation_create()
self.assertTrue(allocation_info['resource_class'])
self.assertEqual(allocation_info['state'], 'allocating')
allocation_list = self.allocation_list()
self.assertIn(allocation_info['uuid'],
[x['UUID'] for x in allocation_list])
def test_create_name_uuid(self):
"""Check baremetal allocation create command with name and UUID.
Test steps:
1) Create baremetal allocation with specified name and UUID.
2) Check that allocation successfully created.
"""
uuid = data_utils.rand_uuid()
name = data_utils.rand_name('baremetal-allocation')
allocation_info = self.allocation_create(
params='--uuid {0} --name {1}'.format(uuid, name))
self.assertEqual(allocation_info['uuid'], uuid)
self.assertEqual(allocation_info['name'], name)
self.assertTrue(allocation_info['resource_class'])
self.assertEqual(allocation_info['state'], 'allocating')
allocation_list = self.allocation_list()
self.assertIn(uuid, [x['UUID'] for x in allocation_list])
self.assertIn(name, [x['Name'] for x in allocation_list])
def test_create_traits(self):
"""Check baremetal allocation create command with traits.
Test steps:
1) Create baremetal allocation with specified traits.
2) Check that allocation successfully created.
"""
allocation_info = self.allocation_create(
params='--trait CUSTOM_1 --trait CUSTOM_2')
self.assertTrue(allocation_info['resource_class'])
self.assertEqual(allocation_info['state'], 'allocating')
self.assertIn('CUSTOM_1', allocation_info['traits'])
self.assertIn('CUSTOM_2', allocation_info['traits'])
def test_create_candidate_nodes(self):
"""Check baremetal allocation create command with candidate nodes.
Test steps:
1) Create two nodes.
2) Create baremetal allocation with specified traits.
3) Check that allocation successfully created.
"""
name = data_utils.rand_name('baremetal-allocation')
node1 = self.node_create(name=name)
node2 = self.node_create()
allocation_info = self.allocation_create(
params='--candidate-node {0} --candidate-node {1}'
.format(node1['name'], node2['uuid']))
self.assertEqual(allocation_info['state'], 'allocating')
# NOTE(dtantsur): names are converted to uuids in the API
self.assertIn(node1['uuid'], allocation_info['candidate_nodes'])
self.assertIn(node2['uuid'], allocation_info['candidate_nodes'])
@ddt.data('name', 'uuid')
def test_delete(self, key):
"""Check baremetal allocation delete command with name/UUID argument.
Test steps:
1) Create baremetal allocation.
2) Delete baremetal allocation by name/UUID.
3) Check that allocation deleted successfully.
"""
name = data_utils.rand_name('baremetal-allocation')
allocation = self.allocation_create(params='--name {}'.format(name))
output = self.allocation_delete(allocation[key])
self.assertIn('Deleted allocation {0}'.format(allocation[key]), output)
allocation_list = self.allocation_list()
self.assertNotIn(allocation['name'],
[x['Name'] for x in allocation_list])
self.assertNotIn(allocation['uuid'],
[x['UUID'] for x in allocation_list])
@ddt.data('name', 'uuid')
def test_show(self, key):
"""Check baremetal allocation show command with name and UUID.
Test steps:
1) Create baremetal allocation.
2) Show baremetal allocation calling it with name and UUID arguments.
3) Check name, uuid and resource_class in allocation show output.
"""
name = data_utils.rand_name('baremetal-allocation')
allocation = self.allocation_create(params='--name {}'.format(name))
result = self.allocation_show(allocation[key],
['name', 'uuid', 'resource_class'])
self.assertEqual(allocation['name'], result['name'])
self.assertEqual(allocation['uuid'], result['uuid'])
self.assertTrue(result['resource_class'])
self.assertNotIn('state', result)
@ddt.data(
('--uuid', '', 'expected one argument'),
('--uuid', '!@#$^*&%^', 'Expected UUID for uuid'),
('--extra', '', 'expected one argument'),
('--name', '', 'expected one argument'),
('--name', 'not/a/name', 'invalid name'),
('--resource-class', '', 'expected one argument'),
('--resource-class', 'x' * 81, 'is too long'),
('--trait', '', 'expected one argument'),
('--trait', 'foo', 'does not match'),
('--candidate-node', '', 'expected one argument'),
('--candidate-node', 'banana?', 'Nodes cannot be found'),
('--wait', 'meow', 'invalid int value'))
@ddt.unpack
def test_create_negative(self, argument, value, ex_text):
"""Check errors on invalid input parameters."""
base_cmd = 'baremetal allocation create'
if argument != '--resource-class':
base_cmd += ' --resource-class allocation-test'
command = self.construct_cmd(base_cmd, argument, value)
self.assertRaisesRegex(exceptions.CommandFailed, ex_text,
self.openstack, command)
def test_create_no_resource_class(self):
"""Check errors on missing resource class."""
base_cmd = 'baremetal allocation create'
self.assertRaisesRegex(exceptions.CommandFailed,
'--resource-class',
self.openstack, base_cmd)
def test_set_unset(self):
"""Check baremetal allocation set and unset commands.
Test steps:
1) Create baremetal allocation in setUp.
2) Set extra data for allocation.
3) Check that baremetal allocation extra data was set.
4) Unset extra data for allocation.
5) Check that baremetal allocation extra data was unset.
"""
name = data_utils.rand_name('baremetal-allocation')
allocation = self.allocation_create(params='--name {}'.format(name))
extra_key = 'ext'
extra_value = 'testdata'
self.openstack(
'baremetal allocation set --extra {0}={1} {2}'
.format(extra_key, extra_value, allocation['uuid']))
show_prop = self.allocation_show(allocation['uuid'],
fields=['extra'])
self.assertEqual(extra_value, show_prop['extra'][extra_key])
self.openstack('baremetal allocation unset --extra {0} {1}'
.format(extra_key, allocation['uuid']))
show_prop = self.allocation_show(allocation['uuid'],
fields=['extra'])
self.assertNotIn(extra_key, show_prop['extra'])
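# --- Editor's note (not part of the original file) ---
# The self.allocation_* helpers used above (defined in the functional test
# base class) shell out to the "openstack baremetal allocation" commands.
# Roughly, test_create_traits exercises the equivalent of:
#
#   openstack baremetal allocation create \
#       --resource-class allocation-test --trait CUSTOM_1 --trait CUSTOM_2
#   openstack baremetal allocation list
#   openstack baremetal allocation delete <name-or-uuid>
#
# Only flags already appearing in the tests above are shown here.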
|
openstack/python-ironicclient
|
ironicclient/tests/functional/osc/v1/test_baremetal_allocation.py
|
Python
|
apache-2.0
| 8,146
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
#
# Handles VM images (e.g., shortnames) for simplifying config specification.
import json
import os
class InvalidImageShortName(Exception):
def __init__(self, value):
self.__value = value
def __str__(self):
return repr(self.__value)
PROJECT_IMAGES = None
if PROJECT_IMAGES is None:
vm_images_path = os.path.join(os.path.dirname(
__file__), 'cache', 'vm_images.json')
with open(vm_images_path, 'r') as vm_images_fd:
PROJECT_IMAGES = json.loads(vm_images_fd.read())
def ImageShortNameToUrl(image):
image_url_fmt = 'https://www.googleapis.com/compute/v1/projects/%(project)s/global/images/%(image)s'
for (project, data) in PROJECT_IMAGES.items():
if image in data['images']:
return image_url_fmt % {
'project': project,
'image': image,
}
elif ('pseudo' in data) and (image in data['pseudo']):
return image_url_fmt % {
'project': project,
'image': data['pseudo'][image],
}
raise InvalidImageShortName('Unknown short image name: %s' % image)
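# --- Editor's usage sketch (not part of the original file) ---
# ImageShortNameToUrl() only resolves names present in the bundled
# cache/vm_images.json mapping; the short name below is hypothetical and will
# raise InvalidImageShortName if it is not in that cache.
if __name__ == '__main__':
    try:
        print(ImageShortNameToUrl('debian-7-wheezy-v20140415'))
    except InvalidImageShortName as err:
        print('Not a known short image name: %s' % err)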
|
mbrukman/cloud-launcher
|
src/vm_images.py
|
Python
|
apache-2.0
| 1,790
|
"""Represent a device to a worker."""
class DeviceRep:
"""Helper class to represent a device to a worker."""
HEARTBEAT_LIVES = 3
def __init__(self, device_id):
"""Create a DeviceRep object."""
self.device_id = device_id
self.lives = self.HEARTBEAT_LIVES
def heartbeat(self):
"""Decrease live count by ."""
if self.lives > 0:
self.lives -= 1
def on_message_received(self):
"""Set live count to max when message received."""
self.lives = self.HEARTBEAT_LIVES
def is_alive(self):
"""Determine if the device is still alive."""
return self.lives > 0
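# --- Editor's usage sketch (not part of the original file) ---
# Demonstrates the heartbeat lifecycle: a device is considered dead after
# HEARTBEAT_LIVES heartbeats with no message in between, and any received
# message restores the full live count. The device id is hypothetical.
if __name__ == '__main__':
    rep = DeviceRep('device-42')
    for _ in range(DeviceRep.HEARTBEAT_LIVES):
        rep.heartbeat()
    assert not rep.is_alive()      # 3 missed heartbeats -> considered dead
    rep.on_message_received()
    assert rep.is_alive()          # a message resets the live count
    print('device alive: %s' % rep.is_alive())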
|
nstoik/farm_monitor
|
server/fm_server/device/device_representation.py
|
Python
|
apache-2.0
| 660
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 23 11:51:41 2016
Basic database operations; basic configuration is read from the config file.
@author: lywen
"""
import traceback
from config import get_mongo_config ##database config
class database(object):
"""
    Abstract base class for database operations.
"""
def __init__(self,db,user,password,host,port):
        self.db = db  ## database instance
        self.user = user  ## user name
        self.password = password  ## password
        self.host = host  ## database host IP
        self.port = port  ## database port
def connect(self):
"""
        Connect to the database.
"""
pass
def update(self,sql):
"""
        Update data.
"""
pass
def insert(self,sql):
"""
        Insert data.
"""
pass
def create(self,tablename):
"""
        Create a table.
"""
pass
def select(self,sql):
"""
        Query data.
"""
pass
def run(self):
"""
        Run.
"""
pass
def close(self):
"""
        Close the connection.
"""
pass
class mongodb(database):
"""
    MongoDB-related operations.
"""
def __init__(self):
user,password,host,port,db = get_mongo_config()
        database.__init__(self,db,user,password,host,port)  ## call the parent class __init__
self.connect()
def connect(self):
"""
        Connect to the MongoDB database.
"""
from pymongo import MongoClient
try:
self.Client = MongoClient(host=self.host, port=self.port)
db = self.Client[self.db]
if self.host !='127.0.0.1':
db.authenticate(name=self.user,password=self.password)
self.__conn = db
except:
traceback.print_exc()
#logs('database','mongodb','connect',traceback.format_exc())
self.__conn = None
def select(self,collectname,where,kwargs):
"""
        collectname: name of the MongoDB collection to query
where:{'a':1}
"""
collection = self.__conn[collectname]
data = collection.find(where,kwargs)
return [lst for lst in data]
def group(self,collectname,key, condition, initial, reduce, finalize=None, **kwargs):
"""
group function
"""
collection = self.__conn[collectname]
try:
return collection.group(key, condition, initial, reduce)
except:
traceback.print_exc()
def close(self):
"""
        Close the connection.
"""
self.__conn.client.close()
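# --- Editor's usage sketch (not part of the original file) ---
# Assuming config.get_mongo_config() returns credentials for a reachable
# MongoDB instance, a simple query could look like this. The collection name
# and fields are hypothetical.
if __name__ == '__main__':
    db = mongodb()
    rows = db.select('quotes', {'code': '600000'}, {'_id': 0})
    print(rows)
    db.close()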
|
lywen52/quantproject
|
database/mongodb.py
|
Python
|
apache-2.0
| 2,759
|
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
import json
import os
from glob import glob
from os.path import join
from textwrap import dedent
from lru import LRU
import bcolz
from bcolz import ctable
from intervaltree import IntervalTree
import logbook
import numpy as np
import pandas as pd
from pandas import HDFStore
import tables
from six import with_metaclass
from toolz import keymap, valmap
from catalyst.data._minute_bar_internal import (
minute_value,
find_position_of_minute,
find_last_traded_position_internal
)
from catalyst.gens.sim_engine import NANOS_IN_MINUTE
from catalyst.data.bar_reader import BarReader, NoDataOnDate
from catalyst.data.us_equity_pricing import check_uint64_safe
from catalyst.utils.calendars import get_calendar
from catalyst.utils.cli import maybe_show_progress
from catalyst.utils.memoize import lazyval
from catalyst.constants import LOG_LEVEL
logger = logbook.Logger('MinuteBars', level=LOG_LEVEL)
US_EQUITIES_MINUTES_PER_DAY = 390
FUTURES_MINUTES_PER_DAY = 1440
DEFAULT_EXPECTEDLEN = US_EQUITIES_MINUTES_PER_DAY * 252 * 15
OHLC_RATIO = 100000000
class BcolzMinuteOverlappingData(Exception):
pass
class BcolzMinuteWriterColumnMismatch(Exception):
pass
class MinuteBarReader(BarReader):
@property
def data_frequency(self):
return "minute"
def _calc_minute_index(market_opens, minutes_per_day):
minutes = np.zeros(len(market_opens) * minutes_per_day,
dtype='datetime64[ns]')
deltas = np.arange(0, minutes_per_day, dtype='timedelta64[m]')
for i, market_open in enumerate(market_opens):
start = market_open.asm8
minute_values = start + deltas
start_ix = minutes_per_day * i
end_ix = start_ix + minutes_per_day
minutes[start_ix:end_ix] = minute_values
return pd.to_datetime(minutes, utc=True, box=True)
def _sid_subdir_path(sid):
"""
    Format subdir path to limit the number of directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, '06')
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid))
)
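# --- Editor's worked example (not part of the original file) ---
# For a six-digit sid the zero-padded string is split into two directory
# levels (shown with POSIX separators):
#   _sid_subdir_path(1)      -> '00/00/000001.bcolz'
#   _sid_subdir_path(123456) -> '12/34/123456.bcolz'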
def convert_cols(cols, scale_factor, sid, invalid_data_behavior):
"""Adapt OHLCV columns into uint64 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint64.
scale_factor : int
Factor to use to scale float values before converting to uint64.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
        Specifies behavior when data cannot be converted to uint64.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values.
"""
scaled_opens = np.nan_to_num(cols['open']) * scale_factor
scaled_highs = np.nan_to_num(cols['high']) * scale_factor
scaled_lows = np.nan_to_num(cols['low']) * scale_factor
scaled_closes = np.nan_to_num(cols['close']) * scale_factor
scaled_volumes = np.nan_to_num(cols['volume']) * scale_factor
exclude_mask = np.zeros_like(scaled_opens, dtype=bool)
for col_name, scaled_col in [
('open', scaled_opens),
('high', scaled_highs),
('low', scaled_lows),
('close', scaled_closes),
('volume', scaled_volumes),
]:
max_val = scaled_col.max()
try:
check_uint64_safe(max_val, col_name)
except ValueError:
if invalid_data_behavior == 'raise':
raise
if invalid_data_behavior == 'warn':
logger.warn(
'Values for sid={}, col={} contain some too large for '
'uint64 (max={}), filtering them out',
sid, col_name, max_val,
)
# We want to exclude all rows that have an unsafe value in
# this column.
            exclude_mask |= (scaled_col >= np.iinfo(np.uint64).max)
    # Convert all cols to uint64.
opens = scaled_opens.astype(np.uint64)
highs = scaled_highs.astype(np.uint64)
lows = scaled_lows.astype(np.uint64)
closes = scaled_closes.astype(np.uint64)
volumes = scaled_volumes.astype(np.uint64)
# Exclude rows with unsafe values by setting to zero.
opens[exclude_mask] = 0
highs[exclude_mask] = 0
lows[exclude_mask] = 0
closes[exclude_mask] = 0
volumes[exclude_mask] = 0
return opens, highs, lows, closes, volumes
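# --- Editor's worked example (not part of the original file) ---
# With scale_factor equal to the module-level OHLC_RATIO (10**8), a close
# price of 1.25 becomes the integer 125000000 after scaling and the uint64
# cast; BcolzMinuteBarReader later multiplies by the inverse ratio
# (1 / 10**8) to recover 1.25. NaNs are first mapped to 0 by np.nan_to_num,
# and a stored 0 is interpreted as "no trade" on read.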
class BcolzMinuteBarMetadata(object):
"""
Parameters
----------
ohlc_ratio : int
The factor by which the pricing data is multiplied so that the
float data can be stored as an integer.
calendar : catalyst.utils.calendars.trading_calendar.TradingCalendar
The TradingCalendar on which the minute bars are based.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
minutes_per_day : int
The number of minutes per each period.
"""
FORMAT_VERSION = 3
METADATA_FILENAME = 'metadata.json'
@classmethod
def metadata_path(cls, rootdir):
return os.path.join(rootdir, cls.METADATA_FILENAME)
@classmethod
def read(cls, rootdir):
path = cls.metadata_path(rootdir)
with open(path) as fp:
raw_data = json.load(fp)
try:
version = raw_data['version']
except KeyError:
# Version was first written with version 1, assume 0,
# if version does not match.
version = 0
default_ohlc_ratio = raw_data['ohlc_ratio']
if version >= 1:
minutes_per_day = raw_data['minutes_per_day']
else:
# version 0 always assumed US equities.
minutes_per_day = US_EQUITIES_MINUTES_PER_DAY
if version >= 2:
calendar = get_calendar(raw_data['calendar_name'])
start_session = pd.Timestamp(
raw_data['start_session'], tz='UTC')
end_session = pd.Timestamp(raw_data['end_session'], tz='UTC')
else:
# No calendar info included in older versions, so
# default to NYSE.
calendar = get_calendar('NYSE')
start_session = pd.Timestamp(
raw_data['first_trading_day'], tz='UTC')
end_session = calendar.minute_to_session_label(
pd.Timestamp(
raw_data['market_closes'][-1], unit='m', tz='UTC')
)
if version >= 3:
ohlc_ratios_per_sid = raw_data['ohlc_ratios_per_sid']
if ohlc_ratios_per_sid is not None:
ohlc_ratios_per_sid = keymap(int, ohlc_ratios_per_sid)
else:
ohlc_ratios_per_sid = None
return cls(
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=version,
)
def __init__(
self,
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=FORMAT_VERSION,
):
self.calendar = calendar
self.start_session = start_session
self.end_session = end_session
self.default_ohlc_ratio = default_ohlc_ratio
self.ohlc_ratios_per_sid = ohlc_ratios_per_sid
self.minutes_per_day = minutes_per_day
self.version = version
def write(self, rootdir):
"""
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
The default ratio by which to multiply the pricing data to
            convert the float pricing data to integers that fit within
            np.uint64. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes per each period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
Deprecated, but included for backwards compatibility:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
market_opens : list
List of int64 values representing UTC market opens as
minutes since epoch.
market_closes : list
List of int64 values representing UTC market closes as
minutes since epoch.
"""
calendar = self.calendar
slicer = calendar.schedule.index.slice_indexer(
self.start_session,
self.end_session,
)
schedule = calendar.schedule[slicer]
market_opens = schedule.market_open
market_closes = schedule.market_close
metadata = {
'version': self.version,
'ohlc_ratio': self.default_ohlc_ratio,
'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid,
'minutes_per_day': self.minutes_per_day,
'calendar_name': self.calendar.name,
'start_session': str(self.start_session.date()),
'end_session': str(self.end_session.date()),
# Write these values for backwards compatibility
'first_trading_day': str(self.start_session.date()),
'market_opens': (market_opens.values.astype('datetime64[m]').
astype(np.int64).tolist()),
'market_closes': (market_closes.values.astype('datetime64[m]').
astype(np.int64).tolist()),
}
with open(self.metadata_path(rootdir), 'w+') as fp:
json.dump(metadata, fp)
class BcolzMinuteBarWriter(object):
"""
Class capable of writing minute OHLCV data to disk into bcolz format.
Parameters
----------
rootdir : string
Path to the root directory into which to write the metadata and
bcolz subdirectories.
calendar : catalyst.utils.calendars.trading_calendar.TradingCalendar
The trading calendar on which to base the minute bars. Used to
get the market opens used as a starting point for each periodic
span of minutes in the index, and the market closes that
correspond with the market opens.
minutes_per_day : int
The number of minutes per each period. Defaults to 390, the mode
of minutes in NYSE trading days.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
default_ohlc_ratio : int, optional
The default ratio by which to multiply the pricing data to
convert from floats to integers that fit within np.uint64. If
ohlc_ratios_per_sid is None or does not contain a mapping for a
given sid, this ratio is used. Default is OHLC_RATIO (10^8).
ohlc_ratios_per_sid : dict, optional
A dict mapping each sid in the output to the ratio by which to
        multiply the pricing data to convert the float data to
        integers that fit within np.uint64.
expectedlen : int, optional
The expected length of the dataset, used when creating the initial
bcolz ctable.
If the expectedlen is not used, the chunksize and corresponding
compression ratios are not ideal.
Defaults to supporting 15 years of NYSE equity market data.
see: http://bcolz.blosc.org/opt-tips.html#informing-about-the-length-of-your-carrays # noqa
write_metadata : bool, optional
If True, writes the minute bar metadata (on init of the writer).
If False, no metadata is written (existing metadata is
retained). Default is True.
Notes
-----
Writes a bcolz directory for each individual sid, all contained within
a root directory which also contains metadata about the entire dataset.
Each individual asset's data is stored as a bcolz table with a column for
each pricing field: (open, high, low, close, volume)
The open, high, low, close and volume columns are integers which are 10^8 times
    the quoted price, so that the data can be represented and stored as an
np.uint64, supporting market prices quoted up to the 1/10^8-th place.
The 'index' for each individual asset are a repeating period of minutes of
length `minutes_per_day` starting from each market open.
The file format does not account for half-days.
e.g.:
2016-01-19 14:31
2016-01-19 14:32
...
2016-01-19 20:59
2016-01-19 21:00
2016-01-20 14:31
2016-01-20 14:32
...
2016-01-20 20:59
2016-01-20 21:00
All assets are written with a common 'index', sharing a common first
trading day. Assets that do not begin trading until after the first trading
    day will have zeros for all pricing data up until data is traded.
'index' is in quotations, because bcolz does not provide an index. The
format allows index-like behavior by writing each minute's data into the
corresponding position of the enumeration of the aforementioned datetime
index.
The datetimes which correspond to each position are written in the metadata
as integer nanoseconds since the epoch into the `minute_index` key.
See Also
--------
catalyst.data.minute_bars.BcolzMinuteBarReader
"""
COL_NAMES = ('open', 'high', 'low', 'close', 'volume')
def __init__(self,
rootdir,
calendar,
start_session,
end_session,
minutes_per_day,
default_ohlc_ratio=OHLC_RATIO,
ohlc_ratios_per_sid=None,
expectedlen=DEFAULT_EXPECTEDLEN,
write_metadata=True):
self._rootdir = rootdir
self._start_session = start_session
self._end_session = end_session
self._calendar = calendar
slicer = (
calendar.schedule.index.slice_indexer(start_session, end_session))
self._schedule = calendar.schedule[slicer]
self._session_labels = self._schedule.index
self._minutes_per_day = minutes_per_day
self._expectedlen = expectedlen
self._default_ohlc_ratio = default_ohlc_ratio
self._ohlc_ratios_per_sid = ohlc_ratios_per_sid
self._minute_index = _calc_minute_index(
self._schedule.market_open, self._minutes_per_day)
if write_metadata:
metadata = BcolzMinuteBarMetadata(
self._default_ohlc_ratio,
self._ohlc_ratios_per_sid,
self._calendar,
self._start_session,
self._end_session,
self._minutes_per_day,
)
metadata.write(self._rootdir)
@classmethod
def open(cls, rootdir, end_session=None):
"""
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
"""
metadata = BcolzMinuteBarMetadata.read(rootdir)
return BcolzMinuteBarWriter(
rootdir,
metadata.calendar,
metadata.start_session,
end_session if end_session is not None else metadata.end_session,
metadata.minutes_per_day,
metadata.default_ohlc_ratio,
metadata.ohlc_ratios_per_sid,
write_metadata=end_session is not None
)
@property
def first_trading_day(self):
return self._start_session
def ohlc_ratio_for_sid(self, sid):
if self._ohlc_ratios_per_sid is not None:
try:
return self._ohlc_ratios_per_sid[sid]
except KeyError:
pass
# If no ohlc_ratios_per_sid dict is passed, or if the specified
# sid is not in the dict, fallback to the general ohlc_ratio.
return self._default_ohlc_ratio
def sidpath(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
Full path to the bcolz rootdir for the given sid.
"""
sid_subdir = _sid_subdir_path(sid)
return join(self._rootdir, sid_subdir)
def last_date_in_output_for_sid(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : pd.Timestamp
The midnight of the last date written in to the output for the
given sid.
"""
sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid))
if not os.path.exists(sizes_path):
return pd.NaT
with open(sizes_path, mode='r') as f:
sizes = f.read()
data = json.loads(sizes)
# use integer division so that the result is an int
# for pandas index later https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/base.py#L247 # noqa
num_days = data['shape'][0] // self._minutes_per_day
if num_days == 0:
# empty container
return pd.NaT
return self._session_labels[num_days - 1]
def _init_ctable(self, path):
"""
Create empty ctable for given path.
Parameters
----------
path : string
The path to rootdir of the new ctable.
"""
# Only create the containing subdir on creation.
# This is not to be confused with the `.bcolz` directory, but is the
# directory up one level from the `.bcolz` directories.
sid_containing_dirname = os.path.dirname(path)
if not os.path.exists(sid_containing_dirname):
# Other sids may have already created the containing directory.
os.makedirs(sid_containing_dirname)
initial_array = np.empty(0, np.uint64)
table = ctable(
rootdir=path,
columns=[
initial_array,
initial_array,
initial_array,
initial_array,
initial_array,
],
names=[
'open',
'high',
'low',
'close',
'volume'
],
expectedlen=self._expectedlen,
mode='w',
)
table.flush()
return table
def _ensure_ctable(self, sid):
"""Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid)
if not os.path.exists(sidpath):
return self._init_ctable(sidpath)
return bcolz.ctable(rootdir=sidpath, mode='a')
def _zerofill(self, table, numdays):
# Compute the number of minutes to be filled, accounting for the
# possibility of a partial day's worth of minutes existing for
# the previous day.
minute_offset = len(table) % self._minutes_per_day
num_to_prepend = numdays * self._minutes_per_day - minute_offset
prepend_array = np.zeros(num_to_prepend, np.uint64)
# Fill all OHLCV with zeros.
table.append([prepend_array] * 5)
table.flush()
def pad(self, sid, date):
"""
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
        including the specified date) will be padded with `minutes_per_day`
        worth of zeros.
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
            The date used to calculate how many slots to pad.
The padding is done through the date, i.e. after the padding is
done the `last_date_in_output_for_sid` will be equal to `date`
"""
table = self._ensure_ctable(sid)
last_date = self.last_date_in_output_for_sid(sid)
tds = self._session_labels
if date <= last_date or date < tds[0]:
# No need to pad.
return
if last_date == pd.NaT:
# If there is no data, determine how many days to add so that
# desired days are written to the correct slots.
days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
days_to_zerofill = tds[tds.slice_indexer(
start=last_date + tds.freq,
end=date)]
self._zerofill(table, len(days_to_zerofill))
new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
new_last_date, date)
def set_sid_attrs(self, sid, **kwargs):
"""Write all the supplied kwargs as attributes of the sid's file.
"""
table = self._ensure_ctable(sid)
for k, v in kwargs.items():
table.attrs[k] = v
def write(self, data, show_progress=False, invalid_data_behavior='warn'):
"""Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
"""
ctx = maybe_show_progress(
data,
show_progress=show_progress,
item_show_func=lambda e: e if e is None else str(e[0]),
label="Merging minute equity files:",
)
write_sid = self.write_sid
with ctx as it:
for e in it:
write_sid(*e, invalid_data_behavior=invalid_data_behavior)
def write_sid(self, sid, df, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable is not exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
            The asset identifier for the data being written.
df : pd.DataFrame
DataFrame of market data with the following characteristics.
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
"""
cols = {
'open': df.open.values,
'high': df.high.values,
'low': df.low.values,
'close': df.close.values,
'volume': df.volume.values,
}
dts = df.index.values
# Call internal method, since DataFrame has already ensured matching
# index and value lengths.
self._write_cols(sid, dts, cols, invalid_data_behavior)
def write_cols(self, sid, dts, cols, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable is not exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES):
raise BcolzMinuteWriterColumnMismatch(
"Length of dts={0} should match cols: {1}".format(
len(dts),
" ".join("{0}={1}".format(name, len(cols[name]))
for name in self.COL_NAMES)))
self._write_cols(sid, dts, cols, invalid_data_behavior)
def _write_cols(self, sid, dts, cols, invalid_data_behavior):
"""
Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
table = self._ensure_ctable(sid)
tds = self._session_labels
input_first_day = self._calendar.minute_to_session_label(
pd.Timestamp(dts[0]), direction='previous')
last_date = self.last_date_in_output_for_sid(sid)
day_before_input = input_first_day - tds.freq
self.pad(sid, day_before_input)
table = self._ensure_ctable(sid)
# Get the number of minutes already recorded in this sid's ctable
num_rec_mins = table.size
all_minutes = self._minute_index
# Get the latest minute we wish to write to the ctable
last_minute_to_write = pd.Timestamp(dts[-1], tz='UTC')
# In the event that we've already written some minutely data to the
# ctable, guard against overwriting that data.
if num_rec_mins > 0:
last_recorded_minute = all_minutes[num_rec_mins - 1]
if last_minute_to_write <= last_recorded_minute:
raise BcolzMinuteOverlappingData(dedent("""
Data with last_date={0} already includes input start={1} for
sid={2}""".strip()).format(last_date, input_first_day, sid))
latest_min_count = all_minutes.get_loc(last_minute_to_write)
# Get all the minutes we wish to write (all market minutes after the
# latest currently written, up to and including last_minute_to_write)
all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1]
minutes_count = all_minutes_in_window.size
open_col = np.zeros(minutes_count, dtype=np.uint64)
high_col = np.zeros(minutes_count, dtype=np.uint64)
low_col = np.zeros(minutes_count, dtype=np.uint64)
close_col = np.zeros(minutes_count, dtype=np.uint64)
vol_col = np.zeros(minutes_count, dtype=np.uint64)
dt_ixs = np.searchsorted(all_minutes_in_window.values,
dts.astype('datetime64[ns]'))
ohlc_ratio = self.ohlc_ratio_for_sid(sid)
(
open_col[dt_ixs],
high_col[dt_ixs],
low_col[dt_ixs],
close_col[dt_ixs],
vol_col[dt_ixs],
) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior)
table.append([
open_col,
high_col,
low_col,
close_col,
vol_col
])
table.flush()
def data_len_for_day(self, day):
"""
Return the number of data points up to and including the
provided day.
"""
day_ix = self._session_labels.get_loc(day)
# Add one to the 0-indexed day_ix to get the number of days.
num_days = day_ix + 1
return num_days * self._minutes_per_day
def truncate(self, date):
"""Truncate data beyond this date in all ctables."""
truncate_slice_end = self.data_len_for_day(date)
glob_path = os.path.join(self._rootdir, "*", "*", "*.bcolz")
sid_paths = sorted(glob(glob_path))
for sid_path in sid_paths:
file_name = os.path.basename(sid_path)
try:
table = bcolz.open(rootdir=sid_path)
except IOError:
continue
if table.len <= truncate_slice_end:
logger.info("{0} not past truncate date={1}.", file_name, date)
continue
logger.info(
"Truncating {0} at end_date={1}", file_name, date.date()
)
table.resize(truncate_slice_end)
# Update end session in metadata.
metadata = BcolzMinuteBarMetadata.read(self._rootdir)
metadata.end_session = date
metadata.write(self._rootdir)
class BcolzMinuteBarReader(MinuteBarReader):
"""
Reader for data written by BcolzMinuteBarWriter
Parameters
----------
rootdir : string
The root directory containing the metadata and asset bcolz
directories.
See Also
--------
catalyst.data.minute_bars.BcolzMinuteBarWriter
"""
FIELDS = ('open', 'high', 'low', 'close', 'volume')
def __init__(self, rootdir, sid_cache_size=1000):
self._rootdir = rootdir
metadata = self._get_metadata()
self._start_session = metadata.start_session
self._end_session = metadata.end_session
self.calendar = metadata.calendar
slicer = self.calendar.schedule.index.slice_indexer(
self._start_session,
self._end_session,
)
self._schedule = self.calendar.schedule[slicer]
self._market_opens = self._schedule.market_open
self._market_open_values = self._market_opens.values. \
astype('datetime64[m]').astype(np.int64)
self._market_closes = self._schedule.market_close
self._market_close_values = self._market_closes.values. \
astype('datetime64[m]').astype(np.int64)
self._default_ohlc_inverse = 1.0 / metadata.default_ohlc_ratio
ohlc_ratios = metadata.ohlc_ratios_per_sid
if ohlc_ratios:
self._ohlc_inverses_per_sid = (
valmap(lambda x: 1.0 / x, ohlc_ratios))
else:
self._ohlc_inverses_per_sid = None
self._minutes_per_day = metadata.minutes_per_day
self._carrays = {
field: LRU(sid_cache_size)
for field in self.FIELDS
}
self._last_get_value_dt_position = None
self._last_get_value_dt_value = None
# This is to avoid any bad data or other performance-killing situation
        # where there is a consecutive streak of 0 (no volume) starting at an
# asset's start date.
# if asset 1 started on 2015-01-03 but its first trade is 2015-01-06
# 10:31 AM US/Eastern, this dict would store {1: 23675971},
# which is the minute epoch of that date.
self._known_zero_volume_dict = {}
def _get_metadata(self):
return BcolzMinuteBarMetadata.read(self._rootdir)
@property
def trading_calendar(self):
return self.calendar
@lazyval
def last_available_dt(self):
_, close = self.calendar.open_and_close_for_session(self._end_session)
return close
@property
def first_trading_day(self):
return self._start_session
def _ohlc_ratio_inverse_for_sid(self, sid):
if self._ohlc_inverses_per_sid is not None:
try:
return self._ohlc_inverses_per_sid[sid]
except KeyError:
pass
# If we can not get a sid-specific OHLC inverse for this sid,
# fallback to the default.
return self._default_ohlc_inverse
def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
List of DatetimeIndex representing the minutes to exclude because
of early closes.
"""
market_opens = self._market_opens.values.astype('datetime64[m]')
market_closes = self._market_closes.values.astype('datetime64[m]')
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(
minutes_per_day != self._minutes_per_day - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close)
for market_open, early_close
in zip(early_opens, early_closes)]
return minutes
@lazyval
def _minute_exclusion_tree(self):
"""
Build an interval tree keyed by the start and end of each range
        of positions that should be dropped from windows. (These are the minutes
between an early close and the minute which would be the close based
on the regular period if there were no early close.)
The value of each node is the same start and end position stored as
a tuple.
The data is stored as such in support of a fast answer to the question,
does a given start and end position overlap any of the exclusion spans?
Returns
-------
IntervalTree containing nodes which represent the minutes to exclude
because of early closes.
"""
itree = IntervalTree()
for market_open, early_close in self._minutes_to_exclude():
start_pos = self._find_position_of_minute(early_close) + 1
end_pos = (
self._find_position_of_minute(market_open)
+
self._minutes_per_day
-
1
)
data = (start_pos, end_pos)
itree[start_pos:end_pos + 1] = data
return itree
def _exclusion_indices_for_range(self, start_idx, end_idx):
"""
Returns
-------
List of tuples of (start, stop) which represent the ranges of minutes
which should be excluded when a market minute window is requested.
"""
itree = self._minute_exclusion_tree
if itree.overlaps(start_idx, end_idx):
ranges = []
intervals = itree[start_idx:end_idx]
for interval in intervals:
ranges.append(interval.data)
return sorted(ranges)
else:
return None
def _get_carray_path(self, sid, field):
sid_subdir = _sid_subdir_path(sid)
# carrays are subdirectories of the sid's rootdir
return os.path.join(self._rootdir, sid_subdir, field)
def _open_minute_file(self, field, sid):
sid = int(sid)
try:
carray = self._carrays[field][sid]
except KeyError:
carray = self._carrays[field][sid] = \
bcolz.carray(rootdir=self._get_carray_path(sid, field),
mode='r')
return carray
def table_len(self, sid):
"""Returns the length of the underlying table for this sid."""
return len(self._open_minute_file('close', sid))
def get_sid_attr(self, sid, name):
sid_subdir = _sid_subdir_path(sid)
sid_path = os.path.join(self._rootdir, sid_subdir)
attrs = bcolz.attrs.attrs(sid_path, 'r')
try:
return attrs[name]
except KeyError:
return None
def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
"""
if self._last_get_value_dt_value == dt.value:
minute_pos = self._last_get_value_dt_position
else:
try:
minute_pos = self._find_position_of_minute(dt)
except ValueError:
raise NoDataOnDate()
self._last_get_value_dt_value = dt.value
self._last_get_value_dt_position = minute_pos
        # a patch for requesting non-existent time frames
        # due to the different candle labeling + wrong start dates
if minute_pos < 0:
return np.nan
try:
value = self._open_minute_file(field, sid)[minute_pos]
except IndexError:
value = 0
if value == 0:
if field == 'volume':
return 0
else:
return np.nan
# if field != 'volume':
value *= self._ohlc_ratio_inverse_for_sid(sid)
return value
def get_last_traded_dt(self, asset, dt):
minute_pos = self._find_last_traded_position(asset, dt)
if minute_pos == -1:
return pd.NaT
return self._pos_to_minute(minute_pos)
def _find_last_traded_position(self, asset, dt):
volumes = self._open_minute_file('volume', asset)
start_date_minute = asset.start_date.value / NANOS_IN_MINUTE
dt_minute = dt.value / NANOS_IN_MINUTE
try:
# if we know of a dt before which this asset has no volume,
# don't look before that dt
earliest_dt_to_search = self._known_zero_volume_dict[asset.sid]
except KeyError:
earliest_dt_to_search = start_date_minute
if dt_minute < earliest_dt_to_search:
return -1
pos = find_last_traded_position_internal(
self._market_open_values,
self._market_close_values,
dt_minute,
earliest_dt_to_search,
volumes,
self._minutes_per_day,
)
if pos == -1:
# if we didn't find any volume before this dt, save it to avoid
# work in the future.
try:
self._known_zero_volume_dict[asset.sid] = max(
dt_minute,
self._known_zero_volume_dict[asset.sid]
)
except KeyError:
self._known_zero_volume_dict[asset.sid] = dt_minute
return pos
def _pos_to_minute(self, pos):
minute_epoch = minute_value(
self._market_open_values,
pos,
self._minutes_per_day
)
return pd.Timestamp(minute_epoch, tz='UTC', unit="m")
def _find_position_of_minute(self, minute_dt):
"""
Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non market minutes to the last close.
ex. this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day.
"""
return find_position_of_minute(
self._market_open_values,
self._market_close_values,
minute_dt.value / NANOS_IN_MINUTE,
self._minutes_per_day,
False,
)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
start_idx = self._find_position_of_minute(start_dt)
end_idx = self._find_position_of_minute(end_dt)
num_minutes = (end_idx - start_idx + 1)
results = []
indices_to_exclude = self._exclusion_indices_for_range(
start_idx, end_idx)
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude:
length = excl_stop - excl_start + 1
num_minutes -= length
shape = num_minutes, len(sids)
for field in fields:
if field != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.float64)
for i, sid in enumerate(sids):
carray = self._open_minute_file(field, sid)
values = carray[start_idx:end_idx + 1]
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude[::-1]:
excl_slice = np.s_[excl_start - start_idx:excl_stop
- start_idx + 1]
values = np.delete(values, excl_slice)
where = values != 0
# first slice down to len(where) because we might not have
# written data for all the minutes requested
# if field != 'volume':
out[:len(where), i][where] = (
values[where] * self._ohlc_ratio_inverse_for_sid(sid))
# else:
# out[:len(where), i][where] = values[where]
results.append(out)
return results
class MinuteBarUpdateReader(with_metaclass(ABCMeta, object)):
"""
Abstract base class for minute update readers.
"""
@abstractmethod
def read(self, dts, sids):
"""
Read and return pricing update data.
Parameters
----------
dts : DatetimeIndex
The minutes for which to read the pricing updates.
sids : iter[int]
The sids for which to read the pricing updates.
Returns
-------
data : iter[(int, DataFrame)]
Returns an iterable of ``sid`` to the corresponding OHLCV data.
"""
raise NotImplementedError()
class H5MinuteBarUpdateWriter(object):
"""
Writer for files containing minute bar updates for consumption by a writer
for a ``MinuteBarReader`` format.
Parameters
----------
path : str
The destination path.
complevel : int, optional
The HDF5 complevel, defaults to ``5``.
complib : str, optional
The HDF5 complib, defaults to ``zlib``.
"""
FORMAT_VERSION = 0
_COMPLEVEL = 5
_COMPLIB = 'zlib'
def __init__(self, path, complevel=None, complib=None):
self._complevel = complevel if complevel \
is not None else self._COMPLEVEL
self._complib = complib if complib is not None else self._COMPLIB
self._path = path
def write(self, frames):
"""
Write the frames to the target HDF5 file, using the format used by
``pd.Panel.to_hdf``
Parameters
----------
frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
An iterable or other mapping of sid to the corresponding OHLCV
pricing data.
"""
with HDFStore(self._path, 'w',
complevel=self._complevel, complib=self._complib) \
as store:
panel = pd.Panel.from_dict(dict(frames))
panel.to_hdf(store, 'updates')
with tables.open_file(self._path, mode='r+') as h5file:
h5file.set_node_attr('/', 'version', 0)
class H5MinuteBarUpdateReader(MinuteBarUpdateReader):
"""
Reader for minute bar updates stored in HDF5 files.
Parameters
----------
path : str
The path of the HDF5 file from which to source data.
"""
def __init__(self, path):
self._panel = pd.read_hdf(path)
def read(self, dts, sids):
panel = self._panel[sids, dts, :]
return panel.iteritems()
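# --- Editor's usage sketch (not part of the original file) ---
# A rough round trip through the HDF5 update writer/reader, assuming a pandas
# version that still ships pd.Panel (which both classes rely on) and a
# writable path; sid, path and values are hypothetical:
#
#   frames = {1: pd.DataFrame(
#       {'open': [1.0], 'high': [1.1], 'low': [0.9],
#        'close': [1.05], 'volume': [10.0]},
#       index=pd.DatetimeIndex(['2016-01-19 14:31'], tz='UTC'))}
#   H5MinuteBarUpdateWriter('/tmp/updates.h5').write(frames)
#   reader = H5MinuteBarUpdateReader('/tmp/updates.h5')
#   updates = reader.read(frames[1].index, [1])   # iterable of (sid, DataFrame)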
|
enigmampc/catalyst
|
catalyst/data/minute_bars.py
|
Python
|
apache-2.0
| 47,563
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from .__init__ import *
from .object import Object
from .control import Control
class Curve(Object):
"""This class represents a curve"""
__name__ = "Curve"
def __init__(self):
Object.__init__(self)
#self.handler.line = True
self.dash = list()
self.radius = 20
self.control = MANUAL
control = Control()
self.handler.control.append(control)
self.block = False
def post(self):
self.handler.control[NORTHWEST].x = self.x
self.handler.control[NORTHWEST].y = self.y
self.handler.control[SOUTHEAST].x = self.x + self.width
self.handler.control[SOUTHEAST].y = self.y + self.height
if not self.block:
self.handler.control[8].x = self.x + self.width
self.handler.control[8].y = self.y
self.handler.control[8].limbus = True
self.block ^= 1
def draw(self, context):
###context.save()
context.set_dash(self.dash)
context.set_line_width(self.thickness)
context.move_to(self.x, self.y)
context.curve_to(self.x, self.y, self.handler.control[8].x, self.handler.control[8].y, self.x + self.width,
self.y + self.height)
#context.curve_to(self.x, self.y, self.x + self.width, self.y, self.x + self.width, self.y + self.height)
context.set_source_rgba(self.stroke_color.red, self.stroke_color.green,
self.stroke_color.blue, self.stroke_color.alpha)
context.stroke()
Object.draw(self, context)
###context.restore()
def transform(self, x, y):
self.handler.control[8].x = x
self.handler.control[8].y = y
|
jmouriz/sanaviron
|
sanaviron/objects/curve.py
|
Python
|
apache-2.0
| 1,728
|
#!/usr/bin/env python
# Copyright (c) 2013. Mark E. Madsen <mark@madsenlab.org>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
"""
For a specific experiment name, we subsample raw data into a "full dataset".
Given a collection of individual samples, raw from a simuPOP/CTPy simulation, which have
been done at maximum levels of individual sample size and trait dimensionality, subsample
for smaller sample sizes and dimensionalities.
See http://notebook.madsenlab.org/coarse%20grained%20model%20project/2013/07/29/classification-experiment-protocol.html
for more details on this data reduction approach.
The resulting samples are copies of the original individual sample documents, but with a subset
of the original genotype (e.g., 3 loci instead of 4), or number of individuals (e.g., 30 individuals
sampled rather than 100).
The original sample document **and** each of the subsampled documents are inserted into
individual_sample_fulldataset. This collection is then usable for further data reduction, such
as classification, time averaging, or other statistical analysis.
"""
import logging as log
import argparse
import ming
import ctpy.data as data
import ctpy.utils as utils
import datetime as datetime
import itertools
## setup
def setup():
global sargs, config, simconfig
sargs = utils.ScriptArgs()
if sargs.debug:
log.basicConfig(level=log.DEBUG, format='%(asctime)s %(levelname)s: %(message)s')
else:
log.basicConfig(level=log.INFO, format='%(asctime)s %(levelname)s: %(message)s')
simconfig = utils.CTPyConfiguration(sargs.configuration)
log.debug("experiment name: %s", sargs.experiment_name)
data.set_experiment_name(sargs.experiment_name)
data.set_database_hostname(sargs.database_hostname)
data.set_database_port(sargs.database_port)
#### main program ####
log.info("SUBSAMPLE_INDIVIDUAL_SAMPLES - Starting program")
log.info("Performing subsampling for experiment named: %s", data.experiment_name)
config = data.getMingConfiguration()
ming.configure(**config)
def check_prior_completion():
"""
We do not want to run this subsampling if we've done it previously to the same raw
data collection, because we'll be creating duplicate data sets.
:return: boolean
"""
experiment_record = data.ExperimentTracking.m.find(dict(experiment_name=sargs.experiment_name)).one()
if experiment_record["subsampling_complete"] == True:
return True
else:
return False
def record_completion():
"""
Once subsampling is complete, we want to record it in the database so we don't do it
again for the same data set.
:return: none
"""
experiment_record = data.ExperimentTracking.m.find(dict(experiment_name=sargs.experiment_name)).one()
experiment_record["subsampling_complete"] = True
experiment_record["subsampling_tstamp"] = datetime.datetime.utcnow()
experiment_record.m.save()
def record_full_subsample(s,new_dimen,new_ssize,new_sample):
data.storeIndividualSampleFullDataset(s.replication,
new_dimen,
new_ssize,
s.simulation_time,
s.mutation_rate,
s.population_size,
s.simulation_run_id,
new_sample
)
def subsample_for_dimension(dimension, sample_id, genotype_list):
"""
Takes a list of genotypes and the desired number of loci to subsample, and returns
that number of loci from the list
:param dimension:
:param genotype_list:
:return: list of genotypes
"""
return dict(id=sample_id,genotype=genotype_list[:dimension])
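# --- Editor's worked example (not part of the original file) ---
# Subsampling a 4-locus genotype down to 2 loci keeps the first two entries
# (the id and genotype values below are hypothetical):
#   subsample_for_dimension(2, 'ind-7', ['A', 'B', 'C', 'D'])
#   -> {'id': 'ind-7', 'genotype': ['A', 'B']}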
####### main loop #######
if __name__ == "__main__":
setup()
if check_prior_completion() == True:
log.info("Subsampling of experiment %s already complete -- exiting", sargs.experiment_name)
exit(1)
# do the subsampling
# one caveat is that we already have a sample from max(DIMENSIONS_STUDIED) and
# max(SAMPLE_SIZES_STUDIED) so we need to skip doing that one, and just insert the
# original data sample into the fulldataset collection.
existing_dimensionality = max(simconfig.DIMENSIONS_STUDIED)
existing_sample_size = max(simconfig.SAMPLE_SIZES_STUDIED)
state_space = [
simconfig.SAMPLE_SIZES_STUDIED,
simconfig.DIMENSIONS_STUDIED
]
individual_samples = data.IndividualSample.m.find()
# each "individual_sample" has a field called "sample" which is an array of individuals's genotypes
for s in individual_samples:
for param_combination in itertools.product(*state_space):
ssize = param_combination[0]
dimen = param_combination[1]
log.debug("subsampling for ssize %s and dim %s", ssize, dimen)
# this is the original sample document
if ssize == existing_sample_size and dimen == existing_dimensionality:
# record original document into the full sample data set
log.debug("Skipping subsampling for documents with existing ssize and dim, just copying")
record_full_subsample(s,s.dimensionality,s.sample_size,s.sample)
continue
# we subsample each id in the samples array, and then construct a new overall
# "individual_sample_fulldataset" document from the original "individual_sample"
# document, inserting "subsampled_indiv" in place of the original list.
dim_subsampled = []
for sample in s.sample:
dim_subsampled.append(subsample_for_dimension(dimen, sample.id, sample.genotype))
log.debug("dim_subsampled: %s", dim_subsampled)
# now dim_subsampled contains the dimension-reduced samples, so we just have
# to reduce it by ssize, and the result is our completely subsampled sample.
final_subsample = dim_subsampled[:ssize]
log.debug("final subsample: %s", final_subsample)
record_full_subsample(s,dimen,ssize,final_subsample)
# log completion of the subsampling
record_completion()
|
mmadsen/CTPy
|
analytics/subsample_individual_samples.py
|
Python
|
apache-2.0
| 6,418
|
from __future__ import absolute_import
from kvkit import (
ReferenceProperty,
DateTimeProperty,
ListProperty,
BooleanProperty,
DictProperty,
StringProperty
)
from ...models import BaseDocument, Content, Comment, CommentParentMixin, Project, User, rc
from settings import DATABASES
class Todo(CommentParentMixin, BaseDocument, Content):
_riak_options = {"bucket": rc.bucket(DATABASES["todos"])}
_child_class = Comment
parent = ReferenceProperty(Project, index=True, load_on_demand=True)
assigned = ReferenceProperty(User, index=True, load_on_demand=True)
due = DateTimeProperty(default=lambda: None)
tags = ListProperty(index=True)
done = BooleanProperty(default=False)
content = DictProperty() # markdown -> markdown, html -> html
# For this, to avoid things like spaces in the name, we use the md5 of the name.
milestone = StringProperty(index=True)
def archive(self):
archived_item = ArchivedTodo(key=self.key, data=self)
archived_item.save()
self.delete()
return archived_item
class ArchivedTodo(Todo):
_riak_options = {"bucket": rc.bucket(DATABASES["archived_todos"])}
|
shuhaowu/projecto
|
projecto/apiv1/todos/models.py
|
Python
|
apache-2.0
| 1,135
|
"""Test the FireServiceRota config flow."""
from unittest.mock import patch
from pyfireservicerota import InvalidAuthError
from homeassistant import data_entry_flow
from homeassistant.components.fireservicerota.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_URL, CONF_USERNAME
from tests.common import MockConfigEntry
MOCK_CONF = {
CONF_USERNAME: "my@email.address",
CONF_PASSWORD: "mypassw0rd",
CONF_URL: "www.brandweerrooster.nl",
}
MOCK_DATA = {
"auth_implementation": DOMAIN,
CONF_URL: MOCK_CONF[CONF_URL],
CONF_USERNAME: MOCK_CONF[CONF_USERNAME],
"token": {
"access_token": "test-access-token",
"token_type": "Bearer",
"expires_in": 1234,
"refresh_token": "test-refresh-token",
"created_at": 4321,
},
}
MOCK_TOKEN_INFO = {
"access_token": "test-access-token",
"token_type": "Bearer",
"expires_in": 1234,
"refresh_token": "test-refresh-token",
"created_at": 4321,
}
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_abort_if_already_setup(hass):
"""Test abort if already setup."""
entry = MockConfigEntry(
domain=DOMAIN, data=MOCK_CONF, unique_id=MOCK_CONF[CONF_USERNAME]
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_CONF
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_invalid_credentials(hass):
"""Test that invalid credentials throws an error."""
with patch(
"homeassistant.components.fireservicerota.FireServiceRota.request_tokens",
side_effect=InvalidAuthError,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_CONF
)
assert result["errors"] == {"base": "invalid_auth"}
async def test_step_user(hass):
"""Test the start of the config flow."""
with patch(
"homeassistant.components.fireservicerota.config_flow.FireServiceRota"
) as mock_fsr, patch(
"homeassistant.components.fireservicerota.async_setup_entry",
return_value=True,
) as mock_setup_entry:
mock_fireservicerota = mock_fsr.return_value
mock_fireservicerota.request_tokens.return_value = MOCK_TOKEN_INFO
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_CONF
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == MOCK_CONF[CONF_USERNAME]
assert result["data"] == {
"auth_implementation": "fireservicerota",
CONF_URL: "www.brandweerrooster.nl",
CONF_USERNAME: "my@email.address",
"token": {
"access_token": "test-access-token",
"token_type": "Bearer",
"expires_in": 1234,
"refresh_token": "test-refresh-token",
"created_at": 4321,
},
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_reauth(hass):
"""Test the start of the config flow."""
entry = MockConfigEntry(
domain=DOMAIN, data=MOCK_CONF, unique_id=MOCK_CONF[CONF_USERNAME]
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.fireservicerota.config_flow.FireServiceRota"
) as mock_fsr:
mock_fireservicerota = mock_fsr.return_value
mock_fireservicerota.request_tokens.return_value = MOCK_TOKEN_INFO
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "reauth", "unique_id": entry.unique_id},
data=MOCK_CONF,
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
with patch(
"homeassistant.components.fireservicerota.config_flow.FireServiceRota"
) as mock_fsr, patch(
"homeassistant.components.fireservicerota.async_setup_entry",
return_value=True,
):
mock_fireservicerota = mock_fsr.return_value
mock_fireservicerota.request_tokens.return_value = MOCK_TOKEN_INFO
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PASSWORD: "any"},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "reauth_successful"
|
w1ll1am23/home-assistant
|
tests/components/fireservicerota/test_config_flow.py
|
Python
|
apache-2.0
| 4,871
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import sys
from twitter.common.collections import OrderedSet
from pants.backend.core.tasks.task import Task
from pants.backend.jvm.ivy_utils import IvyModuleRef, IvyUtils
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.util.contextutil import open_zip64
from pants.util.dirutil import safe_mkdir
# XXX(pl): This task is very broken and has been for a long time.
# Remove it after confirming it has no users.
class Provides(Task):
@classmethod
def register_options(cls, register):
register('--transitive', default=False, action='store_true',
help='Shows the symbols provided not just by the specified targets but by all their '
'transitive dependencies.')
register('--also-write-to-stdout', default=False, action='store_true',
help='Also write the provides information to stdout.')
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data('jars')
round_manager.require_data('ivy_jar_products')
def __init__(self, *args, **kwargs):
super(Provides, self).__init__(*args, **kwargs)
self.confs = self.context.config.getlist('ivy', 'confs', default=['default'])
self.target_roots = self.context.target_roots
self.transitive = self.get_options().transitive
self.also_write_to_stdout = self.get_options().also_write_to_stdout
# Create a fake target, in case we were run directly on a JarLibrary containing nothing but JarDependencies.
# TODO(benjy): Get rid of this special-casing of jar dependencies.
# TODO(pl): Is this necessary? Now JarLibrary only contains a payload of JarDependency
# self.context.add_new_target(self.workdir,
# JvmBinary,
# name='provides',
# dependencies=self.target_roots,
# configurations=self.confs)
# self.context.products.require('jars')
def execute(self):
safe_mkdir(self.workdir)
targets = self.context.targets()
for conf in self.confs:
outpath = os.path.join(self.workdir,
'{0}.{1}.provides'.format(IvyUtils.identify(targets)[1], conf))
if self.transitive:
outpath += '.transitive'
ivy_jar_products = self.context.products.get_data('ivy_jar_products') or {}
# This product is a list for historical reasons (exclusives groups) but in practice should
# have either 0 or 1 entries.
ivy_info_list = ivy_jar_products.get(conf)
if ivy_info_list:
assert len(ivy_info_list) == 1, (
'The values in ivy_jar_products should always be length 1,'
' since we no longer have exclusives groups.'
)
ivy_info = ivy_info_list[0]
else:
ivy_info = None
jar_paths = OrderedSet()
for root in self.target_roots:
jar_paths.update(self.get_jar_paths(ivy_info, root, conf))
with open(outpath, 'w') as outfile:
def do_write(s):
outfile.write(s)
if self.also_write_to_stdout:
sys.stdout.write(s)
for jar in jar_paths:
do_write('# from jar %s\n' % jar)
for line in self.list_jar(jar):
if line.endswith('.class'):
class_name = line[:-6].replace('/', '.')
do_write(class_name)
do_write('\n')
self.context.log.info('Wrote provides information to %s' % outpath)
def get_jar_paths(self, ivy_info, target, conf):
jar_paths = OrderedSet()
if isinstance(target, JarLibrary):
# Jar library proxies jar dependencies or jvm targets, so the jars are just those of the
# dependencies.
for paths in [self.get_jar_paths(ivy_info, dep, conf) for dep in target.dependencies]:
jar_paths.update(paths)
elif isinstance(target, JarDependency):
ref = IvyModuleRef(target.org, target.name, target.rev, conf)
jar_paths.update(self.get_jar_paths_for_ivy_module(ivy_info, ref))
elif target.is_jvm:
for basedir, jars in self.context.products.get('jars').get(target).items():
jar_paths.update([os.path.join(basedir, jar) for jar in jars])
if self.transitive:
for dep in target.dependencies:
jar_paths.update(self.get_jar_paths(ivy_info, dep, conf))
return jar_paths
def get_jar_paths_for_ivy_module(self, ivy_info, ref):
def create_collection(current_ref):
module = ivy_info.modules_by_ref[current_ref]
return OrderedSet([a.path for a in module.artifacts])
if self.transitive:
return ivy_info.traverse_dependency_graph(ref, create_collection)
else:
return create_collection(ref)
def list_jar(self, path):
with open_zip64(path, 'r') as jar:
return jar.namelist()
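# Illustrative example (not part of the original file) of the .provides output
# written by execute() above: a comment line per jar, followed by the fully
# qualified class names recovered from its .class entries.
#
#   # from jar /path/to/guava-16.0.jar
#   com.google.common.base.Preconditions
#   com.google.common.collect.ImmutableList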
|
tejal29/pants
|
src/python/pants/backend/jvm/tasks/provides.py
|
Python
|
apache-2.0
| 5,050
|
import os
import os.path
from setuptools import setup, find_packages
def get_package_data():
package_data = []
for root, dirnames, filenames in os.walk('./eva_cttv_pipeline/evidence_string_generation/resources'):
root = root.replace("./eva_cttv_pipeline/", "")
for filename in filenames:
new_fn = os.path.join(root, filename)
package_data.append(new_fn)
return package_data
def get_requires():
requires = []
with open("requirements.txt", "rt") as req_file:
for line in req_file:
requires.append(line.rstrip())
return requires
setup(name='eva_cttv_pipeline',
version='2.3.0',
packages=find_packages(),
install_requires=get_requires(),
package_data={
'eva_cttv_pipeline': get_package_data()
},
tests_require=get_requires(),
setup_requires=get_requires(),
test_suite='tests'
)
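# A minimal sketch of how this setup script is usually invoked (assumed, not
# part of the original file): an editable install for development, or a test
# run that uses the test_suite declared above.
#
#   pip install -e .
#   python setup.py test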
|
EBIvariation/eva-cttv-pipeline
|
setup.py
|
Python
|
apache-2.0
| 928
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import itertools
import json
import random
import re
from typing import Dict, Text
from absl.testing import parameterized
from language.canine import modeling
import tensorflow.compat.v1 as tf
class CanineModelTest(parameterized.TestCase, tf.test.TestCase):
class CanineModelTester(object):
def __init__(self,
parent,
batch_size=13,
dynamic_batch_size=False,
seq_length=16,
is_training=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
downsampling_rate=4,
num_attention_heads=4,
intermediate_size=37,
max_position_embeddings=64,
type_vocab_size=16):
self.parent = parent
self.batch_size = batch_size
self.dynamic_batch_size = dynamic_batch_size
self.seq_length = seq_length
self.is_training = is_training
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.downsampling_rate = downsampling_rate
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
def create_model(self, seed=None):
input_ids = ids_tensor(
[self.batch_size, self.seq_length],
self.vocab_size,
seed=seed,
dynamic_batch_size=self.dynamic_batch_size)
if seed is not None:
seed *= 7
input_mask = ids_tensor(
[self.batch_size, self.seq_length],
vocab_size=2,
seed=seed,
dynamic_batch_size=self.dynamic_batch_size)
if seed is not None:
seed *= 5
token_type_ids = ids_tensor(
[self.batch_size, self.seq_length],
self.type_vocab_size,
seed=seed,
dynamic_batch_size=self.dynamic_batch_size)
config = modeling.CanineModelConfig(
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
max_positions=self.max_position_embeddings,
downsampling_rate=self.downsampling_rate)
model = modeling.CanineModel(
config=config,
atom_input_ids=input_ids,
atom_input_mask=input_mask,
atom_segment_ids=token_type_ids,
is_training=self.is_training)
outputs = {
"pooled_output": model.get_pooled_output(),
"sequence_output": model.get_sequence_output(),
"downsampled_layers": model.get_downsampled_layers(),
}
return outputs
def check_output(self, result):
self.parent.assertAllEqual(result["pooled_output"].shape,
[self.batch_size, self.hidden_size])
self.parent.assertAllEqual(
result["sequence_output"].shape,
[self.batch_size, self.seq_length, self.hidden_size])
for layer in result["downsampled_layers"]:
self.parent.assertEqual(layer.shape[0], self.batch_size)
# NOTE: Not checking sequence molecule length.
self.parent.assertEqual(layer.shape[2], self.hidden_size)
def test_model_static_batch_size(self):
self.run_tester(
CanineModelTest.CanineModelTester(self), check_reachable=False)
def test_model_dynamic_batch_size(self):
self.run_tester(
CanineModelTest.CanineModelTester(
self, dynamic_batch_size=True),
check_reachable=False)
@parameterized.named_parameters(
dict(testcase_name="5", rate=5),
dict(testcase_name="6", rate=6),
dict(testcase_name="7", rate=7))
def test_model_downsampling_rate(self, rate):
self.run_tester(
CanineModelTest.CanineModelTester(self, downsampling_rate=rate),
check_reachable=False)
def test_config_to_json_string(self):
config = modeling.CanineModelConfig(hidden_size=37)
obj = json.loads(config.to_json_string())
tf.logging.info(str(obj))
self.assertEqual(obj["hidden_size"], 37)
def test_determinism_same_graph(self):
# Deterministic only at inference (training has dropout)
tester = CanineModelTest.CanineModelTester(
self, is_training=False)
with self.session() as sess:
ops = tester.create_model()
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
run1 = sess.run(ops)
run2 = sess.run(ops)
tester.check_output(run1)
tester.check_output(run2)
self.assertAllClose(run1, run2)
def run_tester(self, tester, check_reachable=True):
with self.session() as sess:
ops: Dict[Text, tf.Tensor] = tester.create_model()
init_op: tf.Operation = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
output_result = sess.run(ops)
tester.check_output(output_result)
if check_reachable:
self.assert_all_tensors_reachable(sess, [init_op, ops])
def assert_all_tensors_reachable(self, sess, outputs):
"""Checks that all the tensors in the graph are reachable from outputs."""
ignore_strings = [
"^.*/assert_less_equal/.*$",
"^.*/dilation_rate$",
"^.*/Tensordot/concat$",
"^.*/Tensordot/concat/axis$",
"^testing/.*$",
# TensorContracts:
"^Require.*$",
"^Ensure.*$",
"^Assert.*$",
"^Identity.*$",
"^ExpandDims.*$",
"^Squeeze.*$",
"^.*Const.*$",
"^MaxPool2d.*$",
"^.*Repeat.*shape$",
"^ones.*$",
"^concat.*$",
]
ignore_pattern = "|".join(ignore_strings)
ignore_regex = re.compile(ignore_pattern)
unreachable = get_unreachable_ops(sess.graph, outputs)
unreachable = [x for x in unreachable if not ignore_regex.match(x.name)]
self.assertEmpty(
unreachable, "The following ops are unreachable: {}".format(" ".join(
x.name for x in unreachable)))
def ids_tensor(shape,
vocab_size,
dynamic_batch_size,
rng=None,
name=None,
seed=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
if seed is not None:
rng.seed(seed)
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
const_tensor = tf.constant(
value=values, dtype=tf.int32, shape=shape, name=name)
  if dynamic_batch_size:
    placeholder_shape = copy.deepcopy(shape)
    placeholder_shape[0] = None
# Rather than having to pass back out values for the feed_dict all over
    # the place, we use placeholder_with_default and always use those defaults
# during testing.
return tf.placeholder_with_default(const_tensor, shape=placeholder_shape)
else:
return const_tensor
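# Example (hypothetical values) of how the helper above is used by the tests:
# it builds a [batch, seq_len] int32 tensor with ids in [0, vocab_size).
#
#   input_ids = ids_tensor([13, 16], vocab_size=99,
#                          dynamic_batch_size=False, seed=7)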
def get_unreachable_ops(graph, outputs):
"""Finds all of the tensors in graph that are unreachable from outputs."""
outputs = flatten_recursive(outputs)
output_to_op = collections.defaultdict(list)
op_to_all = collections.defaultdict(list)
assign_out_to_in = collections.defaultdict(list)
for op in graph.get_operations():
for x in op.inputs:
op_to_all[op.name].append(x.name)
for y in op.outputs:
output_to_op[y.name].append(op.name)
op_to_all[op.name].append(y.name)
if str(op.type) == "Assign":
for y in op.outputs:
for x in op.inputs:
assign_out_to_in[y.name].append(x.name)
assign_groups = collections.defaultdict(list)
for out_name in assign_out_to_in.keys():
name_group = assign_out_to_in[out_name]
for n1 in name_group:
assign_groups[n1].append(out_name)
for n2 in name_group:
if n1 != n2:
assign_groups[n1].append(n2)
seen_tensors = set()
stack = [x.name for x in outputs]
while stack:
name = stack.pop()
if name in seen_tensors:
continue
seen_tensors.add(name)
if name in output_to_op:
for op_name in output_to_op[name]:
if op_name in op_to_all:
for input_name in op_to_all[op_name]:
if input_name not in stack:
stack.append(input_name)
stack.extend([n for n in assign_groups[name] if n not in stack])
unreachable_ops = []
for op in graph.get_operations():
all_names = [x.name for x in list(op.inputs) + op.outputs]
is_unreachable = any(name for name in all_names if name not in seen_tensors)
if is_unreachable:
unreachable_ops.append(op)
return unreachable_ops
def flatten_recursive(item):
"""Flattens (potentially nested) a tuple/dictionary/list to a list."""
output = []
if isinstance(item, (list, tuple)):
output.extend(item)
elif isinstance(item, collections.Mapping):
output.extend(item.values())
else:
return [item]
return itertools.chain.from_iterable(map(flatten_recursive, output))
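# A small worked example (assumes dict insertion order, i.e. Python 3.7+):
#
#   list(flatten_recursive({"a": [1, 2], "b": (3,)}))  # -> [1, 2, 3]
#
# Nested lists/tuples and mapping values are flattened recursively; any other
# object is treated as a single leaf.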
if __name__ == "__main__":
tf.test.main()
|
google-research/language
|
language/canine/modeling_test.py
|
Python
|
apache-2.0
| 9,977
|
#!/usr/bin/env python
# Copyright 2016 Daniel Nunes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import makedirs, listdir
from os.path import expanduser, normpath, basename, join, relpath, isdir, isfile, abspath
from io import BytesIO
from threading import Thread
from queue import Queue
from webbrowser import open_new_tab
from datetime import datetime
from collections import deque
from json import JSONDecodeError
from jsonpickle import encode, decode, set_encoder_options
from lxml.etree import parse, tostring, Comment
from PyQt5.QtWidgets import (QFileDialog, QColorDialog, QMessageBox, QLabel, QHBoxLayout, QCommandLinkButton, QDialog,
QFormLayout, QLineEdit, QSpinBox, QComboBox, QWidget, QPushButton, QSizePolicy, QStatusBar,
QCompleter, QApplication, QMainWindow, QUndoCommand, QUndoStack, QMenu, QHeaderView,
QAction, QVBoxLayout, QGroupBox, QCheckBox, QRadioButton)
from PyQt5.QtGui import QIcon, QPixmap, QColor, QFont, QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt, pyqtSignal, QStringListModel, QMimeData, QEvent
from PyQt5.uic import loadUi
from requests import get, head, codes, ConnectionError, Timeout
from validator import validate_tree, check_warnings, ValidatorError, ValidationError, WarningError, MissingFolderError
from . import cur_folder, __version__
from .nodes import _NodeElement, NodeComment
from .io import import_, new, export, node_factory, copy_node
from .previews import PreviewDispatcherThread
from .props import PropertyFile, PropertyColour, PropertyFolder, PropertyCombo, PropertyInt, PropertyText, \
PropertyFlagLabel, PropertyFlagValue, PropertyHTML
from .exceptions import DesignerError
from .ui_templates import window_intro, window_mainframe, window_about, window_settings, window_texteditor, \
window_plaintexteditor, preview_mo
class IntroWindow(QMainWindow, window_intro.Ui_MainWindow):
"""
    The class for the intro window. Subclassed from QMainWindow and created in Qt Designer.
"""
def __init__(self):
super().__init__()
self.setupUi(self)
self.setWindowIcon(QIcon(join(cur_folder, "resources/window_icon.svg")))
self.setWindowTitle("FOMOD Designer")
self.version.setText("Version " + __version__)
self.settings_dict = read_settings()
recent_files = self.settings_dict["Recent Files"]
for path in recent_files:
if not isdir(path):
recent_files.remove(path)
continue
button = QCommandLinkButton(basename(path), path, self)
button.setIcon(QIcon(join(cur_folder, "resources/logos/logo_enter.png")))
button.clicked.connect(lambda _, path_=path: self.open_path(path_))
self.scroll_layout.addWidget(button)
if not self.settings_dict["General"]["show_intro"]:
main_window = MainFrame()
main_window.move(self.pos())
main_window.show()
self.close()
else:
self.show()
self.new_button.clicked.connect(lambda: self.open_path(""))
self.button_help.clicked.connect(MainFrame.help)
self.button_about.clicked.connect(lambda _, self_=self: MainFrame.about(self_))
def open_path(self, path):
"""
        Method used to open a path in the main window - closes the intro window and shows the main one.
:param path: The path to open.
"""
main_window = MainFrame()
self_center = self.mapToGlobal(self.rect().center())
main_center = main_window.mapToGlobal(main_window.rect().center())
main_window.move(self_center - main_center)
main_window.open(path)
main_window.show()
self.close()
if self.settings_dict["General"]["tutorial_advanced"]:
main_window.setEnabled(False)
tutorial = loadUi(join(cur_folder, "resources/templates/tutorial_advanced.ui"))
tutorial.frame_node.resize(main_window.node_tree_view.size())
tutorial.frame_node.move(
main_window.node_tree_view.mapTo(main_window, main_window.node_tree_view.pos())
)
tutorial.frame_preview.resize(main_window.tabWidget.size())
tutorial.frame_preview.move(
main_window.tabWidget.mapTo(main_window, main_window.tabWidget.pos())
)
tutorial.frame_prop.resize(main_window.dockWidgetContents.size())
tutorial.frame_prop.move(
main_window.dockWidgetContents.mapTo(main_window, main_window.dockWidgetContents.pos())
)
tutorial.frame_child.resize(main_window.dockWidgetContents_3.size())
tutorial.frame_child.move(
main_window.dockWidgetContents_3.mapTo(main_window, main_window.dockWidgetContents_3.pos())
)
tutorial.button_exit.clicked.connect(lambda: main_window.setEnabled(True))
tutorial.button_exit.clicked.connect(tutorial.close)
tutorial.setParent(main_window)
tutorial.setWindowFlags(Qt.FramelessWindowHint | Qt.Dialog)
tutorial.setAttribute(Qt.WA_TranslucentBackground)
main_center = main_window.mapToGlobal(main_window.rect().center())
tutorial_center = tutorial.mapToGlobal(tutorial.rect().center())
tutorial.move(main_center - tutorial_center)
tutorial.setEnabled(True)
tutorial.exec_()
self.settings_dict["General"]["tutorial_advanced"] = False
self.settings_dict["General"]["show_intro"] = not self.check_intro.isChecked()
self.settings_dict["General"]["show_advanced"] = self.check_advanced.isChecked()
makedirs(join(expanduser("~"), ".fomod"), exist_ok=True)
with open(join(expanduser("~"), ".fomod", ".designer"), "w") as configfile:
set_encoder_options("json", indent=4)
configfile.write(encode(self.settings_dict))
class MainFrame(QMainWindow, window_mainframe.Ui_MainWindow):
"""
The class for the main window. Subclassed from QMainWindow and created in Qt Designer.
"""
#: Signals the xml code has changed.
xml_code_changed = pyqtSignal([object])
#: Signals the code preview is updated.
update_code_preview = pyqtSignal([str])
#: Signals there is an update available.
update_check_update_available = pyqtSignal()
#: Signals the app is up-to-date.
update_check_up_to_date = pyqtSignal()
#: Signals a connection timed out.
update_check_timeout = pyqtSignal()
#: Signals there was an error with the internet connection.
update_check_connection_error = pyqtSignal()
#: Signals a new node has been selected in the node tree.
select_node = pyqtSignal([object])
#: Signals the previews need to be updated.
update_previews = pyqtSignal([object])
class NodeMimeData(QMimeData):
def __init__(self):
super().__init__()
self._node = None
self._item = None
self._original_item = None
def has_node(self):
if self._node is None:
return False
else:
return True
def node(self):
return self._node
def set_node(self, node):
self._node = node
def has_item(self):
if self._item is None:
return False
else:
return True
def item(self):
return self._item
def set_item(self, item):
self._item = item
def original_item(self):
return self._original_item
def set_original_item(self, item):
self._original_item = item
class NodeStandardModel(QStandardItemModel):
def mimeData(self, index_list):
if not index_list:
return 0
mime_data = MainFrame.NodeMimeData()
new_node = copy_node(self.itemFromIndex(index_list[0]).xml_node)
mime_data.set_item(new_node.model_item)
mime_data.set_node(new_node)
mime_data.set_original_item(self.itemFromIndex(index_list[0]))
return mime_data
def canDropMimeData(self, mime_data, drop_action, row, col, parent_index):
if self.itemFromIndex(parent_index) and mime_data.has_node() and mime_data.has_item() and drop_action == 2:
if isinstance(self.itemFromIndex(parent_index).xml_node, type(mime_data.node().getparent())):
return True
else:
return False
else:
return False
def dropMimeData(self, mime_data, drop_action, row, col, parent_index):
if not self.canDropMimeData(mime_data, drop_action, row, col, parent_index):
return False
parent = self.itemFromIndex(parent_index)
xml_node = mime_data.node()
parent.xml_node.remove(mime_data.original_item().xml_node)
parent.xml_node.append(mime_data.node())
parent.insertRow(row, xml_node.model_item)
for row_index in range(0, parent.rowCount()):
if parent.child(row_index) == mime_data.original_item():
continue
parent.child(row_index).xml_node.user_sort_order = str(parent.child(row_index).row()).zfill(7)
parent.child(row_index).xml_node.save_metadata()
return True
def supportedDragActions(self):
return Qt.MoveAction
class LineEditChangeCommand(QUndoCommand):
def __init__(self, original_text, new_text, current_prop_widgets, widget_index, tree_model, item, select_node):
super().__init__("Line edit changed.")
self.original_text = original_text
self.new_text = new_text
self.current_prop_widgets = current_prop_widgets
self.widget_index = widget_index
self.tree_model = tree_model
self.item = item
self.select_node = select_node
def redo(self):
self.select_node.emit(self.tree_model.indexFromItem(self.item))
self.current_prop_widgets[self.widget_index].setText(self.new_text)
def undo(self):
self.select_node.emit(self.tree_model.indexFromItem(self.item))
self.current_prop_widgets[self.widget_index].setText(self.original_text)
class WidgetLineEditChangeCommand(QUndoCommand):
def __init__(self, original_text, new_text, current_prop_widgets, widget_index, tree_model, item, select_node):
super().__init__("Widget with line edit changed.")
self.original_text = original_text
self.new_text = new_text
self.current_prop_widgets = current_prop_widgets
self.widget_index = widget_index
self.tree_model = tree_model
self.item = item
self.select_node = select_node
def redo(self):
self.select_node.emit(self.tree_model.indexFromItem(self.item))
line_edit = None
for index in range(self.current_prop_widgets[self.widget_index].layout().count()):
widget = self.current_prop_widgets[self.widget_index].layout().itemAt(index).widget()
if isinstance(widget, QLineEdit):
line_edit = widget
line_edit.setText(self.new_text)
def undo(self):
self.select_node.emit(self.tree_model.indexFromItem(self.item))
line_edit = None
for index in range(self.current_prop_widgets[self.widget_index].layout().count()):
widget = self.current_prop_widgets[self.widget_index].layout().itemAt(index).widget()
if isinstance(widget, QLineEdit):
line_edit = widget
line_edit.setText(self.original_text)
class ComboBoxChangeCommand(QUndoCommand):
def __init__(self, original_text, new_text, current_prop_widgets, widget_index, tree_model, item, select_node):
super().__init__("Combo box changed.")
self.original_text = original_text
self.new_text = new_text
self.current_prop_widgets = current_prop_widgets
self.widget_index = widget_index
self.tree_model = tree_model
self.item = item
self.select_node = select_node
def redo(self):
self.select_node.emit(self.tree_model.indexFromItem(self.item))
self.current_prop_widgets[self.widget_index].setCurrentText(self.new_text)
def undo(self):
self.select_node.emit(self.tree_model.indexFromItem(self.item))
self.current_prop_widgets[self.widget_index].setCurrentText(self.original_text)
class SpinBoxChangeCommand(QUndoCommand):
def __init__(self, original_int, new_int, current_prop_widgets, widget_index, tree_model, item, select_node):
super().__init__("Spin box changed.")
self.original_int = original_int
self.new_int = new_int
self.current_prop_widgets = current_prop_widgets
self.widget_index = widget_index
self.tree_model = tree_model
self.item = item
self.select_node = select_node
def redo(self):
self.select_node.emit(self.tree_model.indexFromItem(self.item))
self.current_prop_widgets[self.widget_index].setValue(self.new_int)
def undo(self):
self.select_node.emit(self.tree_model.indexFromItem(self.item))
self.current_prop_widgets[self.widget_index].setValue(self.original_int)
class RunWizardCommand(QUndoCommand):
def __init__(self, parent_node, original_node, modified_node, tree_model, select_node_signal):
super().__init__("Wizard was run on this node.")
self.parent_node = parent_node
self.original_node = original_node
self.modified_node = modified_node
self.tree_model = tree_model
self.select_node_signal = select_node_signal
def redo(self):
self.parent_node.remove_child(self.original_node)
self.parent_node.add_child(self.modified_node)
self.parent_node.model_item.sortChildren(0)
self.select_node_signal.emit(self.tree_model.indexFromItem(self.modified_node.model_item))
def undo(self):
self.parent_node.remove_child(self.modified_node)
self.parent_node.add_child(self.original_node)
self.parent_node.model_item.sortChildren(0)
self.select_node_signal.emit(self.tree_model.indexFromItem(self.original_node.model_item))
class DeleteCommand(QUndoCommand):
def __init__(self, node_to_delete, tree_model, select_node_signal):
super().__init__("Node deleted.")
self.node_to_delete = node_to_delete
self.parent_node = node_to_delete.getparent()
self.tree_model = tree_model
self.select_node_signal = select_node_signal
def redo(self):
object_to_delete = self.node_to_delete
new_index = self.tree_model.indexFromItem(self.parent_node.model_item)
self.parent_node.remove_child(object_to_delete)
self.select_node_signal.emit(new_index)
def undo(self):
self.parent_node.add_child(self.node_to_delete)
self.select_node_signal.emit(self.tree_model.indexFromItem(self.node_to_delete.model_item))
self.tree_model.sort(0)
class AddChildCommand(QUndoCommand):
def __init__(self, child_tag, parent_node, tree_model, settings_dict, select_node_signal):
super().__init__("Child added.")
self.child_tag = child_tag
self.parent_node = parent_node
self.tree_model = tree_model
self.settings_dict = settings_dict
self.select_node_signal = select_node_signal
self.new_child_node = None
def redo(self):
if self.new_child_node is None:
self.new_child_node = node_factory(self.child_tag, self.parent_node)
defaults_dict = self.settings_dict["Defaults"]
if self.child_tag in defaults_dict and defaults_dict[self.child_tag].enabled():
self.new_child_node.properties[defaults_dict[self.child_tag].key()].set_value(
defaults_dict[self.child_tag].value()
)
self.parent_node.add_child(self.new_child_node)
self.tree_model.sort(0)
# select the new item
self.select_node_signal.emit(self.tree_model.indexFromItem(self.new_child_node.model_item))
def undo(self):
self.parent_node.remove_child(self.new_child_node)
# select the parent after removing
self.select_node_signal.emit(self.tree_model.indexFromItem(self.parent_node.model_item))
class PasteCommand(QUndoCommand):
def __init__(self, parent_item, status_bar, tree_model, select_node_signal):
super().__init__("Node pasted.")
self.parent_item = parent_item
self.status_bar = status_bar
self.tree_model = tree_model
self.select_node_signal = select_node_signal
self.pasted_node = None
def redo(self):
self.pasted_node = copy_node(QApplication.clipboard().mimeData().node())
self.parent_item.xml_node.append(self.pasted_node)
self.parent_item.appendRow(self.pasted_node.model_item)
self.parent_item.sortChildren(0)
def undo(self):
self.parent_item.xml_node.remove_child(self.pasted_node)
# select the parent after removing
self.select_node_signal.emit(self.tree_model.indexFromItem(self.parent_item.xml_node.model_item))
def __init__(self):
super().__init__()
self.setupUi(self)
# setup the icons properly
self.setWindowIcon(QIcon(join(cur_folder, "resources/window_icon.svg")))
self.action_Open.setIcon(QIcon(join(cur_folder, "resources/logos/logo_open_file.png")))
self.action_Save.setIcon(QIcon(join(cur_folder, "resources/logos/logo_floppy_disk.png")))
self.actionO_ptions.setIcon(QIcon(join(cur_folder, "resources/logos/logo_gear.png")))
self.action_Refresh.setIcon(QIcon(join(cur_folder, "resources/logos/logo_refresh.png")))
self.action_Delete.setIcon(QIcon(join(cur_folder, "resources/logos/logo_cross.png")))
self.action_About.setIcon(QIcon(join(cur_folder, "resources/logos/logo_notepad.png")))
self.actionHe_lp.setIcon(QIcon(join(cur_folder, "resources/logos/logo_info.png")))
self.actionCopy.setIcon(QIcon(join(cur_folder, "resources/logos/logo_copy.png")))
self.actionPaste.setIcon(QIcon(join(cur_folder, "resources/logos/logo_paste.png")))
self.actionRedo.setIcon(QIcon(join(cur_folder, "resources/logos/logo_redo.png")))
self.actionUndo.setIcon(QIcon(join(cur_folder, "resources/logos/logo_undo.png")))
self.actionClear.setIcon(QIcon(join(cur_folder, "resources/logos/logo_clear.png")))
self.menu_Recent_Files.setIcon(QIcon(join(cur_folder, "resources/logos/logo_recent.png")))
self.actionExpand_All.setIcon(QIcon(join(cur_folder, "resources/logos/logo_expand.png")))
self.actionCollapse_All.setIcon(QIcon(join(cur_folder, "resources/logos/logo_collapse.png")))
self.actionHide_Node.setIcon(QIcon(join(cur_folder, "resources/logos/logo_hide.png")))
self.actionShow_Node.setIcon(QIcon(join(cur_folder, "resources/logos/logo_show.png")))
# manage undo and redo
self.undo_stack = QUndoStack(self)
self.undo_stack.setUndoLimit(25)
self.undo_stack.canRedoChanged.connect(self.actionRedo.setEnabled)
self.undo_stack.canUndoChanged.connect(self.actionUndo.setEnabled)
self.actionRedo.triggered.connect(self.undo_stack.redo)
self.actionUndo.triggered.connect(self.undo_stack.undo)
# manage the node tree view
self.node_tree_view.clicked.connect(self.select_node.emit)
self.node_tree_view.activated.connect(self.select_node.emit)
self.node_tree_view.setContextMenuPolicy(Qt.CustomContextMenu)
self.node_tree_view.customContextMenuRequested.connect(self.on_custom_context_menu)
# manage node tree model
self.node_tree_model = self.NodeStandardModel()
self.node_tree_view.setModel(self.node_tree_model)
self.node_tree_model.itemChanged.connect(lambda item: item.xml_node.save_metadata())
self.node_tree_model.itemChanged.connect(lambda item: self.xml_code_changed.emit(item.xml_node))
# connect actions to the respective methods
self.action_Open.triggered.connect(self.open)
self.action_Save.triggered.connect(self.save)
self.actionO_ptions.triggered.connect(self.settings)
self.action_Refresh.triggered.connect(self.refresh)
self.action_Delete.triggered.connect(self.delete)
self.actionHide_Node.triggered.connect(self.hide_node)
self.actionShow_Node.triggered.connect(self.show_node)
self.actionHe_lp.triggered.connect(self.help)
self.action_About.triggered.connect(lambda _, self_=self: self.about(self_))
self.actionClear.triggered.connect(self.clear_recent_files)
self.actionCopy.triggered.connect(
lambda: self.copy_item_to_clipboard()
if self.node_tree_view.selectedIndexes() else None
)
self.actionPaste.triggered.connect(
lambda: self.paste_item_from_clipboard()
if self.node_tree_view.selectedIndexes() else None
)
self.actionExpand_All.triggered.connect(self.node_tree_view.expandAll)
self.actionCollapse_All.triggered.connect(self.node_tree_view.collapseAll)
self.action_Object_Tree.toggled.connect(self.node_tree.setVisible)
self.actionObject_Box.toggled.connect(self.children_box.setVisible)
self.action_Property_Editor.toggled.connect(self.property_editor.setVisible)
self.node_tree.visibilityChanged.connect(self.action_Object_Tree.setChecked)
self.children_box.visibilityChanged.connect(self.actionObject_Box.setChecked)
self.property_editor.visibilityChanged.connect(self.action_Property_Editor.setChecked)
# setup any necessary variables
self.original_title = self.windowTitle()
self._package_path = ""
self.package_name = ""
self.settings_dict = read_settings()
self._info_root = None
self._config_root = None
self._current_prop_list = []
self.original_prop_value_list = {}
# start the preview threads
self.preview_queue = Queue()
self.preview_gui_worker = PreviewMoGui(self.layout_mo)
self.update_previews.connect(self.preview_queue.put)
self.update_code_preview.connect(self.xml_code_browser.setHtml)
self.preview_thread = PreviewDispatcherThread(
self.preview_queue,
self.update_code_preview,
**{
"package_path": self.package_path,
"info_root": self.info_root,
"config_root": self.config_root,
"gui_worker": self.preview_gui_worker
}
)
self.preview_thread.start()
# manage the wizard button
self.button_wizard.clicked.connect(self.run_wizard)
# manage auto-completion
self.flag_label_model = QStringListModel()
self.flag_label_completer = QCompleter()
self.flag_label_completer.setCaseSensitivity(Qt.CaseInsensitive)
self.flag_label_completer.setModel(self.flag_label_model)
self.flag_value_model = QStringListModel()
self.flag_value_completer = QCompleter()
self.flag_value_completer.setCaseSensitivity(Qt.CaseInsensitive)
self.flag_value_completer.setModel(self.flag_value_model)
# connect node selected signal
self.current_node = None # type: _NodeElement
self.select_node.connect(
lambda index: self.set_current_node(self.node_tree_model.itemFromIndex(index).xml_node)
)
self.select_node.connect(lambda index: self.node_tree_view.setCurrentIndex(index))
self.select_node.connect(
lambda: self.update_previews.emit(self.current_node)
if self.settings_dict["General"]["code_refresh"] >= 2 else None
)
self.select_node.connect(self.update_children_box)
self.select_node.connect(self.update_props_list)
self.select_node.connect(lambda: self.action_Delete.setEnabled(True))
self.select_node.connect(
lambda: self.button_wizard.setEnabled(False)
if self.current_node.wizard is None else self.button_wizard.setEnabled(True)
)
self.select_node.connect(
lambda index: self.actionHide_Node.setEnabled(True)
if self.current_node is not self._config_root and
self.current_node is not self._info_root and
self.current_node not in self.current_node.getparent().hidden_children and
not self.current_node.allowed_instances
else self.actionHide_Node.setEnabled(False)
)
self.select_node.connect(
lambda index: self.actionShow_Node.setEnabled(True)
if self.current_node is not self._config_root and
self.current_node is not self._info_root and
self.current_node in self.current_node.getparent().hidden_children and
not self.current_node.allowed_instances
else self.actionShow_Node.setEnabled(False)
)
# manage code changed signal
self.xml_code_changed.connect(self.update_previews.emit)
# manage clean/dirty states
self.undo_stack.cleanChanged.connect(
lambda clean: self.setWindowTitle(self.package_name + " - " + self.original_title)
if clean
else self.setWindowTitle("*" + self.package_name + " - " + self.original_title)
)
self.undo_stack.cleanChanged.connect(
lambda clean: self.action_Save.setEnabled(not clean)
)
self.update_recent_files()
self.check_updates()
# disable the wizards until they're up-to-date
self.button_wizard.hide()
def on_custom_context_menu(self, position):
index = self.node_tree_view.indexAt(position)
node_tree_context_menu = QMenu(self.node_tree_view)
node_tree_context_menu.addActions([self.actionExpand_All, self.actionCollapse_All])
if index.isValid():
self.select_node.emit(index)
node_tree_context_menu.addSeparator()
node_tree_context_menu.addAction(self.action_Delete)
if self.current_node is not self._config_root and self.current_node is not self._info_root:
if self.current_node in self.current_node.getparent().hidden_children:
node_tree_context_menu.addAction(self.actionShow_Node)
else:
node_tree_context_menu.addAction(self.actionHide_Node)
node_tree_context_menu.addSeparator()
node_tree_context_menu.addActions([self.actionCopy, self.actionPaste])
node_tree_context_menu.addSeparator()
node_tree_context_menu.addActions([self.actionUndo, self.actionRedo])
node_tree_context_menu.move(self.node_tree_view.mapToGlobal(position))
node_tree_context_menu.exec_()
def set_current_node(self, selected_node):
self.current_node = selected_node
@property
def current_prop_list(self):
return self._current_prop_list
    @property
    def info_root(self):
        return self._info_root
    @property
    def config_root(self):
        return self._config_root
    @property
    def package_path(self):
        return self._package_path
def copy_item_to_clipboard(self):
item = self.node_tree_model.itemFromIndex(self.node_tree_view.selectedIndexes()[0])
QApplication.clipboard().setMimeData(self.node_tree_model.mimeData([self.node_tree_model.indexFromItem(item)]))
self.actionPaste.setEnabled(True)
def paste_item_from_clipboard(self):
parent_item = self.node_tree_model.itemFromIndex(self.node_tree_view.selectedIndexes()[0])
new_node = copy_node(QApplication.clipboard().mimeData().node())
if not parent_item.xml_node.can_add_child(new_node):
self.statusBar().showMessage("This parent is not valid!")
else:
self.undo_stack.push(
self.PasteCommand(
parent_item,
self.statusBar(),
self.node_tree_model,
self.select_node
)
)
@staticmethod
def update_flag_label_completer(label_model, elem_root):
label_list = []
for elem in elem_root.iter():
if elem.tag == "flag":
value = elem.properties["name"].value
if value not in label_list:
label_list.append(value)
label_model.setStringList(label_list)
@staticmethod
def update_flag_value_completer(value_model, elem_root, label):
value_list = []
for elem in elem_root.iter():
if elem.tag == "flag" and elem.text not in value_list and elem.properties["name"].value == label:
value_list.append(elem.text)
value_model.setStringList(value_list)
def check_updates(self):
"""
Checks the version number on the remote repository (Github Releases)
and compares it against the current version.
If the remote version is higher, then the user is warned in the status bar and advised to get the new one.
Otherwise, ignore.
"""
def update_available_button():
update_button = QPushButton("New Version Available!")
update_button.setFlat(True)
update_button.clicked.connect(lambda: open_new_tab("https://github.com/GandaG/fomod-designer/releases/latest"))
self.statusBar().addPermanentWidget(update_button)
def check_remote():
try:
response = get("https://api.github.com/repos/GandaG/fomod-designer/releases", timeout=10)
if response.status_code == codes.ok and response.json()[0]["tag_name"][1:] > __version__:
self.update_check_update_available.emit()
else:
self.update_check_up_to_date.emit()
except Timeout:
self.update_check_timeout.emit()
except ConnectionError:
self.update_check_connection_error.emit()
self.update_check_up_to_date.connect(lambda: self.setStatusBar(QStatusBar()))
self.update_check_up_to_date.connect(
lambda: self.statusBar().addPermanentWidget(QLabel("Everything is up-to-date."))
)
self.update_check_update_available.connect(lambda: self.setStatusBar(QStatusBar()))
self.update_check_update_available.connect(update_available_button)
self.update_check_timeout.connect(lambda: self.setStatusBar(QStatusBar()))
self.update_check_timeout.connect(lambda: self.statusBar().addPermanentWidget(QLabel("Connection timed out.")))
self.update_check_connection_error.connect(lambda: self.setStatusBar(QStatusBar()))
self.update_check_connection_error.connect(
lambda: self.statusBar().addPermanentWidget(QLabel(
"Could not connect to remote server, check your internet connection."
))
)
self.statusBar().addPermanentWidget(QLabel("Checking for updates..."))
Thread(target=check_remote).start()
def hide_node(self):
if self.current_node is not None:
self.current_node.set_hidden(True)
def show_node(self):
if self.current_node is not None:
self.current_node.set_hidden(False)
def open(self, path=""):
"""
Open a new installer if one exists at path (if no path is given a dialog pops up asking the user to choose one)
or create a new one.
If enabled in the Settings the installer is also validated and checked for common errors.
:param path: Optional. The path to open/create an installer at.
"""
try:
answer = self.check_fomod_state()
if answer == QMessageBox.Save:
self.save()
elif answer == QMessageBox.Cancel:
return
else:
pass
if not path:
open_dialog = QFileDialog()
package_path = open_dialog.getExistingDirectory(self, "Select package root directory:", expanduser("~"))
else:
package_path = path
if package_path:
info_root, config_root = import_(normpath(package_path))
if info_root is not None and config_root is not None:
if self.settings_dict["Load"]["validate"]:
try:
validate_tree(
parse(BytesIO(tostring(config_root, pretty_print=True))),
join(cur_folder, "resources", "mod_schema.xsd"),
)
except ValidationError as p:
generic_errorbox(p.title, str(p), p.detailed).exec_()
if not self.settings_dict["Load"]["validate_ignore"]:
return
if self.settings_dict["Load"]["warnings"]:
try:
check_warnings(
package_path,
config_root,
)
except WarningError as p:
generic_errorbox(p.title, str(p), p.detailed).exec_()
if not self.settings_dict["Save"]["warn_ignore"]:
return
else:
info_root, config_root = new()
self._package_path = package_path
self._info_root, self._config_root = info_root, config_root
self.node_tree_model.clear()
self.node_tree_model.appendRow(self._info_root.model_item)
self.node_tree_model.appendRow(self._config_root.model_item)
self.package_name = basename(normpath(self._package_path))
self.current_node = None
self.xml_code_changed.emit(self.current_node)
self.undo_stack.setClean()
self.undo_stack.cleanChanged.emit(True)
self.undo_stack.clear()
QApplication.clipboard().clear()
self.actionPaste.setEnabled(False)
self.action_Delete.setEnabled(False)
self.update_recent_files(self._package_path)
self.clear_prop_list()
self.button_wizard.setEnabled(False)
except (DesignerError, ValidatorError) as p:
generic_errorbox(p.title, str(p), p.detailed).exec_()
return
def save(self):
"""
Saves the current installer at the current path.
If enabled in the Settings the installer is also validated and checked for common errors.
"""
try:
if self._info_root is None and self._config_root is None:
return
elif not self.undo_stack.isClean():
self._info_root.sort()
self._config_root.sort()
if self.settings_dict["Save"]["validate"]:
try:
validate_tree(
parse(BytesIO(tostring(self._config_root, pretty_print=True))),
join(cur_folder, "resources", "mod_schema.xsd"),
)
except ValidationError as e:
generic_errorbox(e.title, str(e), e.detailed).exec_()
if not self.settings_dict["Save"]["validate_ignore"]:
return
if self.settings_dict["Save"]["warnings"]:
try:
check_warnings(
self._package_path,
self._config_root,
)
except MissingFolderError:
pass
except WarningError as e:
generic_errorbox(e.title, str(e), e.detailed).exec_()
if not self.settings_dict["Save"]["warn_ignore"]:
return
export(self._info_root, self._config_root, self._package_path)
self.undo_stack.setClean()
except (DesignerError, ValidatorError) as e:
generic_errorbox(e.title, str(e), e.detailed).exec_()
return
def settings(self):
"""
Opens the Settings dialog.
"""
config = SettingsDialog(self)
config.exec_()
self.settings_dict = read_settings()
def refresh(self):
"""
Refreshes all the previews if the refresh rate in Settings is high enough.
"""
if self.settings_dict["General"]["code_refresh"] >= 1:
self.update_previews.emit(self.current_node)
def delete(self):
"""
Deletes the current node in the tree. No effect when using the Basic View.
"""
if self.current_node is None:
self.statusBar().showMessage("Can't delete nothing.")
elif self.current_node.getparent() is None:
self.statusBar().showMessage("Can't delete root nodes.")
else:
if self.current_node.is_hidden:
self.current_node.set_hidden(False)
self.undo_stack.push(self.DeleteCommand(
self.current_node,
self.node_tree_model,
self.select_node
))
@staticmethod
def help():
docs_url = "http://fomod-designer.readthedocs.io/en/stable/index.html"
local_docs = "file://" + abspath(join(cur_folder, "resources", "docs", "index.html"))
try:
if head(docs_url, timeout=0.5).status_code == codes.ok:
open_new_tab(docs_url)
else:
raise ConnectionError()
except (Timeout, ConnectionError):
open_new_tab(local_docs)
@staticmethod
def about(parent):
"""
Opens the About dialog. This method is static to be able to be called from the Intro window.
:param parent: The parent of the dialog.
"""
about_dialog = About(parent)
about_dialog.exec_()
def clear_recent_files(self):
"""
Clears the Recent Files gui menu and settings.
"""
self.settings_dict["Recent Files"].clear()
makedirs(join(expanduser("~"), ".fomod"), exist_ok=True)
with open(join(expanduser("~"), ".fomod", ".designer"), "w") as configfile:
set_encoder_options("json", indent=4)
configfile.write(encode(self.settings_dict))
for child in self.menu_Recent_Files.actions():
if child is not self.actionClear:
self.menu_Recent_Files.removeAction(child)
del child
def update_recent_files(self, add_new=None):
"""
Updates the Recent Files gui menu and settings. If called when opening an installer, pass that installer as
        add_new so it can be added to the list or placed at the top.
:param add_new: If a new installer is being opened, add it to the list or move it to the top.
"""
file_list = deque(self.settings_dict["Recent Files"], maxlen=5)
self.clear_recent_files()
# check for invalid paths and remove them
for path in file_list:
if not isdir(path):
file_list.remove(path)
# check if the path is new or if it already exists - delete the last one or reorder respectively
if add_new:
if add_new in file_list:
file_list.remove(add_new)
file_list.appendleft(add_new)
# write the new list to the settings file
self.settings_dict["Recent Files"] = file_list
makedirs(join(expanduser("~"), ".fomod"), exist_ok=True)
with open(join(expanduser("~"), ".fomod", ".designer"), "w") as configfile:
set_encoder_options("json", indent=4)
configfile.write(encode(self.settings_dict))
# populate the gui menu with the new files list
self.menu_Recent_Files.removeAction(self.actionClear)
for path in self.settings_dict["Recent Files"]:
action = self.menu_Recent_Files.addAction(path)
action.triggered.connect(lambda _, path_=path: self.open(path_))
self.menu_Recent_Files.addSeparator()
self.menu_Recent_Files.addAction(self.actionClear)
def update_children_box(self):
"""
        Updates the possible children to add in the Object Box.
"""
spacer = self.layout_box.takeAt(self.layout_box.count() - 1)
for index in reversed(range(self.layout_box.count())):
widget = self.layout_box.takeAt(index).widget()
if widget is not None:
widget.deleteLater()
children_list = list(self.current_node.allowed_children)
if self.current_node.tag is not Comment:
children_list.insert(0, NodeComment)
for child in children_list:
new_object = child()
child_button = QPushButton(new_object.name)
font_button = QFont()
font_button.setPointSize(8)
child_button.setFont(font_button)
child_button.setMaximumSize(5000, 30)
child_button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
child_button.setStatusTip("A possible child node.")
child_button.clicked.connect(
lambda _,
tag_=new_object.tag,
parent_node=self.current_node,
tree_model=self.node_tree_model,
settings_dict=self.settings_dict,
: self.undo_stack.push(self.AddChildCommand(
tag_,
parent_node,
tree_model,
settings_dict,
self.select_node
))
)
if not self.current_node.can_add_child(new_object):
child_button.setEnabled(False)
if child in self.current_node.required_children:
child_button.setStyleSheet(
"background-color: " + QColor(self.settings_dict["Appearance"]["required_colour"]).name()
)
child_button.setStatusTip(
"A button of this colour indicates that at least one of this node is required."
)
if child in self.current_node.either_children_group:
child_button.setStyleSheet(
"background-color: " + QColor(self.settings_dict["Appearance"]["either_colour"]).name()
)
child_button.setStatusTip(
"A button of this colour indicates that only one of these buttons must be used."
)
if child in self.current_node.at_least_one_children_group:
child_button.setStyleSheet(
"background-color: " + QColor(self.settings_dict["Appearance"]["atleastone_colour"]).name()
)
child_button.setStatusTip(
"A button of this colour indicates that from all of these buttons, at least one is required."
)
self.layout_box.addWidget(child_button)
self.layout_box.addSpacerItem(spacer)
def clear_prop_list(self):
"""
Deletes all the properties from the Property Editor
"""
self._current_prop_list.clear()
for index in reversed(range(self.layout_prop_editor.count())):
widget = self.layout_prop_editor.takeAt(index).widget()
if widget is not None:
widget.deleteLater()
def update_props_list(self):
"""
Updates the Property Editor's prop list. Deletes everything and
then creates the list from the node's properties.
"""
self.clear_prop_list()
prop_index = 0
og_values = self.original_prop_value_list
prop_list = self._current_prop_list
props = self.current_node.properties
for key in props:
if not props[key].editable:
continue
label = QLabel(self.dockWidgetContents)
label.setObjectName("label_" + str(prop_index))
label.setText(props[key].name)
self.layout_prop_editor.setWidget(prop_index, QFormLayout.LabelRole, label)
if type(props[key]) is PropertyText:
def open_plain_editor(line_edit_, node):
dialog_ui = window_plaintexteditor.Ui_Dialog()
dialog = QDialog(self)
dialog_ui.setupUi(dialog)
dialog_ui.edit_text.setPlainText(line_edit_.text())
if node.tag is Comment:
for sequence in node.forbidden_sequences:
dialog_ui.edit_text.textChanged.connect(
lambda: dialog_ui.edit_text.setText(
dialog_ui.edit_text.toPlainText().replace(sequence, "")
) if sequence in dialog_ui.edit_text.toPlainText() else None
)
dialog_ui.buttonBox.accepted.connect(dialog.close)
dialog_ui.buttonBox.accepted.connect(lambda: line_edit_.setText(dialog_ui.edit_text.toPlainText()))
dialog_ui.buttonBox.accepted.connect(line_edit_.editingFinished.emit)
dialog.exec_()
og_values[prop_index] = props[key].value
prop_list.append(QWidget(self.dockWidgetContents))
layout = QHBoxLayout(prop_list[prop_index])
text_edit = QLineEdit(prop_list[prop_index])
text_button = QPushButton(prop_list[prop_index])
text_button.setText("...")
text_button.setMaximumWidth(30)
layout.addWidget(text_edit)
layout.addWidget(text_button)
layout.setContentsMargins(0, 0, 0, 0)
text_edit.setText(props[key].value)
if self.current_node.tag is Comment:
for sequence in self.current_node.forbidden_sequences:
text_edit.textChanged.connect(
lambda: text_edit.setText(
text_edit.text().replace(sequence, "")
) if sequence in text_edit.text() else None
)
text_edit.textChanged.connect(props[key].set_value)
text_edit.textChanged[str].connect(self.current_node.write_attribs)
text_edit.textChanged[str].connect(self.current_node.update_item_name)
text_edit.textChanged[str].connect(
lambda: self.xml_code_changed.emit(self.current_node)
if self.settings_dict["General"]["code_refresh"] >= 3 else None
)
text_edit.editingFinished.connect(
lambda index=prop_index: self.undo_stack.push(
self.WidgetLineEditChangeCommand(
og_values[index],
text_edit.text(),
self.current_prop_list,
index,
self.node_tree_model,
self.current_node.model_item,
self.select_node
)
)
if og_values[index] != text_edit.text() else None
)
text_edit.editingFinished.connect(
lambda index=prop_index: og_values.update({index: text_edit.text()})
)
text_button.clicked.connect(
lambda _, line_edit_=text_edit, node=self.current_node: open_plain_editor(line_edit_, node)
)
if type(props[key]) is PropertyHTML:
def open_plain_editor(line_edit_):
dialog_ui = window_texteditor.Ui_Dialog()
dialog = QDialog(self)
dialog_ui.setupUi(dialog)
dialog_ui.radio_html.toggled.connect(dialog_ui.widget_warning.setVisible)
dialog_ui.button_colour.clicked.connect(
lambda: dialog_ui.edit_text.setTextColor(QColorDialog.getColor())
)
dialog_ui.button_bold.clicked.connect(
lambda: dialog_ui.edit_text.setFontWeight(QFont.Bold)
if dialog_ui.edit_text.fontWeight() == QFont.Normal
else dialog_ui.edit_text.setFontWeight(QFont.Normal)
)
dialog_ui.button_italic.clicked.connect(
lambda: dialog_ui.edit_text.setFontItalic(not dialog_ui.edit_text.fontItalic())
)
dialog_ui.button_underline.clicked.connect(
lambda: dialog_ui.edit_text.setFontUnderline(not dialog_ui.edit_text.fontUnderline())
)
dialog_ui.button_align_left.clicked.connect(
lambda: dialog_ui.edit_text.setAlignment(Qt.AlignLeft)
)
dialog_ui.button_align_center.clicked.connect(
lambda: dialog_ui.edit_text.setAlignment(Qt.AlignCenter)
)
dialog_ui.button_align_right.clicked.connect(
lambda: dialog_ui.edit_text.setAlignment(Qt.AlignRight)
)
dialog_ui.button_align_justify.clicked.connect(
lambda: dialog_ui.edit_text.setAlignment(Qt.AlignJustify)
)
dialog_ui.buttonBox.accepted.connect(dialog.close)
dialog_ui.buttonBox.accepted.connect(
lambda: line_edit_.setText(dialog_ui.edit_text.toPlainText())
if dialog_ui.radio_plain.isChecked()
else line_edit_.setText(dialog_ui.edit_text.toHtml())
)
dialog_ui.buttonBox.accepted.connect(line_edit_.editingFinished.emit)
dialog_ui.widget_warning.hide()
dialog_ui.label_warning.setPixmap(QPixmap(join(cur_folder, "resources/logos/logo_danger.png")))
dialog_ui.button_colour.setIcon(QIcon(join(cur_folder, "resources/logos/logo_font_colour.png")))
dialog_ui.button_bold.setIcon(QIcon(join(cur_folder, "resources/logos/logo_font_bold.png")))
dialog_ui.button_italic.setIcon(QIcon(join(cur_folder, "resources/logos/logo_font_italic.png")))
dialog_ui.button_underline.setIcon(QIcon(
join(cur_folder, "resources/logos/logo_font_underline.png")
))
dialog_ui.button_align_left.setIcon(QIcon(
join(cur_folder, "resources/logos/logo_font_align_left.png")
))
dialog_ui.button_align_center.setIcon(QIcon(
join(cur_folder, "resources/logos/logo_font_align_center.png")
))
dialog_ui.button_align_right.setIcon(QIcon(
join(cur_folder, "resources/logos/logo_font_align_right.png")
))
dialog_ui.button_align_justify.setIcon(QIcon(
join(cur_folder, "resources/logos/logo_font_align_justify.png")
))
dialog_ui.edit_text.setText(line_edit_.text())
dialog.exec_()
og_values[prop_index] = props[key].value
prop_list.append(QWidget(self.dockWidgetContents))
layout = QHBoxLayout(prop_list[prop_index])
text_edit = QLineEdit(prop_list[prop_index])
text_button = QPushButton(prop_list[prop_index])
text_button.setText("...")
text_button.setMaximumWidth(30)
layout.addWidget(text_edit)
layout.addWidget(text_button)
layout.setContentsMargins(0, 0, 0, 0)
text_edit.setText(props[key].value)
text_edit.textChanged.connect(props[key].set_value)
text_edit.textChanged[str].connect(self.current_node.write_attribs)
text_edit.textChanged[str].connect(self.current_node.update_item_name)
text_edit.textChanged[str].connect(
lambda: self.xml_code_changed.emit(self.current_node)
if self.settings_dict["General"]["code_refresh"] >= 3 else None
)
text_edit.editingFinished.connect(
lambda index=prop_index: self.undo_stack.push(
self.WidgetLineEditChangeCommand(
og_values[index],
text_edit.text(),
self.current_prop_list,
index,
self.node_tree_model,
self.current_node.model_item,
self.select_node
)
)
if og_values[index] != text_edit.text() else None
)
text_edit.editingFinished.connect(
lambda index=prop_index: og_values.update({index: text_edit.text()})
)
text_button.clicked.connect(lambda _, line_edit_=text_edit: open_plain_editor(line_edit_))
if type(props[key]) is PropertyFlagLabel:
og_values[prop_index] = props[key].value
prop_list.append(QLineEdit(self.dockWidgetContents))
self.update_flag_label_completer(self.flag_label_model, self._config_root)
self.flag_label_completer.activated[str].connect(prop_list[prop_index].setText)
prop_list[prop_index].setCompleter(self.flag_label_completer)
prop_list[prop_index].textChanged[str].connect(
lambda text: self.update_flag_value_completer(self.flag_value_model, self._config_root, text)
)
prop_list[prop_index].setText(props[key].value)
prop_list[prop_index].textChanged[str].connect(props[key].set_value)
prop_list[prop_index].textChanged[str].connect(self.current_node.write_attribs)
prop_list[prop_index].textChanged[str].connect(self.current_node.update_item_name)
prop_list[prop_index].textChanged[str].connect(
lambda: self.xml_code_changed.emit(self.current_node)
if self.settings_dict["General"]["code_refresh"] >= 3 else None
)
prop_list[prop_index].editingFinished.connect(
lambda index=prop_index: self.undo_stack.push(
self.LineEditChangeCommand(
og_values[index],
prop_list[index].text(),
self.current_prop_list,
index,
self.node_tree_model,
self.current_node.model_item,
self.select_node
)
)
if og_values[index] != prop_list[index].text() else None
)
prop_list[prop_index].editingFinished.connect(
lambda index=prop_index: og_values.update({index: prop_list[index].text()})
)
if type(props[key]) is PropertyFlagValue:
og_values[prop_index] = props[key].value
prop_list.append(QLineEdit(self.dockWidgetContents))
prop_list[prop_index].setCompleter(self.flag_value_completer)
self.flag_value_completer.activated[str].connect(prop_list[prop_index].setText)
prop_list[prop_index].setText(props[key].value)
prop_list[prop_index].textChanged[str].connect(props[key].set_value)
prop_list[prop_index].textChanged[str].connect(self.current_node.write_attribs)
prop_list[prop_index].textChanged[str].connect(self.current_node.update_item_name)
prop_list[prop_index].textChanged[str].connect(
lambda: self.xml_code_changed.emit(self.current_node)
if self.settings_dict["General"]["code_refresh"] >= 3 else None
)
prop_list[prop_index].editingFinished.connect(
lambda index=prop_index: self.undo_stack.push(
self.LineEditChangeCommand(
og_values[index],
prop_list[index].text(),
self.current_prop_list,
index,
self.node_tree_model,
self.current_node.model_item,
self.select_node
)
)
if og_values[index] != prop_list[index].text() else None
)
prop_list[prop_index].editingFinished.connect(
lambda index=prop_index: og_values.update({index: prop_list[index].text()})
)
elif type(props[key]) is PropertyInt:
og_values[prop_index] = props[key].value
prop_list.append(QSpinBox(self.dockWidgetContents))
prop_list[prop_index].setValue(int(props[key].value))
prop_list[prop_index].setMinimum(props[key].min)
prop_list[prop_index].setMaximum(props[key].max)
prop_list[prop_index].valueChanged.connect(props[key].set_value)
prop_list[prop_index].valueChanged.connect(self.current_node.write_attribs)
prop_list[prop_index].valueChanged.connect(
lambda: self.xml_code_changed.emit(self.current_node)
if self.settings_dict["General"]["code_refresh"] >= 3 else None
)
prop_list[prop_index].valueChanged.connect(
lambda new_value, index=prop_index: self.undo_stack.push(
self.SpinBoxChangeCommand(
og_values[index],
new_value,
self.current_prop_list,
index,
self.node_tree_model,
self.current_node.model_item,
self.select_node
)
)
if og_values[index] != new_value else None
)
prop_list[prop_index].valueChanged.connect(
lambda new_value, index=prop_index: og_values.update({index: new_value})
)
elif type(props[key]) is PropertyCombo:
og_values[prop_index] = props[key].value
prop_list.append(QComboBox(self.dockWidgetContents))
prop_list[prop_index].insertItems(0, props[key].values)
prop_list[prop_index].setCurrentIndex(props[key].values.index(props[key].value))
prop_list[prop_index].currentTextChanged.connect(props[key].set_value)
prop_list[prop_index].currentTextChanged.connect(self.current_node.write_attribs)
prop_list[prop_index].currentTextChanged.connect(self.current_node.update_item_name)
prop_list[prop_index].currentTextChanged.connect(
lambda: self.xml_code_changed.emit(self.current_node)
if self.settings_dict["General"]["code_refresh"] >= 3 else None
)
prop_list[prop_index].activated[str].connect(
lambda new_value, index=prop_index: self.undo_stack.push(
self.ComboBoxChangeCommand(
og_values[index],
new_value,
self.current_prop_list,
index,
self.node_tree_model,
self.current_node.model_item,
self.select_node
)
)
)
prop_list[prop_index].activated[str].connect(
lambda new_value, index=prop_index: og_values.update({index: new_value})
)
elif type(props[key]) is PropertyFile:
def button_clicked(line_edit_):
open_dialog = QFileDialog()
file_path = open_dialog.getOpenFileName(self, "Select File:", self._package_path)
if file_path[0]:
                        line_edit_.setText(relpath(file_path[0], self._package_path))
line_edit_.editingFinished.emit()
og_values[prop_index] = props[key].value
prop_list.append(QWidget(self.dockWidgetContents))
layout = QHBoxLayout(prop_list[prop_index])
line_edit = QLineEdit(prop_list[prop_index])
push_button = QPushButton(prop_list[prop_index])
push_button.setText("...")
push_button.setMaximumWidth(30)
layout.addWidget(line_edit)
layout.addWidget(push_button)
layout.setContentsMargins(0, 0, 0, 0)
line_edit.setText(props[key].value)
line_edit.textChanged.connect(props[key].set_value)
line_edit.textChanged[str].connect(self.current_node.write_attribs)
line_edit.textChanged[str].connect(self.current_node.update_item_name)
line_edit.textChanged[str].connect(
lambda: self.xml_code_changed.emit(self.current_node)
if self.settings_dict["General"]["code_refresh"] >= 3 else None
)
line_edit.editingFinished.connect(
lambda index=prop_index: self.undo_stack.push(
self.WidgetLineEditChangeCommand(
og_values[index],
line_edit.text(),
self.current_prop_list,
index,
self.node_tree_model,
self.current_node.model_item,
self.select_node
)
)
if og_values[index] != line_edit.text() else None
)
line_edit.editingFinished.connect(
lambda index=prop_index: og_values.update({index: line_edit.text()})
)
push_button.clicked.connect(lambda _, line_edit_=line_edit: button_clicked(line_edit_))
elif type(props[key]) is PropertyFolder:
def button_clicked(line_edit_):
open_dialog = QFileDialog()
folder_path = open_dialog.getExistingDirectory(self, "Select folder:", self._package_path)
if folder_path:
                        line_edit_.setText(relpath(folder_path, self._package_path))
line_edit_.editingFinished.emit()
og_values[prop_index] = props[key].value
prop_list.append(QWidget(self.dockWidgetContents))
layout = QHBoxLayout(prop_list[prop_index])
line_edit = QLineEdit(prop_list[prop_index])
push_button = QPushButton(prop_list[prop_index])
push_button.setText("...")
push_button.setMaximumWidth(30)
layout.addWidget(line_edit)
layout.addWidget(push_button)
layout.setContentsMargins(0, 0, 0, 0)
line_edit.setText(props[key].value)
line_edit.textChanged.connect(props[key].set_value)
line_edit.textChanged.connect(self.current_node.write_attribs)
line_edit.textChanged.connect(self.current_node.update_item_name)
line_edit.textChanged.connect(
lambda: self.xml_code_changed.emit(self.current_node)
if self.settings_dict["General"]["code_refresh"] >= 3 else None
)
line_edit.editingFinished.connect(
lambda index=prop_index: self.undo_stack.push(
self.WidgetLineEditChangeCommand(
og_values[index],
line_edit.text(),
self.current_prop_list,
index,
self.node_tree_model,
self.current_node.model_item,
self.select_node
)
)
if og_values[index] != line_edit.text() else None
)
line_edit.editingFinished.connect(
lambda index=prop_index: og_values.update({index: line_edit.text()})
)
push_button.clicked.connect(lambda _, line_edit_=line_edit: button_clicked(line_edit_))
elif type(props[key]) is PropertyColour:
def button_clicked(line_edit_):
init_colour = QColor("#" + props[key].value)
colour_dialog = QColorDialog()
colour = colour_dialog.getColor(init_colour, self, "Choose Colour:")
if colour.isValid():
                        line_edit_.setText(colour.name()[1:])
line_edit_.editingFinished.emit()
def update_button_colour(text):
colour = QColor("#" + text)
if colour.isValid() and len(text) == 6:
push_button.setStyleSheet("background-color: " + colour.name())
push_button.setIcon(QIcon())
else:
push_button.setStyleSheet("background-color: #ffffff")
icon = QIcon()
icon.addPixmap(QPixmap(join(cur_folder, "resources/logos/logo_danger.png")),
QIcon.Normal, QIcon.Off)
push_button.setIcon(icon)
og_values[prop_index] = props[key].value
prop_list.append(QWidget(self.dockWidgetContents))
layout = QHBoxLayout(prop_list[prop_index])
line_edit = QLineEdit(prop_list[prop_index])
line_edit.setMaxLength(6)
push_button = QPushButton(prop_list[prop_index])
push_button.setMinimumHeight(21)
push_button.setMinimumWidth(30)
push_button.setMaximumHeight(21)
push_button.setMaximumWidth(30)
layout.addWidget(line_edit)
layout.addWidget(push_button)
layout.setContentsMargins(0, 0, 0, 0)
line_edit.setText(props[key].value)
update_button_colour(line_edit.text())
line_edit.textChanged.connect(props[key].set_value)
line_edit.textChanged.connect(update_button_colour)
line_edit.textChanged.connect(self.current_node.write_attribs)
line_edit.textChanged.connect(
lambda: self.xml_code_changed.emit(self.current_node)
if self.settings_dict["General"]["code_refresh"] >= 3 else None
)
line_edit.editingFinished.connect(
lambda index=prop_index: self.undo_stack.push(
self.WidgetLineEditChangeCommand(
og_values[index],
line_edit.text(),
self.current_prop_list,
index,
self.node_tree_model,
self.current_node.model_item,
self.select_node
)
)
if og_values[index] != line_edit.text() else None
)
line_edit.editingFinished.connect(
lambda index=prop_index: og_values.update({index: line_edit.text()})
)
push_button.clicked.connect(lambda _, line_edit_=line_edit: button_clicked(line_edit_))
self.layout_prop_editor.setWidget(prop_index, QFormLayout.FieldRole, prop_list[prop_index])
prop_list[prop_index].setObjectName(str(prop_index))
prop_index += 1
def run_wizard(self):
"""
Called when the wizard button is clicked.
Sets up the main window and runs the wizard.
"""
def close():
wizard.deleteLater()
self.action_Object_Tree.toggled.emit(enabled_tree)
self.actionObject_Box.toggled.emit(enabled_box)
self.action_Property_Editor.toggled.emit(enabled_list)
self.menu_File.setEnabled(True)
self.menu_Tools.setEnabled(True)
self.menu_View.setEnabled(True)
current_index = self.node_tree_model.indexFromItem(self.current_node.model_item)
enabled_tree = self.action_Object_Tree.isChecked()
enabled_box = self.actionObject_Box.isChecked()
enabled_list = self.action_Property_Editor.isChecked()
self.action_Object_Tree.toggled.emit(False)
self.actionObject_Box.toggled.emit(False)
self.action_Property_Editor.toggled.emit(False)
self.menu_File.setEnabled(False)
self.menu_Tools.setEnabled(False)
self.menu_View.setEnabled(False)
parent_node = self.current_node.getparent()
original_node = self.current_node
kwargs = {"package_path": self._package_path}
wizard = self.current_node.wizard(self, self.current_node, self.xml_code_changed, **kwargs)
self.splitter.insertWidget(0, wizard)
wizard.cancelled.connect(close)
wizard.cancelled.connect(lambda: self.select_node.emit(current_index))
wizard.finished.connect(close)
wizard.finished.connect(
lambda result: self.undo_stack.push(
self.RunWizardCommand(
parent_node,
original_node,
result,
self.node_tree_model,
self.select_node
)
)
)
wizard.finished.connect(lambda: self.select_node.emit(current_index))
def check_fomod_state(self):
"""
Checks whether the installer has unsaved changes.
"""
if not self.undo_stack.isClean():
msg_box = QMessageBox()
msg_box.setWindowTitle("The installer has been modified.")
msg_box.setText("Do you want to save your changes?")
msg_box.setStandardButtons(QMessageBox.Save |
QMessageBox.Discard |
QMessageBox.Cancel)
msg_box.setDefaultButton(QMessageBox.Save)
return msg_box.exec_()
else:
return
def closeEvent(self, event):
"""
Override the Qt close event to account for unsaved changes.
:param event:
"""
answer = self.check_fomod_state()
if answer == QMessageBox.Save:
self.save()
elif answer == QMessageBox.Discard:
pass
elif answer == QMessageBox.Cancel:
event.ignore()
class SettingsDialog(QDialog, window_settings.Ui_Dialog):
"""
The class for the settings window. Subclassed from QDialog and created in Qt Designer.
"""
def __init__(self, parent):
super().__init__(parent=parent)
self.setupUi(self)
self.setWindowFlags(Qt.WindowSystemMenuHint | Qt.WindowTitleHint | Qt.Dialog)
self.label_warning_palette.setPixmap(QPixmap(join(cur_folder, "resources/logos/logo_danger.png")))
self.label_warning_style.setPixmap(QPixmap(join(cur_folder, "resources/logos/logo_danger.png")))
self.widget_warning_palette.hide()
self.widget_warning_style.hide()
self.settings_dict = read_settings()
self.buttonBox.accepted.connect(self.accepted)
self.buttonBox.rejected.connect(self.close)
self.check_valid_load.stateChanged.connect(self.check_valid_load_ignore.setEnabled)
self.check_warn_load.stateChanged.connect(self.check_warn_load_ignore.setEnabled)
self.check_valid_save.stateChanged.connect(self.check_valid_save_ignore.setEnabled)
self.check_warn_save.stateChanged.connect(self.check_warn_save_ignore.setEnabled)
self.check_installSteps.stateChanged.connect(self.combo_installSteps.setEnabled)
self.check_optionalFileGroups.stateChanged.connect(self.combo_optionalFileGroups.setEnabled)
self.check_type.stateChanged.connect(self.combo_type.setEnabled)
self.check_defaultType.stateChanged.connect(self.combo_defaultType.setEnabled)
self.button_colour_required.clicked.connect(
lambda: self.button_colour_required.setStyleSheet(
"background-color: " + QColorDialog().getColor(
QColor(self.button_colour_required.styleSheet().split()[1]),
self,
"Choose Colour:"
).name()
)
)
self.button_colour_atleastone.clicked.connect(
lambda: self.button_colour_atleastone.setStyleSheet(
"background-color: " + QColorDialog().getColor(
QColor(self.button_colour_atleastone.styleSheet().split()[1]),
self,
"Choose Colour:"
).name()
)
)
self.button_colour_either.clicked.connect(
lambda: self.button_colour_either.setStyleSheet(
"background-color: " + QColorDialog().getColor(
QColor(self.button_colour_either.styleSheet().split()[1]),
self,
"Choose Colour:"
).name()
)
)
self.button_colour_reset_required.clicked.connect(
lambda: self.button_colour_required.setStyleSheet("background-color: #d90027")
)
self.button_colour_reset_atleastone.clicked.connect(
lambda: self.button_colour_atleastone.setStyleSheet("background-color: #d0d02e")
)
self.button_colour_reset_either.clicked.connect(
lambda: self.button_colour_either.setStyleSheet("background-color: #ffaa7f")
)
self.combo_style.currentTextChanged.connect(
lambda text: self.widget_warning_style.show()
if text != self.settings_dict["Appearance"]["style"]
else self.widget_warning_style.hide()
)
self.combo_palette.currentTextChanged.connect(
lambda text: self.widget_warning_palette.show()
if text != self.settings_dict["Appearance"]["palette"]
else self.widget_warning_palette.hide()
)
self.combo_code_refresh.setCurrentIndex(self.settings_dict["General"]["code_refresh"])
self.check_intro.setChecked(self.settings_dict["General"]["show_intro"])
self.check_advanced.setChecked(self.settings_dict["General"]["show_advanced"])
self.check_tutorial.setChecked(self.settings_dict["General"]["tutorial_advanced"])
self.check_valid_load.setChecked(self.settings_dict["Load"]["validate"])
self.check_valid_load_ignore.setChecked(self.settings_dict["Load"]["validate_ignore"])
self.check_warn_load.setChecked(self.settings_dict["Load"]["warnings"])
self.check_warn_load_ignore.setChecked(self.settings_dict["Load"]["warn_ignore"])
self.check_valid_save.setChecked(self.settings_dict["Save"]["validate"])
self.check_valid_save_ignore.setChecked(self.settings_dict["Save"]["validate_ignore"])
self.check_warn_save.setChecked(self.settings_dict["Save"]["warnings"])
self.check_warn_save_ignore.setChecked(self.settings_dict["Save"]["warn_ignore"])
self.check_installSteps.setChecked(self.settings_dict["Defaults"]["installSteps"].enabled())
self.combo_installSteps.setEnabled(self.settings_dict["Defaults"]["installSteps"].enabled())
self.combo_installSteps.setCurrentText(self.settings_dict["Defaults"]["installSteps"].value())
self.check_optionalFileGroups.setChecked(self.settings_dict["Defaults"]["optionalFileGroups"].enabled())
self.combo_optionalFileGroups.setEnabled(self.settings_dict["Defaults"]["optionalFileGroups"].enabled())
self.combo_optionalFileGroups.setCurrentText(self.settings_dict["Defaults"]["optionalFileGroups"].value())
self.check_type.setChecked(self.settings_dict["Defaults"]["type"].enabled())
self.combo_type.setEnabled(self.settings_dict["Defaults"]["type"].enabled())
self.combo_type.setCurrentText(self.settings_dict["Defaults"]["type"].value())
self.check_defaultType.setChecked(self.settings_dict["Defaults"]["defaultType"].enabled())
self.combo_defaultType.setEnabled(self.settings_dict["Defaults"]["defaultType"].enabled())
self.combo_defaultType.setCurrentText(self.settings_dict["Defaults"]["defaultType"].value())
self.button_colour_required.setStyleSheet(
"background-color: " + self.settings_dict["Appearance"]["required_colour"]
)
self.button_colour_atleastone.setStyleSheet(
"background-color: " + self.settings_dict["Appearance"]["atleastone_colour"]
)
self.button_colour_either.setStyleSheet(
"background-color: " + self.settings_dict["Appearance"]["either_colour"]
)
if self.settings_dict["Appearance"]["style"]:
self.combo_style.setCurrentText(self.settings_dict["Appearance"]["style"])
else:
self.combo_style.setCurrentText("Default")
if self.settings_dict["Appearance"]["palette"]:
self.combo_palette.setCurrentText(self.settings_dict["Appearance"]["palette"])
else:
self.combo_palette.setCurrentText("Default")
def accepted(self):
self.settings_dict["General"]["code_refresh"] = self.combo_code_refresh.currentIndex()
self.settings_dict["General"]["show_intro"] = self.check_intro.isChecked()
self.settings_dict["General"]["show_advanced"] = self.check_advanced.isChecked()
self.settings_dict["General"]["tutorial_advanced"] = self.check_tutorial.isChecked()
self.settings_dict["Load"]["validate"] = self.check_valid_load.isChecked()
self.settings_dict["Load"]["validate_ignore"] = self.check_valid_load_ignore.isChecked()
self.settings_dict["Load"]["warnings"] = self.check_warn_load.isChecked()
self.settings_dict["Load"]["warn_ignore"] = self.check_warn_load_ignore.isChecked()
self.settings_dict["Save"]["validate"] = self.check_valid_save.isChecked()
self.settings_dict["Save"]["validate_ignore"] = self.check_valid_save_ignore.isChecked()
self.settings_dict["Save"]["warnings"] = self.check_warn_save.isChecked()
self.settings_dict["Save"]["warn_ignore"] = self.check_warn_save_ignore.isChecked()
self.settings_dict["Defaults"]["installSteps"].set_enabled(self.check_installSteps.isChecked())
self.settings_dict["Defaults"]["installSteps"].set_value(self.combo_installSteps.currentText())
self.settings_dict["Defaults"]["optionalFileGroups"].set_enabled(self.check_optionalFileGroups.isChecked())
self.settings_dict["Defaults"]["optionalFileGroups"].set_value(self.combo_optionalFileGroups.currentText()
)
self.settings_dict["Defaults"]["type"].set_enabled(self.check_type.isChecked())
self.settings_dict["Defaults"]["type"].set_value(self.combo_type.currentText())
self.settings_dict["Defaults"]["defaultType"].set_enabled(self.check_defaultType.isChecked())
self.settings_dict["Defaults"]["defaultType"].set_value(self.combo_defaultType.currentText())
self.settings_dict["Appearance"]["required_colour"] = self.button_colour_required.styleSheet().split()[1]
self.settings_dict["Appearance"]["atleastone_colour"] = self.button_colour_atleastone.styleSheet().split()[1]
self.settings_dict["Appearance"]["either_colour"] = self.button_colour_either.styleSheet().split()[1]
if self.combo_style.currentText() != "Default":
self.settings_dict["Appearance"]["style"] = self.combo_style.currentText()
else:
self.settings_dict["Appearance"]["style"] = ""
if self.combo_palette.currentText() != "Default":
self.settings_dict["Appearance"]["palette"] = self.combo_palette.currentText()
else:
self.settings_dict["Appearance"]["palette"] = ""
makedirs(join(expanduser("~"), ".fomod"), exist_ok=True)
with open(join(expanduser("~"), ".fomod", ".designer"), "w") as configfile:
set_encoder_options("json", indent=4)
configfile.write(encode(self.settings_dict))
self.close()
class About(QDialog, window_about.Ui_Dialog):
"""
The class for the about window. Subclassed from QDialog and created in Qt Designer.
"""
def __init__(self, parent):
super().__init__(parent=parent)
self.setupUi(self)
if parent:
self.move(
parent.window().frameGeometry().topLeft() + parent.window().rect().center() - self.rect().center()
)
self.setWindowFlags(Qt.WindowTitleHint | Qt.Dialog)
self.version.setText("Version: " + __version__)
copyright_text = self.copyright.text()
new_year = "2016-" + str(datetime.now().year) if datetime.now().year != 2016 else "2016"
copyright_text = copyright_text.replace("2016", new_year)
self.copyright.setText(copyright_text)
self.button.clicked.connect(self.close)
class PreviewMoGui(QWidget, preview_mo.Ui_Form):
clear_tab_signal = pyqtSignal()
clear_ui_signal = pyqtSignal()
invalid_node_signal = pyqtSignal()
missing_node_signal = pyqtSignal()
set_labels_signal = pyqtSignal([str, str, str, str])
create_page_signal = pyqtSignal([object])
class ScaledLabel(QLabel):
def __init__(self, parent=None):
super().__init__(parent)
self.original_pixmap = None
self.setMinimumSize(320, 200)
def set_scalable_pixmap(self, pixmap):
self.original_pixmap = pixmap
self.setPixmap(self.original_pixmap.scaled(self.size(), Qt.KeepAspectRatio))
def resizeEvent(self, event):
if self.pixmap() and self.original_pixmap:
self.setPixmap(self.original_pixmap.scaled(event.size(), Qt.KeepAspectRatio))
class PreviewItem(QStandardItem):
def set_priority(self, value):
self.priority = value
def __init__(self, mo_preview_layout):
super().__init__()
self.mo_preview_layout = mo_preview_layout
self.setupUi(self)
self.mo_preview_layout.addWidget(self)
self.label_image = self.ScaledLabel(self)
self.splitter_label.addWidget(self.label_image)
self.hide()
self.button_preview_more.setIcon(QIcon(join(cur_folder, "resources/logos/logo_more.png")))
self.button_preview_less.setIcon(QIcon(join(cur_folder, "resources/logos/logo_less.png")))
self.button_preview_more.clicked.connect(self.button_preview_more.hide)
self.button_preview_more.clicked.connect(self.button_preview_less.show)
self.button_preview_more.clicked.connect(self.widget_preview.show)
self.button_preview_less.clicked.connect(self.button_preview_less.hide)
self.button_preview_less.clicked.connect(self.button_preview_more.show)
self.button_preview_less.clicked.connect(self.widget_preview.hide)
self.button_preview_more.clicked.emit()
self.button_results_more.setIcon(QIcon(join(cur_folder, "resources/logos/logo_more.png")))
self.button_results_less.setIcon(QIcon(join(cur_folder, "resources/logos/logo_less.png")))
self.button_results_more.clicked.connect(self.button_results_more.hide)
self.button_results_more.clicked.connect(self.button_results_less.show)
self.button_results_more.clicked.connect(self.widget_results.show)
self.button_results_less.clicked.connect(self.button_results_less.hide)
self.button_results_less.clicked.connect(self.button_results_more.show)
self.button_results_less.clicked.connect(self.widget_results.hide)
self.button_results_less.clicked.emit()
self.model_files = QStandardItemModel()
self.tree_results.expanded.connect(
lambda: self.tree_results.header().resizeSections(QHeaderView.Stretch)
)
self.tree_results.collapsed.connect(
lambda: self.tree_results.header().resizeSections(QHeaderView.Stretch)
)
self.tree_results.setContextMenuPolicy(Qt.CustomContextMenu)
self.tree_results.customContextMenuRequested.connect(self.on_custom_context_menu)
self.model_flags = QStandardItemModel()
self.list_flags.expanded.connect(
lambda: self.list_flags.header().resizeSections(QHeaderView.Stretch)
)
self.list_flags.collapsed.connect(
lambda: self.list_flags.header().resizeSections(QHeaderView.Stretch)
)
self.reset_models()
self.label_invalid = QLabel(
"Select an Installation Step node or one of its children to preview its installer page."
)
self.label_invalid.setAlignment(Qt.AlignCenter)
self.mo_preview_layout.addWidget(self.label_invalid)
self.label_invalid.hide()
self.label_missing = QLabel(
"In order to preview an installer page, create an Installation Step node."
)
self.label_missing.setAlignment(Qt.AlignCenter)
self.mo_preview_layout.addWidget(self.label_missing)
self.label_missing.hide()
self.clear_tab_signal.connect(self.clear_tab)
self.clear_ui_signal.connect(self.clear_ui)
self.invalid_node_signal.connect(self.invalid_node)
self.missing_node_signal.connect(self.missing_node)
self.set_labels_signal.connect(self.set_labels)
self.create_page_signal.connect(self.create_page)
def on_custom_context_menu(self, position):
node_tree_context_menu = QMenu(self.tree_results)
action_expand = QAction(QIcon(join(cur_folder, "resources/logos/logo_expand.png")), "Expand All", self)
action_collapse = QAction(QIcon(join(cur_folder, "resources/logos/logo_collapse.png")), "Collapse All", self)
action_expand.triggered.connect(self.tree_results.expandAll)
action_collapse.triggered.connect(self.tree_results.collapseAll)
node_tree_context_menu.addActions([action_expand, action_collapse])
node_tree_context_menu.move(self.tree_results.mapToGlobal(position))
node_tree_context_menu.exec_()
def eventFilter(self, object_, event):
if event.type() == QEvent.HoverEnter:
self.label_description.setText(object_.property("description"))
self.label_image.set_scalable_pixmap(QPixmap(object_.property("image_path")))
        return super().eventFilter(object_, event)
def clear_ui(self):
self.label_name.clear()
self.label_author.clear()
self.label_version.clear()
self.label_website.clear()
self.label_description.clear()
self.label_image.clear()
        for index in range(self.layout_widget.count()):
            widget = self.layout_widget.itemAt(index).widget()
            if widget:
                widget.deleteLater()
self.reset_models()
def reset_models(self):
self.model_files.clear()
self.model_files.setHorizontalHeaderLabels(["Files Preview", "Source", "Plugin"])
self.model_files_root = QStandardItem(QIcon(join(cur_folder, "resources/logos/logo_folder.png")), "<root>")
self.model_files.appendRow(self.model_files_root)
self.tree_results.setModel(self.model_files)
self.model_flags.clear()
self.model_flags.setHorizontalHeaderLabels(["Flag Label", "Flag Value", "Plugin"])
self.list_flags.setModel(self.model_flags)
def clear_tab(self):
for index in reversed(range(self.mo_preview_layout.count())):
widget = self.mo_preview_layout.itemAt(index).widget()
if widget is not None:
widget.hide()
def invalid_node(self):
self.clear_tab()
self.label_invalid.show()
def missing_node(self):
self.clear_tab()
self.label_missing.show()
def set_labels(self, name, author, version, website):
self.label_name.setText(name)
self.label_author.setText(author)
self.label_version.setText(version)
self.label_website.setText("<a href = {}>link</a>".format(website))
# this is pretty horrendous, need to come up with a better way of doing this.
def create_page(self, page_data):
group_step = QGroupBox(page_data.name)
layout_step = QVBoxLayout()
group_step.setLayout(layout_step)
check_first_radio = True
for group in page_data.group_list:
group_group = QGroupBox(group.name)
layout_group = QVBoxLayout()
group_group.setLayout(layout_group)
for plugin in group.plugin_list:
if group.type in ["SelectAny", "SelectAll", "SelectAtLeastOne"]:
button_plugin = QCheckBox(plugin.name, self)
if group.type == "SelectAll":
button_plugin.setChecked(True)
button_plugin.setEnabled(False)
elif group.type == "SelectAtLeastOne":
button_plugin.toggled.connect(
lambda checked, button=button_plugin: button.setChecked(True)
if not checked and not [
button for button in [
layout_group.itemAt(index).widget() for index in range(layout_group.count())
if layout_group.itemAt(index).widget()
] if button.isChecked()
]
else None
)
elif group.type in ["SelectExactlyOne", "SelectAtMostOne"]:
button_plugin = QRadioButton(plugin.name, self)
if check_first_radio and not button_plugin.isChecked():
button_plugin.animateClick(0)
check_first_radio = False
button_plugin.setProperty("description", plugin.description)
button_plugin.setProperty("image_path", plugin.image_path)
button_plugin.setProperty("file_list", plugin.file_list)
button_plugin.setProperty("folder_list", plugin.folder_list)
button_plugin.setProperty("flag_list", plugin.flag_list)
button_plugin.setProperty("type", plugin.type)
button_plugin.setAttribute(Qt.WA_Hover)
if plugin.type == "Required":
button_plugin.setEnabled(False)
elif plugin.type == "Recommended":
button_plugin.animateClick(0) if not button_plugin.isChecked() else None
elif plugin.type == "NotUsable":
button_plugin.setChecked(False)
button_plugin.setEnabled(False)
button_plugin.toggled.connect(self.reset_models)
button_plugin.toggled.connect(self.update_installed_files)
button_plugin.toggled.connect(self.update_set_flags)
button_plugin.installEventFilter(self)
button_plugin.setObjectName("preview_button")
layout_group.addWidget(button_plugin)
if group.type == "SelectAtMostOne":
button_none = QRadioButton("None")
layout_group.addWidget(button_none)
layout_step.addWidget(group_group)
self.layout_widget.addWidget(group_step)
self.reset_models()
self.update_installed_files()
self.update_set_flags()
self.show()
def update_installed_files(self):
        def recurse_add_items(folder, parent):
            for entry in listdir(folder):  # walk the plugin's source folder and mirror it in the preview tree
                if isdir(join(folder, entry)):
                    folder_item = None
                    existing_folder_ = self.model_files.findItems(entry, Qt.MatchRecursive)
                    if existing_folder_:
                        for existing_item in existing_folder_:
                            if existing_item.parent() is parent:
                                folder_item = existing_item
                                break
                    if not folder_item:
                        folder_item = self.PreviewItem(
                            QIcon(join(cur_folder, "resources/logos/logo_folder.png")),
                            entry
                        )
                        folder_item.set_priority(folder_.priority)
                        parent.appendRow([folder_item, QStandardItem(rel_source), QStandardItem(button.text())])
                    recurse_add_items(join(folder, entry), folder_item)
                elif isfile(join(folder, entry)):
                    file_item_ = None
                    existing_file_ = self.model_files.findItems(entry, Qt.MatchRecursive)
                    if existing_file_:
                        for existing_item in existing_file_:
                            if existing_item.parent() is parent:
                                if folder_.priority < existing_item.priority:
                                    file_item_ = existing_item
                                    break
                                else:
                                    parent.removeRow(existing_item.row())
                                    break
                    if not file_item_:
                        file_item_ = self.PreviewItem(
                            QIcon(join(cur_folder, "resources/logos/logo_file.png")),
                            entry
                        )
                        file_item_.set_priority(folder_.priority)
                        parent.appendRow([file_item_, QStandardItem(rel_source), QStandardItem(button.text())])
for button in self.findChildren((QCheckBox, QRadioButton), "preview_button"):
for folder_ in button.property("folder_list"):
if (button.isChecked() and button.property("type") != "NotUsable" or
folder_.always_install or
folder_.install_usable and button.property("type") != "NotUsable" or
button.property("type") == "Required"):
destination = folder_.destination
abs_source = folder_.abs_source
rel_source = folder_.rel_source
parent_item = self.model_files_root
destination_split = destination.split("/")
if destination_split[0] == ".":
destination_split = destination_split[1:]
for dest_folder in destination_split:
existing_folder_list = self.model_files.findItems(dest_folder, Qt.MatchRecursive)
if existing_folder_list:
for existing_folder in existing_folder_list:
if existing_folder.parent() is parent_item:
parent_item = existing_folder
break
continue
item_ = self.PreviewItem(
QIcon(join(cur_folder, "resources/logos/logo_folder.png")),
dest_folder
)
item_.set_priority(folder_.priority)
parent_item.appendRow([item_, QStandardItem(), QStandardItem(button.text())])
parent_item = item_
if isdir(abs_source):
recurse_add_items(abs_source, parent_item)
for file_ in button.property("file_list"):
if (button.isChecked() and button.property("type") != "NotUsable" or
file_.always_install or
file_.install_usable and button.property("type") != "NotUsable" or
button.property("type") == "Required"):
destination = file_.destination
abs_source = file_.abs_source
rel_source = file_.rel_source
parent_item = self.model_files_root
destination_split = destination.split("/")
if destination_split[0] == ".":
destination_split = destination_split[1:]
for dest_folder in destination_split:
existing_folder_list = self.model_files.findItems(dest_folder, Qt.MatchRecursive)
if existing_folder_list:
for existing_folder in existing_folder_list:
if existing_folder.parent() is parent_item:
parent_item = existing_folder
break
continue
item_ = self.PreviewItem(
QIcon(join(cur_folder, "resources/logos/logo_folder.png")),
dest_folder
)
item_.set_priority(file_.priority)
parent_item.appendRow([item_, QStandardItem(), QStandardItem(button.text())])
parent_item = item_
source_file = abs_source.split("/")[len(abs_source.split("/")) - 1]
file_item = None
existing_file_list = self.model_files.findItems(source_file, Qt.MatchRecursive)
if existing_file_list:
for existing_file in existing_file_list:
if existing_file.parent() is parent_item:
if file_.priority < existing_file.priority:
file_item = existing_file
break
else:
parent_item.removeRow(existing_file.row())
break
if not file_item:
file_item = self.PreviewItem(
QIcon(join(cur_folder, "resources/logos/logo_file.png")),
source_file
)
file_item.set_priority(file_.priority)
parent_item.appendRow([file_item, QStandardItem(rel_source), QStandardItem(button.text())])
self.tree_results.header().resizeSections(QHeaderView.Stretch)
def update_set_flags(self):
for button in self.findChildren((QCheckBox, QRadioButton), "preview_button"):
if button.isChecked():
for flag in button.property("flag_list"):
flag_label = QStandardItem(flag.label)
flag_value = QStandardItem(flag.value)
flag_plugin = QStandardItem(button.text())
existing_flag = self.model_flags.findItems(flag.label)
if existing_flag:
previous_flag_row = existing_flag[0].row()
self.model_flags.removeRow(previous_flag_row)
self.model_flags.insertRow(previous_flag_row, [flag_label, flag_value, flag_plugin])
else:
self.model_flags.appendRow([flag_label, flag_value, flag_plugin])
self.list_flags.header().resizeSections(QHeaderView.Stretch)
class DefaultsSettings(object):
def __init__(self, key, default_enabled, default_value):
self.__enabled = default_enabled
self.__property_key = key
self.__property_value = default_value
def __eq__(self, other):
        return (self.enabled() == other.enabled()
                and self.value() == other.value()
                and self.key() == other.key())
def set_enabled(self, enabled):
self.__enabled = enabled
def set_value(self, value):
self.__property_value = value
def enabled(self):
return self.__enabled
def value(self):
return self.__property_value
def key(self):
return self.__property_key
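# Illustrative sketch of how a DefaultsSettings entry behaves (an assumption added
# for clarity, not part of the original source):
#
#   order_default = DefaultsSettings("order", True, "Explicit")
#   order_default.set_value("Ascending")
#   order_default.key(), order_default.enabled(), order_default.value()
#   # -> ("order", True, "Ascending")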
default_settings = {
"General": {
"code_refresh": 3,
"show_intro": True,
"show_advanced": False,
"tutorial_advanced": True,
},
"Appearance": {
"required_colour": "#ba4d0e",
"atleastone_colour": "#d0d02e",
"either_colour": "#ffaa7f",
"style": "",
"palette": "",
},
"Defaults": {
"installSteps": DefaultsSettings("order", True, "Explicit"),
"optionalFileGroups": DefaultsSettings("order", True, "Explicit"),
"type": DefaultsSettings("name", True, "Optional"),
"defaultType": DefaultsSettings("name", True, "Optional"),
},
"Load": {
"validate": True,
"validate_ignore": False,
"warnings": True,
"warn_ignore": True,
},
"Save": {
"validate": True,
"validate_ignore": False,
"warnings": True,
"warn_ignore": True,
},
"Recent Files": deque(maxlen=5),
}
def generic_errorbox(title, text, detail=""):
"""
A function that creates a generic errorbox with the logo_admin.png logo.
:param title: A string containing the title of the errorbox.
:param text: A string containing the text of the errorbox.
:param detail: Optional. A string containing the detail text of the errorbox.
"""
errorbox = QMessageBox()
errorbox.setText(text)
errorbox.setWindowTitle(title)
errorbox.setDetailedText(detail)
errorbox.setIconPixmap(QPixmap(join(cur_folder, "resources/logos/logo_admin.png")))
return errorbox
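# Illustrative usage sketch (an assumption, not from the original source): the
# returned QMessageBox still has to be shown by the caller, typically with
#
#   box = generic_errorbox("Parse Error", "The installer could not be read.", error_details)
#   box.exec_()
#
# where "error_details" stands in for whatever traceback text is available.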
def read_settings():
"""
Reads the settings from the ~/.fomod/.designer file. If such a file does not exist it uses the default settings.
The settings are processed to be ready to be used in Python code.
:return: The processed settings.
"""
def deep_merge(a, b, path=None):
"""merges b into a"""
if path is None:
path = []
for key in b:
if key in a: # only accept the keys in default settings
if isinstance(a[key], dict) and isinstance(b[key], dict):
deep_merge(a[key], b[key], path + [str(key)])
elif isinstance(b[key], type(a[key])):
a[key] = b[key]
else:
pass # user has messed with conf files
return a
try:
with open(join(expanduser("~"), ".fomod", ".designer"), "r") as configfile:
settings_dict = decode(configfile.read())
deep_merge(default_settings, settings_dict)
return default_settings
except (FileNotFoundError, JSONDecodeError):
return default_settings
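# Illustrative sketch of the deep_merge semantics above (an assumption added for
# clarity): keys missing from the user's file keep their defaults, keys that do
# not exist in default_settings are dropped, and values of the wrong type are
# ignored.
#
#   defaults = {"General": {"code_refresh": 3, "show_intro": True}}
#   user = {"General": {"code_refresh": 1, "bogus": 5}, "Unknown": {}}
#   deep_merge(defaults, user)
#   # defaults -> {"General": {"code_refresh": 1, "show_intro": True}}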
|
GandaG/fomod-designer
|
src/gui.py
|
Python
|
apache-2.0
| 106,062
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"CampaignExperimentTypeEnum",},
)
class CampaignExperimentTypeEnum(proto.Message):
r"""Container for enum describing campaign experiment type.
"""
class CampaignExperimentType(proto.Enum):
r"""Indicates if this campaign is a normal campaign,
a draft campaign, or an experiment campaign.
"""
UNSPECIFIED = 0
UNKNOWN = 1
BASE = 2
DRAFT = 3
EXPERIMENT = 4
__all__ = tuple(sorted(__protobuf__.manifest))
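# Illustrative usage sketch (an assumption, not part of the generated file):
# proto-plus enum members behave like IntEnum values, so they can be compared
# and looked up by name.
if __name__ == "__main__":
    _example = CampaignExperimentTypeEnum.CampaignExperimentType.DRAFT
    print(_example.name, _example.value)  # -> DRAFT 3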
|
googleads/google-ads-python
|
google/ads/googleads/v9/enums/types/campaign_experiment_type.py
|
Python
|
apache-2.0
| 1,239
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import re
import urlparse
import xbmcaddon
import urllib
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
from salts_lib import dom_parser
BASE_URL = 'http://moviestorm.eu'
QUALITY_MAP = {'HD': QUALITIES.HIGH, 'CAM': QUALITIES.LOW, 'BRRIP': QUALITIES.HIGH, 'UNKNOWN': QUALITIES.MEDIUM, 'DVDRIP': QUALITIES.HIGH}
class MovieStorm_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'moviestorm.eu'
def resolve_link(self, link):
if self.base_url in link:
url = urlparse.urljoin(self.base_url, link)
html = self._http_get(url, cache_limit=.5)
match = re.search('class="real_link"\s+href="([^"]+)', html)
if match:
return match.group(1)
else:
return link
def format_source_label(self, item):
label = '[%s] %s (%s views)' % (item['quality'], item['host'], item['views'])
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
pattern = 'class="source_td">\s*<img[^>]+>\s*(.*?)\s*-\s*\((\d+) views\).*?class="quality_td">\s*(.*?)\s*<.*?href="([^"]+)'
for match in re.finditer(pattern, html, re.DOTALL):
host, views, quality_str, stream_url = match.groups()
hoster = {'multi-part': False, 'host': host.lower(), 'class': self, 'url': stream_url, 'quality': self._get_quality(video, host, QUALITY_MAP.get(quality_str.upper())), 'views': views, 'rating': None, 'direct': False}
hosters.append(hoster)
return hosters
def get_url(self, video):
return super(MovieStorm_Scraper, self)._default_get_url(video)
def _get_episode_url(self, show_url, video):
episode_pattern = 'class="number left".*?href="([^"]+season-%d/episode-%d[^"]+)' % (int(video.season), int(video.episode))
title_pattern = 'class="name left".*?href="([^"]+)">([^<]+)'
airdate_pattern = 'class="edate[^>]+>\s*{p_month}-{p_day}-{year}.*?href="([^"]+)'
return super(MovieStorm_Scraper, self)._default_get_episode_url(show_url, video, episode_pattern, title_pattern, airdate_pattern)
def search(self, video_type, title, year):
results = []
if video_type == VIDEO_TYPES.TVSHOW:
url = urlparse.urljoin(self.base_url, '/series/all/')
html = self._http_get(url, cache_limit=8)
links = dom_parser.parse_dom(html, 'a', {'class': 'underilne'}, 'href')
titles = dom_parser.parse_dom(html, 'a', {'class': 'underilne'})
items = zip(links, titles)
else:
url = urlparse.urljoin(self.base_url, '/search?q=%s&go=Search' % urllib.quote_plus(title))
data = {'q': title, 'go': 'Search'}
html = self._http_get(url, data=data, cache_limit=8)
pattern = 'class="movie_box.*?href="([^"]+).*?<h1>([^<]+)'
items = re.findall(pattern, html)
norm_title = self._normalize_title(title)
for item in items:
url, match_title = item
if norm_title in self._normalize_title(match_title):
result = {'url': url.replace(self.base_url, ''), 'title': match_title, 'year': ''}
results.append(result)
return results
def _http_get(self, url, data=None, cache_limit=8):
return super(MovieStorm_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=cache_limit)
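# Illustrative sketch (an assumption, not from the original add-on): get_sources()
# returns a list of hoster dicts shaped roughly like
#
#   {'multi-part': False, 'host': 'examplehost', 'class': <scraper>, 'url': '...',
#    'quality': QUALITIES.HIGH, 'views': '1234', 'rating': None, 'direct': False}
#
# and format_source_label() renders one entry as "[<quality>] <host> (<views> views)".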
|
aplicatii-romanesti/allinclusive-kodi-pi
|
.kodi/addons/plugin.video.salts/scrapers/moviestorm_scraper.py
|
Python
|
apache-2.0
| 4,745
|
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default Hyperparameter configuration."""
import ml_collections
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
# Path to load or store sentencepiece vocab file.
config.vocab_path = None
# Vocabulary size if `vocab_path` is not given.
config.vocab_size = 30_000
config.max_corpus_chars = 10**7
# Name of TFDS translation dataset to use.
config.dataset_name = "lm1b"
# Optional name of TFDS translation dataset to use for evaluation.
config.eval_dataset_name = "lm1b"
config.eval_split = "test"
# Per device batch size for training.
config.per_device_batch_size = 32
  # Per device batch size for evaluation.
config.eval_per_device_batch_size = 32
# Sampling temperature for language model inference.
config.sampling_temperature = 0.6
# Top k cutoff for logit sampling. If 0 then no top-k cutoff is used.
config.sampling_top_k = 20
config.num_train_steps = 500_000
  # Number of steps to take during evaluation.
  # Large enough to evaluate all samples: 306_688 / (32 * 8) = 1198.
config.num_eval_steps = 2_000
# Number of steps to generate predictions.
# -1 will use the whole eval dataset.
config.num_predict_steps = -1
# Base learning rate.
config.learning_rate = 0.0016
# Linear learning rate warmup.
config.warmup_steps = 1000
# Cross entropy loss label smoothing.
config.label_smoothing = 0.0
# Decay factor for AdamW style weight decay.
config.weight_decay = 0.1
# Maximum length cutoff for training examples.
config.max_target_length = 128
# Maximum length cutoff for eval examples.
config.max_eval_target_length = 512
# Maximum length cutoff for predicted tokens.
config.max_predict_length = 50
# Final logit transform uses embedding matrix transpose.
config.logits_via_embedding = False
# Number of transformer layers.
config.num_layers = 6
# Size of query/key/value for attention.
config.qkv_dim = 512
# Size of embeddings.
config.emb_dim = 512
# Size of the MLP.
config.mlp_dim = 2048
# Number of attention heads.
config.num_heads = 8
# Dropout rate.
config.dropout_rate = 0.1
# Attention dropout rate.
config.attention_dropout_rate = 0.1
# Whether to save model checkpoints.
config.save_checkpoints = True
# Whether to restore from existing model checkpoints.
config.restore_checkpoints = True
# Save a checkpoint every these number of steps.
config.checkpoint_every_steps = 10_000
# Frequency of eval during training, e.g. every 1_000 steps.
config.eval_every_steps = 1_000
# Use bfloat16 mixed precision training instead of float32.
config.use_bfloat16 = True
# Integer for PRNG random seed.
config.seed = 0
# Prompt for language model sampling.
config.prompts = "I love to "
return config
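# Illustrative usage sketch (an assumption, not part of the original config): the
# returned ml_collections.ConfigDict supports attribute access and per-field
# overrides before it is handed to the training loop.
if __name__ == "__main__":
  config = get_config()
  config.per_device_batch_size = 64  # override a single field
  print(config.num_layers, config.learning_rate)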
|
google/flax
|
examples/lm1b/configs/default.py
|
Python
|
apache-2.0
| 3,443
|
# -*- coding: utf-8 -*-
#
# Read the Docs Template documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 26 14:19:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
from subprocess import call, Popen, PIPE
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
from repo_util import run_cmd_get_output
# -- Run DoxyGen to prepare XML for Sphinx---------------------------------
# ref. https://github.com/rtfd/readthedocs.org/issues/388
call('doxygen')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe', 'link-roles']
# Breathe extension variables
breathe_projects = { "esp32-idf": "xml/" }
breathe_default_project = "esp32-idf"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ESP-IDF Programming Guide'
copyright = u'2016 - 2017, Espressif'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Readthedocs largely ignores 'version' and 'release', and displays one of
# 'latest', tag name, or branch name, depending on the build type.
# Still, this is useful for non-RTD builds.
# This is supposed to be "the short X.Y version", but it's the only version
# visible when you open index.html.
# Display full version to make things less confusing.
version = run_cmd_get_output('git describe')
# The full version, including alpha/beta/rc tags.
# If needed, nearest tag is returned by 'git describe --abbrev=0'.
release = version
print('Version: {0} Release: {1}'.format(version, release))
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
try:
import sphinx_rtd_theme
except ImportError:
html_theme = 'default'
else:
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReadtheDocsTemplatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ReadtheDocsTemplate.tex', u'Read the Docs Template Documentation',
u'Read the Docs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'readthedocstemplate', u'Read the Docs Template Documentation',
[u'Read the Docs'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ReadtheDocsTemplate', u'Read the Docs Template Documentation',
u'Read the Docs', 'ReadtheDocsTemplate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Use sphinx_rtd_theme for local builds --------------------------------
# ref. https://github.com/snide/sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs
#
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
html_context = {
'display_github': True,
'github_user': 'tidyjiang8',
'github_repo': 'esp-idf-zh',
'github_version': 'zh/docs/'
}
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
tidyjiang8/esp-idf-zh
|
docs/conf.py
|
Python
|
apache-2.0
| 10,048
|
"""
Copyright 2015 herd contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
herdcontentloader
The herd data management module for populating content to UDC
"""
from setuptools import setup, find_packages
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
setup(
name="herdcl",
version="@@Version@@",
description="herd data management",
maintainer="FINRA",
maintainer_email="herd@finra.org",
license="http://www.apache.org/licenses/LICENSE-2.0",
url="https://github.com/FINRAOS/herd",
keywords=["herd", "dm"],
install_requires=[
"herdsdk", "pandas == 1.2.3", "openpyxl", "boto3 >= 1.9.50", "xlsxwriter >= 1.1.0", "requests"
],
packages=find_packages(),
include_package_data=True,
long_description="""\
The herd data management module for populating content to UDC
"""
)
|
FINRAOS/herd
|
herd-code/herd-tools/herd-content-loader/setup.py
|
Python
|
apache-2.0
| 1,454
|
import os
from kivy.properties import StringProperty, AliasProperty, NumericProperty, BooleanProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
Builder.load_file('gui/expandselector.kv')
class ExpandSelector(BoxLayout):
"""ExpandSelector is the superclass for all expanding selectors.
    Expanding selectors have a line where a value can be edited, as well as a list view where values can be selected."""
selector = None
"""The selector widget"""
caption = None
"""The caption of the selector"""
selection = None
"""The current selection"""
selector_height = None
"""The height of the selector expanded"""
input_height = None
"""The height of the input field"""
font_size = None
"""The size of the font"""
on_status = None
"""Triggered on status change"""
def do_on_status(self, _message):
"""Trigger the on_status event"""
if self.on_status is not None:
self.on_status(_message)
def __init__(self, **kwargs):
"""Constructor"""
super(ExpandSelector, self).__init__(**kwargs)
def init_selector(self, _height):
"""Things to do when initializing the selector"""
pass
def after_hide_selector(self):
"""Implement to add things to do after the selector has been hidden"""
pass
def recalc_layout(self):
"""Recalculate the height of the component"""
self.height = self.calc_child_height(self)
def switch_selector(self):
"""Hide selector if visible, show if hidden"""
self.do_on_status("")
if not self.selector:
self.selector = self.init_selector(self.selector_height)
if self.selector:
self.selector.path = self.ids.input_selection.text
if self.selector.parent:
self.selector.parent.remove_widget(self.selector)
self.after_hide_selector()
else:
self.add_widget(self.selector)
self.recalc_layout()
def calc_child_height(self, _widget):
"""Set the total height to the combined height of the children"""
_result = 0
for _curr_child in _widget.children:
            _result += _curr_child.height
return _result
def set_path(self, _path):
"""
Set the selector path to _path
:param _path: A string containing a valid path
:return:
"""
try:
self.selector.path = _path
except Exception as e:
self.do_on_status(str(e))
return False
return True
def set_selector_text(self, *args):
"""If an item is chosen, reflect that in the input field"""
if self.selector and len(self.selector.selection) > 0:
_selection = self.selector.selection[0]
if _selection[0:2] == "..":
# If the "../" item has been clicked, go up one level.
_resulting_path = os.path.split(self.selector.path)[0]
else:
# Otherwise, just set path
_resulting_path = os.path.normpath(_selection)
if self.set_path(_resulting_path):
# Path accepted and set, reflect in input
self.ids.input_selection.text = self.selector.path
def _set_caption(self, _value):
"""Set the caption of the selector"""
self.ids.selector_label.text = _value
def _get_caption(self):
"""Get the caption of the selector"""
return self.ids.selector_label.text
def _set_selection(self, _value):
"""Set the selection of the selector"""
self.ids.input_selection.text = _value
def _get_selection(self):
"""Get the selection of the selector"""
return self.ids.input_selection.text
caption = AliasProperty(_get_caption, _set_caption)
selection = AliasProperty(_get_selection, _set_selection)
selector_height = NumericProperty(200)
input_height = NumericProperty(60)
font_size = NumericProperty(20)
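# Illustrative sketch (not part of the original module): a concrete subclass is expected to
# override init_selector() so switch_selector() has something to expand. The widget and class
# names below are assumptions for illustration only.
#
#   class FileExpandSelector(ExpandSelector):
#       def init_selector(self, _height):
#           _chooser = FileChooserListView(size_hint=(1, None), height=_height)
#           _chooser.bind(selection=self.set_selector_text)
#           return _chooser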
|
OptimalBPM/optimal_file_sync
|
gui/expandselector.py
|
Python
|
apache-2.0
| 4,077
|
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import fixture as fixture_config
from oslotest import base
from ceilometer import neutron_client
class TestNeutronClient(base.BaseTestCase):
def setUp(self):
super(TestNeutronClient, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.nc = neutron_client.Client(self.CONF)
self.nc.lb_version = 'v1'
@staticmethod
def fake_ports_list():
return {'ports':
[{'admin_state_up': True,
'device_id': '674e553b-8df9-4321-87d9-93ba05b93558',
'device_owner': 'network:router_gateway',
'extra_dhcp_opts': [],
'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442',
'mac_address': 'fa:16:3e:c5:35:93',
'name': '',
'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b',
'status': 'ACTIVE',
'tenant_id': '89271fa581ab4380bf172f868c3615f9'},
]}
def test_port_get_all(self):
with mock.patch.object(self.nc.client, 'list_ports',
side_effect=self.fake_ports_list):
ports = self.nc.port_get_all()
self.assertEqual(1, len(ports))
self.assertEqual('96d49cc3-4e01-40ce-9cac-c0e32642a442',
ports[0]['id'])
@staticmethod
def fake_networks_list():
return {'networks':
[{'admin_state_up': True,
'id': '298a3088-a446-4d5a-bad8-f92ecacd786b',
'name': 'public',
'provider:network_type': 'gre',
'provider:physical_network': None,
'provider:segmentation_id': 2,
'router:external': True,
'shared': False,
'status': 'ACTIVE',
'subnets': [u'c4b6f5b8-3508-4896-b238-a441f25fb492'],
'tenant_id': '62d6f08bbd3a44f6ad6f00ca15cce4e5'},
]}
@staticmethod
def fake_pool_list():
return {'pools': [{'status': 'ACTIVE',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'status_description': None,
'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
]}
def test_pool_list(self):
with mock.patch.object(self.nc.client, 'list_pools',
side_effect=self.fake_pool_list):
pools = self.nc.pool_get_all()
self.assertEqual(1, len(pools))
self.assertEqual('ce73ad36-437d-4c84-aee1-186027d3da9a',
pools[0]['id'])
@staticmethod
def fake_vip_list():
return {'vips': [{'status': 'ACTIVE',
'status_description': None,
'protocol': 'HTTP',
'description': '',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'connection_limit': -1,
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'session_persistence': None,
'address': '10.0.0.2',
'protocol_port': 80,
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'myvip'},
]}
def test_vip_list(self):
with mock.patch.object(self.nc.client, 'list_vips',
side_effect=self.fake_vip_list):
vips = self.nc.vip_get_all()
self.assertEqual(1, len(vips))
self.assertEqual('cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
vips[0]['id'])
@staticmethod
def fake_member_list():
return {'members': [{'status': 'ACTIVE',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'address': '10.0.0.3',
'status_description': None,
'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'},
]}
def test_member_list(self):
with mock.patch.object(self.nc.client, 'list_members',
side_effect=self.fake_member_list):
members = self.nc.member_get_all()
self.assertEqual(1, len(members))
self.assertEqual('290b61eb-07bc-4372-9fbf-36459dd0f96b',
members[0]['id'])
@staticmethod
def fake_monitors_list():
return {'health_monitors':
[{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365',
'admin_state_up': True,
'tenant_id': "d5d2817dae6b42159be9b665b64beb0e",
'delay': 2,
'max_retries': 5,
'timeout': 5,
'pools': [],
'type': 'PING',
}]}
def test_monitor_list(self):
with mock.patch.object(self.nc.client, 'list_health_monitors',
side_effect=self.fake_monitors_list):
monitors = self.nc.health_monitor_get_all()
self.assertEqual(1, len(monitors))
self.assertEqual('34ae33e1-0035-49e2-a2ca-77d5d3fab365',
monitors[0]['id'])
@staticmethod
def fake_pool_stats(fake_pool):
return {'stats':
[{'active_connections': 1,
'total_connections': 2,
'bytes_in': 3,
'bytes_out': 4
}]}
def test_pool_stats(self):
with mock.patch.object(self.nc.client, 'retrieve_pool_stats',
side_effect=self.fake_pool_stats):
stats = self.nc.pool_stats('fake_pool')['stats']
self.assertEqual(1, len(stats))
self.assertEqual(1, stats[0]['active_connections'])
self.assertEqual(2, stats[0]['total_connections'])
self.assertEqual(3, stats[0]['bytes_in'])
self.assertEqual(4, stats[0]['bytes_out'])
def test_v1_list_loadbalancer_returns_empty_list(self):
self.assertEqual([], self.nc.list_loadbalancer())
def test_v1_list_listener_returns_empty_list(self):
self.assertEqual([], self.nc.list_listener())
|
ityaptin/ceilometer
|
ceilometer/tests/unit/test_neutronclient.py
|
Python
|
apache-2.0
| 7,903
|
# -*- coding: utf-8 -*-
"""
mygeotab.exceptions
~~~~~~~~~~~~~~~~~~~
Exceptions thrown by the MyGeotab API.
"""
class MyGeotabException(IOError):
"""There was an exception while handling your call."""
def __init__(self, full_error, *args, **kwargs):
"""Initialize MyGeotabException with the full error from the server.
:param full_error: The full JSON-decoded error.
"""
self._full_error = full_error
main_error = full_error["errors"][0]
self.name = main_error["name"]
self.message = main_error["message"]
self.data = main_error.get("data")
self.stack_trace = main_error.get("stackTrace")
super(MyGeotabException, self).__init__(self.message, *args, **kwargs)
def __str__(self):
error_str = "{0}\n{1}".format(self.name, self.message)
if self.stack_trace:
error_str += "\n\nStacktrace:\n{0}".format(self.stack_trace)
return error_str
class AuthenticationException(IOError):
"""Unsuccessful authentication with the server."""
def __init__(self, username, database, server, *args, **kwargs):
"""Initialize AuthenticationException with username, database, and server.
:param username: The username used for MyGeotab servers. Usually an email address.
:param database: The database or company name.
:param server: The server ie. my23.geotab.com.
"""
self.username = username
self.database = database
self.server = server
super(AuthenticationException, self).__init__(self.message, *args, **kwargs)
def __str__(self):
return self.message
@property
def message(self):
"""The exception message."""
return "Cannot authenticate '{0} @ {1}/{2}'".format(self.username, self.server, self.database)
class TimeoutException(IOError):
"""The request timed out while handling your request."""
def __init__(self, server, *args, **kwargs):
"""Initialize TimeoutException with the server name.
:param server: The server ie. my23.geotab.com.
"""
self.server = server
super(TimeoutException, self).__init__(self.message, *args, **kwargs)
def __str__(self):
return self.message
@property
def message(self):
"""The excepton message."""
return "Request timed out @ {0}".format(self.server)
|
Geotab/mygeotab-python
|
mygeotab/exceptions.py
|
Python
|
apache-2.0
| 2,410
|
from subprocess import check_output
import pytest
from django.conf import settings
from django.contrib.auth.models import Group
@pytest.mark.django_db
def test_all_migration_files_have_been_created():
check_output(
["python", "manage.py", "makemigrations", "--dry-run", "--check"]
)
@pytest.mark.django_db
@pytest.mark.parametrize(
"group",
[
settings.REGISTERED_AND_ANON_USERS_GROUP_NAME,
settings.REGISTERED_USERS_GROUP_NAME,
],
)
def test_all_users_group_exists(group):
assert Group.objects.get(name=group)
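# Illustrative note (not part of the original tests): "makemigrations --dry-run --check" exits
# with a non-zero status when model changes lack a migration, so check_output raises
# subprocess.CalledProcessError and the first test fails until the migration is generated.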
|
comic/comic-django
|
app/tests/core_tests/test_migrations.py
|
Python
|
apache-2.0
| 563
|
import web_client_test
|
lukejharmon/tangelohub
|
test/__init__.py
|
Python
|
apache-2.0
| 23
|
#!/usr/bin/env python
#
# Copyright 2012 BloomReach, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Super S3 command line tool, setup.py
"""
from setuptools import setup, find_packages
__author__ = "Chou-han Yang"
__copyright__ = "Copyright 2014 BloomReach, Inc."
__license__ = "http://www.apache.org/licenses/LICENSE-2.0"
__version__ = "1.5.20"
__maintainer__ = __author__
__status__ = "Development"
setup(name='s4cmd',
version=__version__,
description='Super S3 command line tool',
author=__author__,
license=__license__,
url='https://github.com/bloomreach/s4cmd',
py_modules=['s4cmd'],
scripts=['s4cmd', 's4cmd.py'], # Added s4cmd.py as script for backward compatibility
install_requires=['boto>=2.3.0'],
)
|
jephdo/s4cmd
|
setup.py
|
Python
|
apache-2.0
| 1,277
|
import io
import tempfile
import tarfile
import os.path
from requests.utils import urlparse
from cnrclient.client import CnrClient
import cnrclient.pack as packager
class KubBase(object):
media_type = NotImplementedError
target = NotImplementedError
kub_class = NotImplementedError
manifest_file = []
cnr_client = CnrClient
def __init__(self, name, version='default', endpoint=None):
self._deploy_name = name
self.endpoint = endpoint
self._registry = self.cnr_client(endpoint=self.endpoint)
self._deploy_version = version
self._package = None
self._manifest = None
@property
def package(self):
if self._package is None:
result = self._fetch_package()
self._package = packager.CnrPackage(result, b64_encoded=False)
return self._package
def _create_manifest(self):
raise NotImplementedError
@property
def manifest(self):
if self._manifest is None:
self._manifest = self._create_manifest()
return self._manifest
def __unicode__(self):
return ("(<{class_name}({name}=={version})>".format(class_name=self.__class__.__name__,
name=self.name, version=self.version))
def __str__(self):
return self.__unicode__().encode('utf-8')
def __repr__(self):
return self.__str__()
@property
def author(self):
pass
@property
def version(self):
return self.manifest.version
@property
def description(self):
pass
@property
def name(self):
return self.manifest.name
@property
def variables(self):
pass
def _fetch_package(self):
parse = urlparse(self._deploy_name)
if parse.scheme in ["http", "https"]:
# @TODO
pass
elif parse.scheme == "file":
parts = parse.path.split("/")
_, ext = os.path.splitext(parts[-1])
if ext == ".gz":
filepath = parse.path
else:
filepath = tempfile.NamedTemporaryFile().name
packager.pack_kub(filepath)
with open(filepath, "rb") as tarf:
return tarf.read()
else:
return self._registry.pull(self._deploy_name, self._deploy_version, self.media_type)
def make_tarfile(self, source_dir):
output = io.BytesIO()
with tarfile.open(fileobj=output, mode="w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
return output
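# Illustrative sketch (not part of the original module): a concrete format is expected to fill
# in the class-level attributes and _create_manifest(). The names below are hypothetical and
# only mark the extension points.
#
#   class KubExample(KubBase):
#       media_type = 'kub-example'
#       target = 'kubernetes'
#       kub_class = 'kub-example'
#
#       def _create_manifest(self):
#           # hypothetical: build an object exposing .name and .version from self.package
#           return ExampleManifest(self.package)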
|
app-registry/appr-cli
|
cnrclient/formats/kub_base.py
|
Python
|
apache-2.0
| 2,636
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone import config
from keystone import exception
from keystone import service
from keystone import test
import default_fixtures
CONF = config.CONF
def _build_user_auth(token=None, user_id=None, username=None,
password=None, tenant_id=None, tenant_name=None):
"""Build auth dictionary.
It will create an auth dictionary based on all the arguments
that it receives.
"""
auth_json = {}
if token is not None:
auth_json['token'] = token
if username or password:
auth_json['passwordCredentials'] = {}
if username is not None:
auth_json['passwordCredentials']['username'] = username
if user_id is not None:
auth_json['passwordCredentials']['userId'] = user_id
if password is not None:
auth_json['passwordCredentials']['password'] = password
if tenant_name is not None:
auth_json['tenantName'] = tenant_name
if tenant_id is not None:
auth_json['tenantId'] = tenant_id
return auth_json
class AuthTest(test.TestCase):
def setUp(self):
super(AuthTest, self).setUp()
CONF.identity.driver = 'keystone.identity.backends.kvs.Identity'
self.load_backends()
self.load_fixtures(default_fixtures)
self.api = service.TokenController()
def test_authenticate_user_id_too_large(self):
"""Verify sending large 'userId' raises the right exception."""
body_dict = _build_user_auth(user_id='0' * 65, username='FOO',
password='foo2')
self.assertRaises(exception.ValidationSizeError, self.api.authenticate,
{}, body_dict)
def test_authenticate_username_too_large(self):
"""Verify sending large 'username' raises the right exception."""
body_dict = _build_user_auth(username='0' * 65, password='foo2')
self.assertRaises(exception.ValidationSizeError, self.api.authenticate,
{}, body_dict)
def test_authenticate_tenant_id_too_large(self):
"""Verify sending large 'tenantId' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_id='0' * 65)
self.assertRaises(exception.ValidationSizeError, self.api.authenticate,
{}, body_dict)
def test_authenticate_tenant_name_too_large(self):
"""Verify sending large 'tenantName' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_name='0' * 65)
self.assertRaises(exception.ValidationSizeError, self.api.authenticate,
{}, body_dict)
def test_authenticate_token_too_large(self):
"""Verify sending large 'token' raises the right exception."""
body_dict = _build_user_auth(token={'id': '0' * 8193})
self.assertRaises(exception.ValidationSizeError, self.api.authenticate,
{}, body_dict)
def test_authenticate_password_too_large(self):
"""Verify sending large 'password' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='0' * 8193)
self.assertRaises(exception.ValidationSizeError, self.api.authenticate,
{}, body_dict)
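# Illustrative note (not part of the original tests): _build_user_auth(username='FOO',
# password='foo2', tenant_name='BAR') produces the Keystone v2 style request body
#   {'passwordCredentials': {'username': 'FOO', 'password': 'foo2'}, 'tenantName': 'BAR'}
# which is what the oversized-field tests above feed into TokenController.authenticate().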
|
cbrucks/keystone_ldap
|
tests/test_service.py
|
Python
|
apache-2.0
| 4,005
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift import gettext_ as _
from urllib import unquote
import time
from swift.common.utils import public, csv_append, Timestamp
from swift.common.constraints import check_metadata
from swift.common import constraints
from swift.common.http import HTTP_ACCEPTED, is_success
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation, clear_info_cache
from swift.common.storage_policy import POLICIES
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPNotFound
class ContainerController(Controller):
"""WSGI controller for container requests"""
server_type = 'Container'
# Ensure these are all lowercase
pass_through_headers = ['x-container-read', 'x-container-write',
'x-container-sync-key', 'x-container-sync-to',
'x-versions-location']
def __init__(self, app, account_name, container_name, **kwargs):
Controller.__init__(self, app)
self.account_name = unquote(account_name)
self.container_name = unquote(container_name)
def _x_remove_headers(self):
st = self.server_type.lower()
return ['x-remove-%s-read' % st,
'x-remove-%s-write' % st,
'x-remove-versions-location']
def _convert_policy_to_index(self, req):
"""
Helper method to convert a policy name (from a request from a client)
to a policy index (for a request to a backend).
:param req: incoming request
"""
policy_name = req.headers.get('X-Storage-Policy')
if not policy_name:
return
policy = POLICIES.get_by_name(policy_name)
if not policy:
raise HTTPBadRequest(request=req,
content_type="text/plain",
body=("Invalid %s '%s'"
% ('X-Storage-Policy', policy_name)))
if policy.is_deprecated:
body = 'Storage Policy %r is deprecated' % (policy.name)
raise HTTPBadRequest(request=req, body=body)
return int(policy)
def clean_acls(self, req):
if 'swift.clean_acl' in req.environ:
for header in ('x-container-read', 'x-container-write'):
if header in req.headers:
try:
req.headers[header] = \
req.environ['swift.clean_acl'](header,
req.headers[header])
except ValueError as err:
return HTTPBadRequest(request=req, body=str(err))
return None
def GETorHEAD(self, req):
"""Handler for HTTP GET/HEAD requests."""
if not self.account_info(self.account_name, req)[1]:
return HTTPNotFound(request=req)
part = self.app.container_ring.get_part(
self.account_name, self.container_name)
resp = self.GETorHEAD_base(
req, _('Container'), self.app.container_ring, part,
req.swift_entity_path)
if 'swift.authorize' in req.environ:
req.acl = resp.headers.get('x-container-read')
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not req.environ.get('swift_owner', False):
for key in self.app.swift_owner_headers:
if key in resp.headers:
del resp.headers[key]
return resp
@public
@delay_denial
@cors_validation
def GET(self, req):
"""Handler for HTTP GET requests."""
return self.GETorHEAD(req)
@public
@delay_denial
@cors_validation
def HEAD(self, req):
"""Handler for HTTP HEAD requests."""
return self.GETorHEAD(req)
@public
@cors_validation
def PUT(self, req):
"""HTTP PUT request handler."""
error_response = \
self.clean_acls(req) or check_metadata(req, 'container')
if error_response:
return error_response
policy_index = self._convert_policy_to_index(req)
if not req.environ.get('swift_owner'):
for key in self.app.swift_owner_headers:
req.headers.pop(key, None)
if len(self.container_name) > constraints.MAX_CONTAINER_NAME_LENGTH:
resp = HTTPBadRequest(request=req)
resp.body = 'Container name length of %d longer than %d' % \
(len(self.container_name),
constraints.MAX_CONTAINER_NAME_LENGTH)
return resp
account_partition, accounts, container_count = \
self.account_info(self.account_name, req)
if not accounts and self.app.account_autocreate:
self.autocreate_account(req, self.account_name)
account_partition, accounts, container_count = \
self.account_info(self.account_name, req)
if not accounts:
return HTTPNotFound(request=req)
if self.app.max_containers_per_account > 0 and \
container_count >= self.app.max_containers_per_account and \
self.account_name not in self.app.max_containers_whitelist:
container_info = \
self.container_info(self.account_name, self.container_name,
req)
if not is_success(container_info.get('status')):
resp = HTTPForbidden(request=req)
resp.body = 'Reached container limit of %s' % \
self.app.max_containers_per_account
return resp
container_partition, containers = self.app.container_ring.get_nodes(
self.account_name, self.container_name)
headers = self._backend_requests(req, len(containers),
account_partition, accounts,
policy_index)
clear_info_cache(self.app, req.environ,
self.account_name, self.container_name)
resp = self.make_requests(
req, self.app.container_ring,
container_partition, 'PUT', req.swift_entity_path, headers)
return resp
@public
@cors_validation
def POST(self, req):
"""HTTP POST request handler."""
error_response = \
self.clean_acls(req) or check_metadata(req, 'container')
if error_response:
return error_response
if not req.environ.get('swift_owner'):
for key in self.app.swift_owner_headers:
req.headers.pop(key, None)
account_partition, accounts, container_count = \
self.account_info(self.account_name, req)
if not accounts:
return HTTPNotFound(request=req)
container_partition, containers = self.app.container_ring.get_nodes(
self.account_name, self.container_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(self.app, req.environ,
self.account_name, self.container_name)
resp = self.make_requests(
req, self.app.container_ring, container_partition, 'POST',
req.swift_entity_path, [headers] * len(containers))
return resp
@public
@cors_validation
def DELETE(self, req):
"""HTTP DELETE request handler."""
account_partition, accounts, container_count = \
self.account_info(self.account_name, req)
if not accounts:
return HTTPNotFound(request=req)
container_partition, containers = self.app.container_ring.get_nodes(
self.account_name, self.container_name)
headers = self._backend_requests(req, len(containers),
account_partition, accounts)
clear_info_cache(self.app, req.environ,
self.account_name, self.container_name)
resp = self.make_requests(
req, self.app.container_ring, container_partition, 'DELETE',
req.swift_entity_path, headers)
# Indicates no server had the container
if resp.status_int == HTTP_ACCEPTED:
return HTTPNotFound(request=req)
return resp
def _backend_requests(self, req, n_outgoing, account_partition, accounts,
policy_index=None):
additional = {'X-Timestamp': Timestamp(time.time()).internal}
if policy_index is None:
additional['X-Backend-Storage-Policy-Default'] = \
int(POLICIES.default)
else:
additional['X-Backend-Storage-Policy-Index'] = str(policy_index)
headers = [self.generate_request_headers(req, transfer=True,
additional=additional)
for _junk in range(n_outgoing)]
for i, account in enumerate(accounts):
i = i % len(headers)
headers[i]['X-Account-Partition'] = account_partition
headers[i]['X-Account-Host'] = csv_append(
headers[i].get('X-Account-Host'),
'%(ip)s:%(port)s' % account)
headers[i]['X-Account-Device'] = csv_append(
headers[i].get('X-Account-Device'),
account['device'])
return headers
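# Illustrative note (not part of the original module): _backend_requests spreads the account
# hosts and devices round-robin over the outgoing per-container headers, and csv_append joins
# successive values with commas. With three account nodes and two outgoing requests, for
# example, headers[0] might carry
#   X-Account-Host: "10.0.0.1:6002,10.0.0.3:6002"
# while headers[1] carries only the second account node (addresses are illustrative).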
|
heemanshu/swift_juno
|
swift/proxy/controllers/container.py
|
Python
|
apache-2.0
| 9,982
|
# -*- coding:utf8 -*-
# !/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get("result").get("action") == "map":
yql_url = makeYqlQuery(req)
if yql_url is None:
return {}
result = urlopen(yql_url).read()
data = json.loads(result)
        place_id_1 = data['results'][0]['place_id']
        yql_url_1 = makeYqlQuery1(req, place_id_1)
if yql_url_1 is None:
return {}
result_1 = urlopen(yql_url_1).read()
data_1 = json.loads(result_1)
res = makeWebhookResult(data,data_1)
return res
if req.get("result").get("action") == "age":
number1 = req.get("result").get("parameters").get("number")
res = age(number1)
return res
if req.get("result").get("action") == "weight":
number2 = req.get("result").get("parameters").get("unit-weight").get("amount")
res = weight(number2)
return res
if req.get("result").get("action") == "months":
a = req.get("result").get("parameters").get("boo")
res = months(a)
return res
def makeYqlQuery(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("geo-city")
item = parameters.get("itemssss")
    if city is None:
        return None
    cityarr = city.split(" ")
    itemarr = item.split(" ")
    url_1 = "https://maps.googleapis.com/maps/api/place/textsearch/json?query="
    url_1 = url_1 + cityarr[0]
    c = len(cityarr)
    for i in range(1, c):
        url_1 = url_1 + '+' + cityarr[i]
    for i in itemarr:
        url_1 = url_1 + '+' + i
    url_1 = url_1 + '+' + "office" + "&key=" + "AIzaSyDfiyv5MZ0uubTkHw1oq9bkK3DXJo5uVtU"
    return url_1
def makeYqlQuery1(req,place_id_1):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("geo-city")
item = parameters.get("itemssss")
    if city is None:
        return None
    cityarr = city.split(" ")
    itemarr = item.split(" ")
'''url_1="https://maps.googleapis.com/maps/api/place/textsearch/json?query="
url_1=url_1 + cityarr[0]
c=len(cityarr)
for i in range(1,c):
url_1=url_1+ '+' + cityarr[i]
for i in itemarr:
url_1 = url_1 + '+' + i
url_1=url_1+ '+'+"office"+"&key=" +"AIzaSyDfiyv5MZ0uubTkHw1oq9bkK3DXJo5uVtU"
return url_1'''
url_2="https://maps.googleapis.com/maps/api/place/details/json?placeid="+place_id_1+"&key=AIzaSyDfiyv5MZ0uubTkHw1oq9bkK3DXJo5uVtU"
return url_2
def makeWebhookResult(data,data_1):
#results = data.get('results')
#if results is None:
# return {}
formatted_address_1 = data['results'][0]['formatted_address']
name = data['results'][0]['name']
if formatted_address_1 is None:
return {}
place_id_1=data['results'][0]['place_id']
if place_id_1 is None:
return {}
phone=data_1['result']['formatted_phone_number']
#item = channel.get('item')
#location = channel.get('location')
#units = channel.get('units')
#if (location is None) or (item is None) or (units is None):
# return {}
#condition = item.get('condition')
#if condition is None:
# return {}
# print(json.dumps(item, indent=4))
speech = name + " \n Address is: " + formatted_address_1 +"\n \n Phone number: "+phone
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "https://github.com/ranjan1110/google-map"
}
def age(num):
if num<18 or num >60:
speech = "You don't fall into required age criteria to donate blood."
else :
speech = "What is Your Weight?"
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "https://github.com/ranjan1110/google-map"
}
def weight(num):
if num<50 :
speech = "You are Underweight to donate blood. See you next time!"
else :
speech = "Have You Donated blood in past three Months?"
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "https://github.com/ranjan1110/google-map"
}
def months(a):
if a=="yes" :
speech = "Sorry! You Can't donate blood. See you next time."
if a=="no" :
speech = "A person can only donate if:" + "\n \n 1) Never has been tested HIV positive." + "\n \n 2) Not suffering from ailments like cardiac arrest, hypertension, blood pressure, cancer, epilepsy, kidney ailments and diabetes." + " \n \n 3) Hasn't undergone ear body piercing or tattoo in the past 6 months." + " \n \n 4) Haven't undergone immunization in the past 1 month." + " \n \n 5) Not treated for rabies or received Hepatitis B vaccine in the past 6 months." + " \n \n 6) Hasn't consumed alcohol in the past 24 hours." + " \n \n 7) Haven't had fits, tuberculosis, asthma and allergic disorders in the past." + " \n \n 8) Haven't undergone major dental procedures or general surgeries in the past 1 month." + " \n \n 9)In case of female donors :" + "\n \n \t (i) Haven't had miscarriage in the past 6 months." + "\n \n \t (ii) Not pregnant or breastfeeding." + " \n \n Do you pass these conditions?"
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "https://github.com/ranjan1110/google-map"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
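# Illustrative note (not part of the original app): processRequest() dispatches on the
# "result.action" field of an API.AI / Dialogflow v1 style payload. For example, a minimal
# request body of
#   {"result": {"action": "age", "parameters": {"number": 25}}}
# would be answered with a JSON body whose "speech"/"displayText" read "What is Your Weight?".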
|
harshit1504/hs
|
app.py
|
Python
|
apache-2.0
| 7,265
|
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import json
import mock
import sqlalchemy as sa
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral.rpc import clients as rpc
from mistral.tests.unit.api import base
from mistral.workflow import data_flow
from mistral.workflow import states
# TODO(everyone): later we need additional tests verifying all the errors etc.
RESULT = {"some": "result"}
PUBLISHED = {"var": "val"}
RUNTIME_CONTEXT = {
'triggered_by': [
{
'task_id': '123-123-123',
'event': 'on-success'
}
]
}
WF_EX = models.WorkflowExecution(
id='abc',
workflow_name='some',
description='execution description.',
spec={'name': 'some'},
state=states.RUNNING,
state_info=None,
input={'foo': 'bar'},
output={},
params={'env': {'k1': 'abc'}},
created_at=datetime.datetime(1970, 1, 1),
updated_at=datetime.datetime(1970, 1, 1)
)
TASK_EX = models.TaskExecution(
id='123',
name='task',
workflow_name='flow',
workflow_id='123e4567-e89b-12d3-a456-426655441111',
spec={
'type': 'direct',
'version': '2.0',
'name': 'task'
},
action_spec={},
state=states.RUNNING,
tags=['a', 'b'],
in_context={},
runtime_context=RUNTIME_CONTEXT,
workflow_execution_id=WF_EX.id,
created_at=datetime.datetime(1970, 1, 1),
updated_at=datetime.datetime(1970, 1, 1),
published=PUBLISHED,
processed=True
)
WITH_ITEMS_TASK_EX = models.TaskExecution(
id='123',
name='task',
workflow_name='flow',
workflow_id='123e4567-e89b-12d3-a456-426655441111',
spec={
'type': 'direct',
'version': '2.0',
'name': 'task',
'with-items': 'var in [1, 2, 3]'
},
action_spec={},
state=states.RUNNING,
tags=['a', 'b'],
in_context={},
runtime_context=RUNTIME_CONTEXT,
workflow_execution_id=WF_EX.id,
created_at=datetime.datetime(1970, 1, 1),
updated_at=datetime.datetime(1970, 1, 1),
published=PUBLISHED,
processed=True
)
TASK = {
'id': '123',
'name': 'task',
'workflow_name': 'flow',
'workflow_id': '123e4567-e89b-12d3-a456-426655441111',
'state': 'RUNNING',
'workflow_execution_id': WF_EX.id,
'created_at': '1970-01-01 00:00:00',
'updated_at': '1970-01-01 00:00:00',
'result': json.dumps(RESULT),
'published': json.dumps(PUBLISHED),
'runtime_context': json.dumps(RUNTIME_CONTEXT),
'processed': True
}
TASK_WITHOUT_RESULT = copy.deepcopy(TASK)
del TASK_WITHOUT_RESULT['result']
UPDATED_TASK_EX = copy.deepcopy(TASK_EX)
UPDATED_TASK_EX['state'] = 'SUCCESS'
UPDATED_TASK = copy.deepcopy(TASK)
UPDATED_TASK['state'] = 'SUCCESS'
ERROR_TASK_EX = copy.deepcopy(TASK_EX)
ERROR_TASK_EX['state'] = 'ERROR'
ERROR_ITEMS_TASK_EX = copy.deepcopy(WITH_ITEMS_TASK_EX)
ERROR_ITEMS_TASK_EX['state'] = 'ERROR'
ERROR_TASK = copy.deepcopy(TASK)
ERROR_TASK['state'] = 'ERROR'
BROKEN_TASK = copy.deepcopy(TASK)
RERUN_TASK = {
'id': '123',
'state': 'RUNNING'
}
MOCK_WF_EX = mock.MagicMock(return_value=WF_EX)
MOCK_TASK = mock.MagicMock(return_value=TASK_EX)
MOCK_TASKS = mock.MagicMock(return_value=[TASK_EX])
MOCK_EMPTY = mock.MagicMock(return_value=[])
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
MOCK_ERROR_TASK = mock.MagicMock(return_value=ERROR_TASK_EX)
MOCK_ERROR_ITEMS_TASK = mock.MagicMock(return_value=ERROR_ITEMS_TASK_EX)
TASK_EX_WITH_PROJECT_ID = TASK_EX.get_clone()
TASK_EX_WITH_PROJECT_ID.project_id = '<default-project>'
@mock.patch.object(
data_flow,
'get_task_execution_result', mock.Mock(return_value=RESULT)
)
class TestTasksController(base.APITest):
@mock.patch.object(db_api, 'get_task_execution', MOCK_TASK)
def test_get(self):
resp = self.app.get('/v2/tasks/123')
self.assertEqual(200, resp.status_int)
self.assertDictEqual(TASK, resp.json)
@mock.patch.object(db_api, 'get_task_execution')
def test_get_operational_error(self, mocked_get):
mocked_get.side_effect = [
# Emulating DB OperationalError
sa.exc.OperationalError('Mock', 'mock', 'mock'),
TASK_EX # Successful run
]
resp = self.app.get('/v2/tasks/123')
self.assertEqual(200, resp.status_int)
self.assertDictEqual(TASK, resp.json)
@mock.patch.object(db_api, 'get_task_execution', MOCK_NOT_FOUND)
def test_get_not_found(self):
resp = self.app.get('/v2/tasks/123', expect_errors=True)
self.assertEqual(404, resp.status_int)
@mock.patch.object(db_api, 'get_task_executions', MOCK_TASKS)
def test_get_all(self):
resp = self.app.get('/v2/tasks')
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(resp.json['tasks']))
self.assertDictEqual(TASK_WITHOUT_RESULT, resp.json['tasks'][0])
@mock.patch.object(db_api, 'get_task_executions')
def test_get_all_operational_error(self, mocked_get_all):
mocked_get_all.side_effect = [
# Emulating DB OperationalError
sa.exc.OperationalError('Mock', 'mock', 'mock'),
[TASK_EX] # Successful run
]
resp = self.app.get('/v2/tasks')
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(resp.json['tasks']))
self.assertDictEqual(TASK_WITHOUT_RESULT, resp.json['tasks'][0])
@mock.patch.object(db_api, 'get_task_execution',
return_value=TASK_EX_WITH_PROJECT_ID)
def test_get_within_project_id(self, mock_get):
resp = self.app.get('/v2/tasks/123')
self.assertEqual(200, resp.status_int)
self.assertTrue('project_id' in resp.json)
@mock.patch.object(db_api, 'get_task_executions', MOCK_EMPTY)
def test_get_all_empty(self):
resp = self.app.get('/v2/tasks')
self.assertEqual(200, resp.status_int)
self.assertEqual(0, len(resp.json['tasks']))
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(
db_api,
'get_task_execution',
mock.MagicMock(side_effect=[ERROR_TASK_EX, TASK_EX])
)
@mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX)
def test_put(self):
params = copy.deepcopy(RERUN_TASK)
params['reset'] = True
resp = self.app.put_json('/v2/tasks/123', params=params)
self.assertEqual(200, resp.status_int)
self.assertDictEqual(TASK, resp.json)
rpc.EngineClient.rerun_workflow.assert_called_with(
TASK_EX.id,
reset=params['reset'],
env=None
)
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(
db_api,
'get_task_execution',
mock.MagicMock(side_effect=[ERROR_TASK_EX, TASK_EX])
)
@mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX)
def test_put_missing_reset(self):
params = copy.deepcopy(RERUN_TASK)
resp = self.app.put_json(
'/v2/tasks/123',
params=params,
expect_errors=True)
self.assertEqual(400, resp.status_int)
self.assertIn('faultstring', resp.json)
self.assertIn('Mandatory field missing', resp.json['faultstring'])
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(
db_api,
'get_task_execution',
mock.MagicMock(side_effect=[ERROR_ITEMS_TASK_EX, WITH_ITEMS_TASK_EX])
)
@mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX)
def test_put_with_items(self):
params = copy.deepcopy(RERUN_TASK)
params['reset'] = False
resp = self.app.put_json('/v2/tasks/123', params=params)
self.assertEqual(200, resp.status_int)
self.assertDictEqual(TASK, resp.json)
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(
db_api,
'get_task_execution',
mock.MagicMock(side_effect=[ERROR_TASK_EX, TASK_EX])
)
@mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX)
def test_put_env(self):
params = copy.deepcopy(RERUN_TASK)
params['reset'] = True
params['env'] = '{"k1": "def"}'
resp = self.app.put_json('/v2/tasks/123', params=params)
self.assertEqual(200, resp.status_int)
self.assertDictEqual(TASK, resp.json)
rpc.EngineClient.rerun_workflow.assert_called_with(
TASK_EX.id,
reset=params['reset'],
env=json.loads(params['env'])
)
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(db_api, 'get_task_execution', MOCK_TASK)
def test_put_current_task_not_in_error(self):
params = copy.deepcopy(RERUN_TASK)
params['reset'] = True
resp = self.app.put_json(
'/v2/tasks/123',
params=params,
expect_errors=True
)
self.assertEqual(400, resp.status_int)
self.assertIn('faultstring', resp.json)
self.assertIn('execution must be in ERROR', resp.json['faultstring'])
@mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX)
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK)
def test_put_current_task_in_error(self):
params = copy.deepcopy(RERUN_TASK)
params['reset'] = True
params['env'] = '{"k1": "def"}'
resp = self.app.put_json('/v2/tasks/123', params=params)
self.assertEqual(200, resp.status_int)
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK)
def test_put_invalid_state(self):
params = copy.deepcopy(RERUN_TASK)
params['state'] = states.IDLE
params['reset'] = True
resp = self.app.put_json(
'/v2/tasks/123',
params=params,
expect_errors=True
)
self.assertEqual(400, resp.status_int)
self.assertIn('faultstring', resp.json)
self.assertIn('Invalid task state', resp.json['faultstring'])
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK)
def test_put_invalid_reset(self):
params = copy.deepcopy(RERUN_TASK)
params['reset'] = False
resp = self.app.put_json(
'/v2/tasks/123',
params=params,
expect_errors=True
)
self.assertEqual(400, resp.status_int)
self.assertIn('faultstring', resp.json)
self.assertIn('Only with-items task', resp.json['faultstring'])
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK)
def test_put_valid_state(self):
params = copy.deepcopy(RERUN_TASK)
params['state'] = states.RUNNING
params['reset'] = True
resp = self.app.put_json(
'/v2/tasks/123',
params=params
)
self.assertEqual(200, resp.status_int)
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK)
def test_put_mismatch_task_name(self):
params = copy.deepcopy(RERUN_TASK)
params['name'] = 'abc'
params['reset'] = True
resp = self.app.put_json(
'/v2/tasks/123',
params=params,
expect_errors=True
)
self.assertEqual(400, resp.status_int)
self.assertIn('faultstring', resp.json)
self.assertIn('Task name does not match', resp.json['faultstring'])
@mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX)
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK)
def test_put_match_task_name(self):
params = copy.deepcopy(RERUN_TASK)
params['name'] = 'task'
params['reset'] = True
resp = self.app.put_json(
'/v2/tasks/123',
params=params,
expect_errors=True
)
self.assertEqual(200, resp.status_int)
@mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
@mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK)
def test_put_mismatch_workflow_name(self):
params = copy.deepcopy(RERUN_TASK)
params['workflow_name'] = 'xyz'
params['reset'] = True
resp = self.app.put_json(
'/v2/tasks/123',
params=params,
expect_errors=True
)
self.assertEqual(400, resp.status_int)
self.assertIn('faultstring', resp.json)
self.assertIn('Workflow name does not match', resp.json['faultstring'])
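# Illustrative note (not part of the original tests): the rerun calls exercised above PUT to
# /v2/tasks/<id> with a body like {"id": "123", "state": "RUNNING", "reset": true}, optionally
# carrying "env" as a JSON-encoded string (e.g. '{"k1": "def"}') which is decoded before being
# passed to EngineClient.rerun_workflow().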
|
StackStorm/mistral
|
mistral/tests/unit/api/v2/test_tasks.py
|
Python
|
apache-2.0
| 13,732
|
from config import DATABASE_URI
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI  # configure before init so the extension picks up the URI
db = SQLAlchemy(app)
Bootstrap(app)
|
Mirantis/disk_perf_test_tool
|
web_app/app.py
|
Python
|
apache-2.0
| 254
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Simple multi-layer perceptron model.
"""
import logging
from neon.backends.backend import Block
from neon.models.model import Model
from neon.util.param import opt_param, req_param
logger = logging.getLogger(__name__)
class MLP(Model):
"""
Fully connected, feed-forward, multi-layer perceptron model
"""
def __init__(self, **kwargs):
self.initialized = False
self.__dict__.update(kwargs)
req_param(self, ['layers', 'batch_size'])
opt_param(self, ['step_print'], -1)
opt_param(self, ['accumulate'], False)
opt_param(self, ['reuse_deltas'], True)
opt_param(self, ['timing_plots'], False)
opt_param(self, ['serialize_schedule'])
def link(self, initlayer=None):
for ll, pl in zip(self.layers, [initlayer] + self.layers[:-1]):
ll.set_previous_layer(pl)
self.print_layers()
def initialize(self, backend, initlayer=None):
self.data_layer = self.layers[0]
self.cost_layer = self.layers[-1]
self.class_layer = self.layers[-2]
if not hasattr(self.cost_layer, 'ref_layer'):
self.cost_layer.ref_layer = self.data_layer
if self.initialized:
return
self.backend = backend
kwargs = {"backend": self.backend, "batch_size": self.batch_size,
"accumulate": self.accumulate}
for ll, pl in zip(self.layers, [initlayer] + self.layers[:-1]):
ll.initialize(kwargs)
self.nin_max = max(map(lambda x: x.nin, self.layers[1:-1]))
self.global_deltas = None
if self.reuse_deltas:
self.global_deltas = backend.zeros(
(2 * self.nin_max, self.batch_size),
dtype=self.layers[1].deltas_dtype)
self.global_deltas.name = "delta_pool"
for idx, ll in enumerate(self.layers[1:-1]):
ll.set_deltas_buf(self.global_deltas,
offset=((idx % 2) * self.nin_max))
self.initialized = True
# Make some scratch space for NervanaGPU backend:
if self.backend.__module__ == 'neon.backends.gpu':
self.backend.init_mempool((1, self.batch_size),
dtype=self.layers[1].deltas_dtype)
def fprop(self):
for ll, pl in zip(self.layers, [None] + self.layers[:-1]):
y = None if pl is None else pl.output
ll.fprop(y)
def bprop(self):
for ll, nl in zip(reversed(self.layers),
reversed(self.layers[1:] + [None])):
error = None if nl is None else nl.deltas
ll.bprop(error)
def print_layers(self, debug=False):
printfunc = logger.debug if debug else logger.info
netdesc = 'Layers:\n'
for layer in self.layers:
netdesc += '\t' + str(layer) + '\n'
printfunc("%s", netdesc)
def update(self, epoch):
for layer in self.layers:
layer.update(epoch)
def get_classifier_output(self):
return self.class_layer.output
def print_training_error(self, error, num_batches, partial=False):
rederr = self.backend.reduce_tensor(error)
if self.backend.rank() != 0:
return
if partial is True:
assert self.step_print != 0
logger.info('%d:%d training error: %0.5f', self.epochs_complete,
num_batches / self.step_print,
rederr)
else:
errorval = rederr / num_batches
logger.info('epoch: %d, training error: %0.5f',
self.epochs_complete,
errorval)
def print_test_error(self, setname, misclass, nrecs):
redmisclass = self.backend.reduce_tensor(misclass)
if self.backend.rank() != 0:
return
misclassval = redmisclass / nrecs
logging.info("%s set misclass rate: %0.5f%%",
setname, 100. * misclassval)
def fit(self, dataset):
"""
Learn model weights on the given datasets.
"""
error = self.backend.zeros((1, 1), dtype=self.cost_layer.weight_dtype)
self.data_layer.init_dataset(dataset)
self.data_layer.use_set('train')
logger.info('commencing model fitting')
while self.epochs_complete < self.num_epochs:
self.backend.begin(Block.epoch, self.epochs_complete)
error.fill(0.0)
mb_id = 1
self.data_layer.reset_counter()
while self.data_layer.has_more_data():
self.backend.begin(Block.minibatch, mb_id)
self.backend.begin(Block.fprop, mb_id)
self.fprop()
self.backend.end(Block.fprop, mb_id)
self.backend.begin(Block.bprop, mb_id)
self.bprop()
self.backend.end(Block.bprop, mb_id)
self.backend.begin(Block.update, mb_id)
self.update(self.epochs_complete)
self.backend.end(Block.update, mb_id)
if self.step_print > 0 and mb_id % self.step_print == 0:
self.print_training_error(self.cost_layer.get_cost(),
mb_id, partial=True)
self.backend.add(error, self.cost_layer.get_cost(), error)
self.backend.end(Block.minibatch, mb_id)
mb_id += 1
self.epochs_complete += 1
self.print_training_error(error, self.data_layer.num_batches)
self.print_layers(debug=True)
self.backend.end(Block.epoch, self.epochs_complete - 1)
self.save_snapshot()
self.data_layer.cleanup()
def set_train_mode(self, mode):
for ll in self.layers:
ll.set_train_mode(mode)
def predict_generator(self, dataset, setname):
"""
        Generate predictions and true labels for the given dataset, one
        mini-batch at a time.
        Arguments:
dataset: A neon dataset instance
setname: Which set to compute predictions for (test, train, val)
Returns:
tuple: on each call will yield a 2-tuple of outputs and references.
The first item is the model probabilities for each class,
and the second item is either the one-hot or raw labels with
ground truth.
See Also:
predict_fullset
"""
self.data_layer.init_dataset(dataset)
assert self.data_layer.has_set(setname)
self.data_layer.use_set(setname, predict=True)
self.data_layer.reset_counter()
nrecs = self.batch_size * 1
outputs = self.backend.empty((self.class_layer.nout, nrecs))
if self.data_layer.has_labels:
reference = self.backend.empty((1, nrecs))
else:
reference = self.backend.empty(outputs.shape)
while self.data_layer.has_more_data():
self.fprop()
outputs = self.get_classifier_output()
reference = self.cost_layer.get_reference()
yield (outputs, reference)
self.data_layer.cleanup()
def predict_fullset(self, dataset, setname):
"""
        Generate predictions and true labels for the given dataset.
Note that this requires enough memory to house the predictions and
labels for the entire dataset at one time (not recommended for large
datasets, see predict_generator instead).
        Arguments:
dataset: A neon dataset instance
setname: Which set to compute predictions for (test, train, val)
Returns:
            tuple: a 2-tuple of outputs and references.
The first item is the model probabilities for each class,
and the second item is either the one-hot or raw labels with
ground truth.
See Also:
predict_generator
"""
self.data_layer.init_dataset(dataset)
assert self.data_layer.has_set(setname)
self.data_layer.use_set(setname, predict=True)
nrecs = self.batch_size * self.data_layer.num_batches
outputs = self.backend.empty((self.class_layer.nout, nrecs))
if self.data_layer.has_labels:
reference = self.backend.empty((1, nrecs))
else:
reference = self.backend.empty(outputs.shape)
batch = 0
for batch_preds, batch_refs in self.predict_generator(dataset,
setname):
start = batch * self.batch_size
end = start + self.batch_size
outputs[:, start:end] = batch_preds
reference[:, start:end] = batch_refs
batch += 1
return outputs, reference
def predict_live_init(self, dataset):
self.data_layer.init_dataset(dataset)
for ll in self.layers:
ll.set_train_mode(False)
def predict_live(self):
self.fprop()
return self.get_classifier_output()
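# Illustrative sketch (not part of the original module): predict_fullset() only stitches the
# per-batch outputs of predict_generator() into one buffer by column slices. The same
# accumulation pattern in plain Python, with lists standing in for backend tensors:
#
#   batch_size = 2
#   batches = [([1, 2], ['a', 'b']), ([3, 4], ['c', 'd'])]   # (outputs, references) per batch
#   outputs, reference = [None] * 4, [None] * 4
#   for batch, (preds, refs) in enumerate(batches):
#       start, end = batch * batch_size, (batch + 1) * batch_size
#       outputs[start:end], reference[start:end] = preds, refs
#   # outputs == [1, 2, 3, 4]; reference == ['a', 'b', 'c', 'd']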
|
tambetm/neon
|
neon/models/mlp.py
|
Python
|
apache-2.0
| 9,880
|
# Copyright 2017 BrainPad Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
class TimeoutError(Exception):
pass
class DeviceNotFound(IOError):
pass
class PacketParseError(IOError):
pass
|
BrainPad/FindYourCandy
|
robot-arm/dobot/errors.py
|
Python
|
apache-2.0
| 891
|
import logging
from confluent_kafka import Consumer, KafkaError, KafkaException, TopicPartition
from kafka_influxdb.encoder.errors import EncoderError
from kafka_influxdb.reader.reader import ReaderAbstract
class Reader(ReaderAbstract):
"""
A high-performance Kafka consumer based on confluent-kafka, which uses librdkafka internally.
See: https://github.com/confluentinc/confluent-kafka-python
"""
def _subscribe(self):
"""
Subscribe to Kafka topics.
A workaround for missing Zookeeper support in confluent-python is required here.
Automatic partition rebalancing is not working with Kafka Versions < 0.9.0.
Therefore we manually assign the partitions to the consumer for legacy Kafka versions.
"""
if self.broker_version < self.KAFKA_VERSION_ZOOKEEPER_OPTIONAL:
self.consumer.assign([TopicPartition(self.topic, p)
for p in range(0, 10)])
else:
self.consumer.subscribe([self.topic])
def _setup_connection(self):
"""
Confluent-Kafka configuration
"""
# TODO: Test async commit handling (self.consumer.commit(async=False))
connection = {
'bootstrap.servers': self.host + ":" + self.port,
'group.id': self.group,
'offset.store.method': 'broker',
'default.topic.config': {
# In newer Kafka versions, this can either be 'largest' or 'smallest'.
# See https://kafka.apache.org/documentation/
'auto.offset.reset': self.offset
}
}
# Add additional flag based on the Kafka version.
if self.broker_version < self.KAFKA_VERSION_ZOOKEEPER_OPTIONAL:
connection['broker.version.fallback'] = self.broker_version
return connection
def _connect(self):
"""
Connect to Kafka and subscribe to the topic
"""
connection = self._setup_connection()
logging.info(
"Connecting to Kafka with the following settings:\n %s...", connection)
self.consumer = Consumer(**connection)
self._subscribe()
def _handle_read(self):
"""
Read messages from Kafka.
"""
while True:
msg = self.consumer.poll(timeout=1.0)
if __debug__:
logging.debug(msg)
if msg is None:
yield False
continue
if msg.error():
self._handle_error(msg)
else:
# Proper message
if __debug__:
logging.debug('%s [%d] at offset %d with key %s:\n',
msg.topic(), msg.partition(), msg.offset(), str(msg.key()))
yield msg.value().rstrip()
@staticmethod
def _handle_error(msg):
if not msg.error():
return
# Error or event
if msg.error().code() == KafkaError._PARTITION_EOF:
# End of partition event
logging.info('%s [%d] reached end at offset %d with key %s\n',
msg.topic(), msg.partition(), msg.offset(), str(msg.key()))
else:
raise EncoderError(msg.error())
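# Illustrative note (not part of the original module): for a broker older than
# KAFKA_VERSION_ZOOKEEPER_OPTIONAL, _setup_connection() produces roughly
#   {'bootstrap.servers': 'localhost:9092', 'group.id': 'kafka-influxdb',
#    'offset.store.method': 'broker',
#    'default.topic.config': {'auto.offset.reset': 'largest'},
#    'broker.version.fallback': '0.8.2'}
# and _subscribe() falls back to manually assigning partitions 0-9 of the topic. The host,
# port, group, offset and version values above are assumptions for illustration.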
|
mre/kafka-influxdb
|
kafka_influxdb/reader/confluent.py
|
Python
|
apache-2.0
| 3,278
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from pyramid import predicates
from pyramid.exceptions import ConfigurationError
from pyramid.util import is_same_domain
class DomainPredicate:
def __init__(self, val, config):
self.val = val
def text(self):
return "domain = {!r}".format(self.val)
phash = text
def __call__(self, info, request):
# Support running under the same instance for local development and for
        # test.pypi.io which will continue to host its own uploader.
if self.val is None:
return True
return is_same_domain(request.domain, self.val)
class HeadersPredicate:
def __init__(self, val: List[str], config):
if not val:
raise ConfigurationError(
"Excpected at least one value in headers predicate"
)
self.sub_predicates = [
predicates.HeaderPredicate(subval, config) for subval in val
]
def text(self):
return ", ".join(sub.text() for sub in self.sub_predicates)
phash = text
def __call__(self, context, request):
return all(sub(context, request) for sub in self.sub_predicates)
def includeme(config):
config.add_route_predicate("domain", DomainPredicate)
config.add_view_predicate("require_headers", HeadersPredicate)
| pypa/warehouse | warehouse/predicates.py | Python | apache-2.0 | 1,862 |
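A sketch of how the two predicates could be wired into a Pyramid application once includeme has registered them; the route, domain and header values are invented for illustration.
from pyramid.config import Configurator
from pyramid.response import Response

def upload_view(request):
    return Response("ok")

def make_app():
    config = Configurator()
    config.include("warehouse.predicates")   # runs includeme(), registering both predicates
    # Route matches only when request.domain equals the given value (DomainPredicate).
    config.add_route("upload", "/legacy/", domain="upload.example.com")
    # View runs only when every listed header predicate matches ("Name" or "Name:regex" syntax).
    config.add_view(upload_view, route_name="upload",
                    require_headers=["Content-Length", "X-Forwarded-Proto:https"])
    return config.make_wsgi_app()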
# Copyright 2020 The UniqueRandomizer Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for expand_grammar.py."""
import math
from absl.testing import absltest
from absl.testing import parameterized
import mock
import scipy.special
from unique_randomizer import unique_randomizer as ur
from unique_randomizer.examples import expand_grammar
class GrammarTest(parameterized.TestCase):
"""Tests for the grammar and grammar-manipulating functions alone."""
def test_grammar_validates(self):
expand_grammar.verify_grammar(expand_grammar.GRAMMAR_EXAMPLE)
@parameterized.named_parameters(
('probabilities_and_expansions_mismatch',
{'my_nonterminal': expand_grammar.Nonterminal(
expansions=['a', 'b', 'c'],
probabilities=[0.2, 0.8])},
"Nonterminal 'my_nonterminal' has 3 expansions and 2 probabilities"),
('probability_sum',
{'my_nonterminal': expand_grammar.Nonterminal(
expansions=['a', 'b', 'c'],
probabilities=[0.2, 0.8, 0.1])},
"Nonterminal 'my_nonterminal' has probabilities summing to 1.1"),
('inner_expansion',
{'my_nonterminal': expand_grammar.Nonterminal(
expansions=['a', 'b', '{c}'],
probabilities=[0.2, 0.7, 0.1])},
"Nonterminal 'my_nonterminal' has an expansion '{c}' with unknown inner "
"nonterminal 'c'"))
def test_verify_grammar(self, grammar, error_regex):
with self.assertRaisesRegex(expand_grammar.GrammarError, error_regex):
expand_grammar.verify_grammar(grammar)
@parameterized.named_parameters(
('leaf_0', 'animal', 0, 'crow'),
('leaf_1', 'animal', 1, 'goldfish'),
('non_leaf_0', 'sentence', 0, 'the apple tasted delicious.'),
('non_leaf_1', 'sentence', 1, 'miraculously, the pear was dull!'))
def test_expand_nonterminal(self, nonterminal_name, randomizer_index,
expected_expansion):
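    # Use a real NormalRandomizer but pin sample_distribution to a fixed choice index.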
mock_randomizer = ur.NormalRandomizer()
mock_randomizer.sample_distribution = mock.MagicMock(
return_value=randomizer_index)
self.assertEqual(
expand_grammar._expand_nonterminal(
nonterminal_name, expand_grammar.GRAMMAR_EXAMPLE, mock_randomizer),
expected_expansion)
@parameterized.named_parameters(
('fruit', 'fruit', 3,
{'apple': 0.5, 'pear': 0.4, 'dragonfruit': 0.1}),
('thing', 'thing', 12,
{'goldfish': 0.06, 't-rex': 0.0002, 'apple': 0.15, 'pebble': 0.4}),
('phrase', 'phrase', 1953,
{'the pear was dull': 0.4 * 0.3 * 0.4 * 0.1}),
('sentence', 'sentence', (1 + 1 + 6)*1953 + 1953**2 + 1,
{
'miraculously, the pear was dull!': 0.2 * 0.4 * 0.3 * 0.4 * 0.1,
'the rhino exclaimed, "the dragonfruit tasted delicious!"':
0.1 * 0.049 * 0.4 * 0.1,
'hello world!': 0.1,
}))
def test_enumerate_nonterminal(self, nonterminal_name,
expected_num_expansions,
expected_probability_dict):
actual_results = expand_grammar.enumerate_nonterminal(
nonterminal_name, expand_grammar.GRAMMAR_EXAMPLE)
self.assertLen(actual_results, expected_num_expansions)
actual_results_dict = dict(actual_results)
# No duplicate expansions.
self.assertLen(actual_results_dict, expected_num_expansions)
# Check the given expansions. (expected_probability_dict does not need to
# be exhaustive.)
for expansion in expected_probability_dict:
self.assertAlmostEqual(actual_results_dict[expansion],
math.log(expected_probability_dict[expansion]))
# The probabilities need to sum to 1.
self.assertAlmostEqual(
scipy.special.logsumexp([log_probability
for _, log_probability in actual_results]),
0.0)
class SamplingTest(parameterized.TestCase):
"""Tests sampling of grammar expansions."""
@parameterized.named_parameters(
('fruit', 'fruit'),
('animal', 'animal'),
('thing', 'thing'),
('phrase', 'phrase'))
def test_unique_randomizer_searches_completely(self, nonterminal_name):
grammar = expand_grammar.GRAMMAR_EXAMPLE
enumeration_list = expand_grammar.enumerate_nonterminal(nonterminal_name,
grammar)
expected_expansion_probabilities = dict(enumeration_list)
actual_expansions_dict, num_samples = expand_grammar.sample_with_ur(
float('inf'), nonterminal_name, grammar)
# The correct number of expansions, the actual number of expansions, and the
# actual number of unique expansions should all be equal.
self.assertTrue(len(enumeration_list) == # pylint: disable=g-generic-assert
num_samples ==
len(actual_expansions_dict))
# The probabilities should match exactly (no floating point errors), as
# they're both computed by summing log probabilities.
self.assertEqual(expected_expansion_probabilities,
actual_expansions_dict)
@parameterized.named_parameters(
('fruit', 'fruit'),
('animal', 'animal'),
('thing', 'thing'),
('phrase', 'phrase'))
def test_normal_randomizer_correct_probabilities(self, nonterminal_name):
grammar = expand_grammar.GRAMMAR_EXAMPLE
enumeration_list = expand_grammar.enumerate_nonterminal(nonterminal_name,
grammar)
expansion_probabilities = dict(enumeration_list)
num_samples = min(100, len(expansion_probabilities) - 1)
expansions_dict, _ = expand_grammar.sample_with_rejection(
num_samples, nonterminal_name, grammar)
for expansion, log_probability in expansions_dict.items():
self.assertIn(expansion, expansion_probabilities)
self.assertEqual(expansion_probabilities[expansion], log_probability)
@parameterized.named_parameters(
('fruit', 'fruit'),
('animal', 'animal'),
('thing', 'thing'),
('phrase', 'phrase'))
def test_sbs_searches_completely(self, nonterminal_name):
grammar = expand_grammar.GRAMMAR_EXAMPLE
enumeration_list = expand_grammar.enumerate_nonterminal(nonterminal_name,
grammar)
expansion_probabilities = dict(enumeration_list)
actual_expansions_dict, actual_num_samples = expand_grammar.sample_with_sbs(
len(expansion_probabilities) + 1, nonterminal_name, grammar)
self.assertEqual(actual_expansions_dict, expansion_probabilities)
self.assertLen(actual_expansions_dict, actual_num_samples)
if __name__ == '__main__':
absltest.main()
| google-research/unique-randomizer | unique_randomizer/examples/expand_grammar_test.py | Python | apache-2.0 | 7,225 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service import api
from sahara.service.validations.edp import job_binary as b
from sahara.swift import utils as su
from sahara.tests.unit.service.validation import utils as u
class TestJobBinaryValidation(u.ValidationTestCase):
def setUp(self):
super(TestJobBinaryValidation, self).setUp()
self._create_object_fun = b.check_job_binary
self.scheme = b.JOB_BINARY_SCHEMA
api.plugin_base.setup_plugins()
def test_creation(self):
data = {
"name": "main.jar",
"url": "internal-db://3e4651a5-1f08-4880-94c4-596372b37c64",
"extra": {
"user": "user",
"password": "password"
},
"description": "long description"
}
self._assert_types(data)
def test_job_binary_create_swift(self):
self._assert_create_object_validation(
data={
"name": "j_o_w",
"url": su.SWIFT_INTERNAL_PREFIX + "o.sahara/k"
},
bad_req_i=(1, "BAD_JOB_BINARY",
"To work with JobBinary located in internal "
"swift add 'user' and 'password' to extra"))
# TODO(mattf): remove support for OLD_SWIFT_INTERNAL_PREFIX
def test_job_binary_create_swift_with_old_prefix(self):
self._assert_create_object_validation(
data={
"name": "j_o_w",
"url": su.OLD_SWIFT_INTERNAL_PREFIX + "o.sahara/k"
},
bad_req_i=(1, "BAD_JOB_BINARY",
"To work with JobBinary located in internal "
"swift add 'user' and 'password' to extra"))
def test_job_binary_create_internal(self):
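        # The schema apparently expects internal-db:// URLs to reference a binary by UUID
        # (as in test_creation above), so this malformed URL should fail validation.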
self._assert_create_object_validation(
data={
"name": "main.jar",
"url": "internal-db://abacaba",
},
bad_req_i=(1, "VALIDATION_ERROR",
"'internal-db://abacaba' is not a "
"'valid_job_location'"))
| tellesnobrega/storm_plugin | sahara/tests/unit/service/validation/edp/test_job_binary.py | Python | apache-2.0 | 2,640 |
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from glyphsLib.util import bin_to_int_list, int_list_to_bin
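# bin_to_int_list turns a bitmask integer into the ascending indices of its set bits; int_list_to_bin is the inverse.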
class UtilTest(unittest.TestCase):
def test_bin_to_int_list(self):
self.assertEqual([], bin_to_int_list(0))
self.assertEqual([0], bin_to_int_list(1))
self.assertEqual([1], bin_to_int_list(2))
self.assertEqual([0, 1], bin_to_int_list(3))
self.assertEqual([2], bin_to_int_list(4))
self.assertEqual([7, 30], bin_to_int_list((1 << 7) + (1 << 30)))
def test_int_list_to_bin(self):
self.assertEqual(int_list_to_bin([]), 0)
self.assertEqual(int_list_to_bin([0]), 1)
self.assertEqual(int_list_to_bin([1]), 2)
self.assertEqual(int_list_to_bin([0, 1]), 3)
self.assertEqual(int_list_to_bin([2]), 4)
self.assertEqual(int_list_to_bin([7, 30]), (1 << 7) + (1 << 30))
| googlefonts/glyphsLib | tests/util_test.py | Python | apache-2.0 | 1,437 |