| repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (string, 19 values) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (string, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
GNOME/dots
|
dots/docdocument.py
|
1
|
1733
|
# Dots - A braille translation program.
#
# Copyright (C) 2010 Consorcio Fernando de los Rios
# Author: Fernando Herrera <fherrera@onirica.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
from document import Document
from translator import Translator
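# Locate the antiword executable on PATH; returns its full path, or None if it is not found.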
def get_antiword():
for path in os.environ["PATH"].split(os.pathsep):
f = os.path.join(path, "antiword")
if os.path.exists(f) and os.access(f, os.X_OK):
return f
return None
antiword = get_antiword()
if antiword is None:
raise NameError('Antiword not found')
class DocDocument(Document):
def _get_text(self, file):
text = subprocess.check_output([antiword, "-x", "db", file])
return text
def translate(self, config):
config['outputFormat']['inputTextEncoding'] = "UTF8"
self.translator = Translator(config)
result = self._get_text (self.input_file)
self.braille_text = self.translator.translate_string (result)
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
document = DocDocument(sys.argv[1])
print document._get_text(sys.argv[1])
|
gpl-3.0
| 1,013,742,404,977,871,100
| 31.092593
| 71
| 0.699365
| false
| 3.565844
| false
| false
| false
|
shashi28/nuts
|
port scanner/ui_portScanner.py
|
1
|
4932
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'portScanner.ui'
#
# Created: Tue Apr 29 18:10:30 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_portScanner(object):
def setupUi(self, portScanner):
portScanner.setObjectName(_fromUtf8("portScanner"))
portScanner.resize(372, 389)
portScanner.setMinimumSize(QtCore.QSize(372, 389))
portScanner.setMaximumSize(QtCore.QSize(372, 389))
self.hostLabel = QtGui.QLabel(portScanner)
self.hostLabel.setGeometry(QtCore.QRect(20, 30, 61, 16))
self.hostLabel.setObjectName(_fromUtf8("hostLabel"))
self.hostLineEdit = QtGui.QLineEdit(portScanner)
self.hostLineEdit.setGeometry(QtCore.QRect(80, 30, 171, 20))
self.hostLineEdit.setObjectName(_fromUtf8("hostLineEdit"))
self.portFromSpinBox = QtGui.QSpinBox(portScanner)
self.portFromSpinBox.setGeometry(QtCore.QRect(110, 70, 42, 22))
self.portFromSpinBox.setMinimum(20)
self.portFromSpinBox.setMaximum(65535)
self.portFromSpinBox.setObjectName(_fromUtf8("portFromSpinBox"))
self.portToSpinBox = QtGui.QSpinBox(portScanner)
self.portToSpinBox.setGeometry(QtCore.QRect(210, 70, 42, 22))
self.portToSpinBox.setMinimum(21)
self.portToSpinBox.setMaximum(65536)
self.portToSpinBox.setObjectName(_fromUtf8("portToSpinBox"))
self.fromLabel = QtGui.QLabel(portScanner)
self.fromLabel.setGeometry(QtCore.QRect(20, 70, 81, 16))
self.fromLabel.setObjectName(_fromUtf8("fromLabel"))
self.toLabel = QtGui.QLabel(portScanner)
self.toLabel.setGeometry(QtCore.QRect(170, 70, 31, 16))
self.toLabel.setObjectName(_fromUtf8("toLabel"))
self.scanPushButton = QtGui.QPushButton(portScanner)
self.scanPushButton.setGeometry(QtCore.QRect(290, 30, 75, 23))
self.scanPushButton.setObjectName(_fromUtf8("scanPushButton"))
self.resultTable = QtGui.QTableWidget(portScanner)
self.resultTable.setGeometry(QtCore.QRect(10, 110, 351, 271))
self.resultTable.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.resultTable.setTabKeyNavigation(False)
self.resultTable.setProperty("showDropIndicator", False)
self.resultTable.setDragDropOverwriteMode(False)
self.resultTable.setAlternatingRowColors(True)
self.resultTable.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.resultTable.setObjectName(_fromUtf8("resultTable"))
self.resultTable.setColumnCount(2)
self.resultTable.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.resultTable.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.resultTable.setHorizontalHeaderItem(1, item)
self.resultTable.horizontalHeader().setStretchLastSection(True)
self.resultTable.verticalHeader().setVisible(False)
self.stopPushButton = QtGui.QPushButton(portScanner)
self.stopPushButton.setGeometry(QtCore.QRect(290, 60, 75, 23))
self.stopPushButton.setObjectName(_fromUtf8("stopPushButton"))
self.statusLabel = QtGui.QLabel(portScanner)
self.statusLabel.setGeometry(QtCore.QRect(265, 90, 91, 20))
self.statusLabel.setText(_fromUtf8(""))
self.statusLabel.setObjectName(_fromUtf8("statusLabel"))
self.hostLabel.setBuddy(self.hostLineEdit)
self.retranslateUi(portScanner)
QtCore.QMetaObject.connectSlotsByName(portScanner)
def retranslateUi(self, portScanner):
portScanner.setWindowTitle(_translate("portScanner", "Port Scanner - Nuts and Bolts", None))
self.hostLabel.setText(_translate("portScanner", "&Host / IP :", None))
self.hostLineEdit.setPlaceholderText(_translate("portScanner", "Enter Hostname or IP Address", None))
self.fromLabel.setText(_translate("portScanner", "Port No from :", None))
self.toLabel.setText(_translate("portScanner", "to :", None))
self.scanPushButton.setText(_translate("portScanner", "Scan", None))
item = self.resultTable.horizontalHeaderItem(0)
item.setText(_translate("portScanner", "Port No", None))
item = self.resultTable.horizontalHeaderItem(1)
item.setText(_translate("portScanner", "Status", None))
self.stopPushButton.setText(_translate("portScanner", "Stop", None))
|
mit
| -6,696,726,251,446,786,000
| 49.326531
| 109
| 0.708232
| false
| 3.901899
| false
| false
| false
|
ewejeen/2017sejongAI
|
week 12/2-1.py
|
1
|
2186
|
from nltk.corpus import movie_reviews
from nltk.classify import NaiveBayesClassifier
from nltk.classify.util import accuracy as nltk_accuracy
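# extract_features builds a bag-of-words feature dict, e.g.
# extract_features(['good', 'movie']) -> {'good': True, 'movie': True}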
def extract_features(words):
return dict([(word, True) for word in words])
if __name__=='__main__':
fileids_pos = movie_reviews.fileids('pos')
fileids_neg = movie_reviews.fileids('neg')
features_pos = [(extract_features(movie_reviews.words(
fileids=[f])), 'Positive') for f in fileids_pos]
features_neg = [(extract_features(movie_reviews.words(
fileids=[f])), 'Negative') for f in fileids_neg]
threshold = 0.8
num_pos = int(threshold * len(features_pos))
num_neg = int(threshold * len(features_neg))
features_train = features_pos[:num_pos] + features_neg[:num_neg]
features_test = features_pos[num_pos:] + features_neg[num_neg:]
print('\nNumber of training datapoints:', len(features_train))
print('Number of test datapoints:', len(features_test))
classifier = NaiveBayesClassifier.train(features_train)
print('\nAccuracy of the classifier:', nltk_accuracy(classifier, features_test))
N = 15
print('\nTop ' + str(N) + ' most informative words:')
for i, item in enumerate(classifier.most_informative_features()):
print(str(i+1) + '. ' + item[0])
if i == N - 1:
break
input_reviews = [
"Everything about this movie is outstanding -- the performances, the way the true events are handled, the cinematography. In this day of digital news, this movie makes us stand back and realize what we may lose in the way of investigative journalism as we slowly kill off print media. The focus remains the child abuse scandal in the archdiocese in Boston. That reflects the conflict the characters face and deal with when events make them rethink the focus of their article. The movie is riveting, though we know the outcome."
]
print("\nMovie review predictions:")
for review in input_reviews:
print("\nReview:", review)
probabilities = classifier.prob_classify(extract_features(review.split()))
predicted_sentiment = probabilities.max()
print("Predicted sentiment:", predicted_sentiment)
print("Probability:", round(probabilities.prob(predicted_sentiment), 2))
|
gpl-3.0
| 2,249,432,951,698,209,800
| 40.245283
| 531
| 0.723696
| false
| 3.643333
| false
| false
| false
|
default1406/PhyLab
|
PythonExperimentDataHandle/phylab.py
|
1
|
4356
|
# -*- coding: utf-8 -*-
from math import sqrt
# Keep b decimal places for every value in the 2D list x (with rounding)
def RoundTwo(x,b):
for i in range(len(x)):
for j in range(len(x[i])):
x[i][j] = round(x[i][j],b)
if b == 0:
x[i][j] = ("%d" %x[i][j])
elif b == 1:
x[i][j] = ("%.1f" %x[i][j])
elif b == 2:
x[i][j] = ("%.2f" %x[i][j])
elif b == 3:
x[i][j] = ("%.3f" %x[i][j])
elif b == 4:
x[i][j] = ("%.4f" %x[i][j])
elif b == 5:
x[i][j] = ("%.5f" %x[i][j])
elif b == 6:
x[i][j] = ("%.6f" %x[i][j])
# Keep b decimal places for every value in the 1D list x (with rounding)
def RoundOne(x,b):
for i in range(len(x)):
x[i] = round(x[i],b)
if b == 0:
x[i] = ("%d" %x[i])
elif b == 1:
x[i] = ("%.1f" %x[i])
elif b == 2:
x[i] = ("%.2f" %x[i])
elif b == 3:
x[i] = ("%.3f" %x[i])
elif b == 4:
x[i] = ("%.4f" %x[i])
elif b == 5:
x[i] = ("%.5f" %x[i])
elif b == 6:
x[i] = ("%.6f" %x[i])
# Compute the type-A uncertainty: x is a list, aver is the mean of x, and k is the number
# of data points (not necessarily len(x), because the mean of x may have been appended to x)
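# Ua = sqrt( sum_{i=0..k-1} (x[i] - aver)**2 / (k*(k-1)) )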
def Ua(x, aver, k) :
sumx = 0
for i in range(k):
sumx += (x[i] - aver)**2
return sqrt(sumx/(k*(k-1)))
# Format the final result as (f ± u_f):
# given the computed final value and its uncertainty, return the formatted result string
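# e.g. BitAdapt(1234.5, 23) -> "(1.23\pm0.02){\times}10^{3}"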
def BitAdapt(x,u_x) :
ten = 0
ften = 0
if (u_x >= 10):
temp = x
while(temp >= 10):
temp = temp/10
ten += 1
x = float(x)/10**ten
u_x = float(u_x)/10**ten
elif (x < 0.001):
temp = x
ften = 0
while(temp < 1):
temp = temp*10
ften += 1
x = float(x) * 10**ften
u_x = float(u_x) * 10**ften
Tempbit = 0
bit = 0
while (1):
i = 0
while(1):
temp = float(u_x)*(10**i)
if(temp >= 1):
bit = i
break
else :
i+=1
u_x = round(float(u_x),bit)
x = round(float(x),bit)
u_x = ("%.*f"%(bit, u_x))
x = ("%.*f"%(bit, x))
# if bit == 0:
# u_x = ("%d" % u_x)
# x = ("%d" % x)
# elif bit == 1:
# u_x = ("%.1f" % u_x)
# x = ("%.1f" % x)
# elif bit == 2:
# u_x = ("%.2f" % u_x)
# x = ("%.2f" % x)
# elif bit == 3:
# u_x = ("%.3f" % u_x)
# x = ("%.3f" % x)
# elif bit == 4:
# u_x = ("%.4f" % u_x)
# x = ("%.4f" % x)
# elif bit == 5:
# u_x = ("%.5f" % u_x)
# x = ("%.5f" % x)
# elif bit == 6:
# u_x = ("%.6f" % u_x)
# x = ("%.6f" % x)
# elif bit == 7:
# u_x = ("%.7f" % u_x)
# x = ("%.7f" % x)
# elif bit == 8:
# u_x = ("%.8f" % u_x)
# x = ("%.8f" % x)
i = 0
while(1):
temp = float(u_x)*(10**i)
if(temp >= 1):
Tempbit = i
break
else :
i+=1
if Tempbit == bit:
break
if ten > 0:
x = "(" + str(x) + "\\pm"
u_x = str(u_x) + "){\\times}10^{" + str(ten) + "}"
elif ften > 0:
x = "(" + str(x) + "\\pm"
u_x = str(u_x) + "){\\times}10^{-" + str(ften) + "}"
else:
x = "(" + str(x) + "\\pm"
u_x = str(u_x) + ")"
return x + u_x
# Convert to a scientific-notation representation
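# e.g. ToScience(1234567) -> '1.235{\times}10^{6}'; ToScience(12.3456) -> '12.35'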
def ToScience(number):
Tempstr = format(number,'.4g')
# If Tempstr contains an 'e', it is already in scientific notation
if 'e' in Tempstr:
index_str = Tempstr.split('e')
if index_str[0] == '1':
return '10^{'+str(int(index_str[1]))+'}'
else:
return index_str[0]+'{\\times}10^{'+str(int(index_str[1]))+'}'
else:
return Tempstr
# Simple (one-variable) linear regression on the 1D lists x and y: y = a + bx
# Returns the list [b, r]
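# Note: the last element of x and y is expected to already hold the mean of the data points;
#   b = (mean(x)*mean(y) - mean(x*y)) / (mean(x)^2 - mean(x^2))
#   r = (mean(x*y) - mean(x)*mean(y)) / sqrt((mean(x^2) - mean(x)^2) * (mean(y^2) - mean(y)^2))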
def ULR(x,y):
size = len(x)-1
x_2 = []
y_2 = []
xy = []
for i in range(size):
x_2.append(x[i]**2)
y_2.append(y[i]**2)
xy.append(x[i] * y[i])
x_2.append(sum(x_2)/size)
y_2.append(sum(y_2)/size)
xy.append(sum(xy)/size)
b = (x[size]*y[size]-xy[size])/(pow(x[size],2)-x_2[size])
r = (xy[size] - x[size]*y[size]) / sqrt((x_2[size] - pow(x[size],2))*(y_2[size]-pow(y[size],2)))
res = [b,r]
return res
# Compute the instrument error limit
def DELTA_R(R):
res = 0.02 + R%1*5/100.0
R = R - R%1
res = res + R%10*5/1000.0
R = R - R%10
res = res + R%100*2/1000.0
R = R - R%100
res = res + R/1000.0
return res
# Successive-difference method: pair each value in the first half with its counterpart in the second half
def DWM(x):
res = []
size = len(x)/2
for i in range(size):
temp = abs(x[i]-x[i+size])
res.append(temp)
return res
# Error tolerance for tests: return 1 if |y| is within 1% of |x|, otherwise 0
def Mistake(x,y):
x = abs(x)
y = abs(y)
r1 = x+x/100
r2 = x-x/100
if (y > r1) | (y <r2):
return 0
else:
return 1
|
gpl-2.0
| -669,399,115,657,459,200
| 18.959391
| 97
| 0.453204
| false
| 1.67819
| false
| false
| false
|
vaibhawvipul/Python-Politics-Game
|
trailblazers.py
|
1
|
20603
|
import sys
import math
import time
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vEnter Your name"
name = raw_input("> ")
"""This will display only first name"""
f_name = name.split()
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vWelcome %r! Be a \n\n\n\n\t\t\t...TRAILBLAZER..." %f_name[0]
print"\n Demo version 2"
print "\v\v\v\v1.Play"
print "\n2.About"
print "\n3.Exit"
print "\nCOPYRIGHTS RESERVED"
a = int(raw_input("\n\nEnter your choice - "))
if a == 3:
sys.exit(0)
elif a == 2:
print "\nThis game was concieved by Vipul Vaibhaw. It was build by very creative team of Fergusson College students"
print "\nWe are very collaborative team. We have an out of the box idea ready almost everytime."
print "\nThis game was build using Python."
print "\nWant to contact us, drop an e-mail to vaibhaw.vipul@gmail.com"
elif a == 1:
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vWelcome President %r to your office." %f_name[0]
print "\n\nHere is a message from intelligence. Press ENTER to see the message"
raw_input("")
print "A Terror outfit has grown very strong"
time.sleep(3)
print "They are constantly attacking Kamuri. Kamuri is a small nation which shares boundary with us. It also has religious importance for a minority group in your country."
time.sleep(5)
print"Kamuri and your Country has ancestral tie-ups"
time.sleep(2)
print "Our espionage have reported that it may soon try to overthrow government of Kamuri"
time.sleep(3)
print "\nPress ENTER to continue..."
raw_input("")
print "\n\v\v\v\v\v\v\v\v\v\v\v\v\vPresident of a Superpower nations has invited you over dinner."
print "\nIt could be benificial to your country. You could sort out issue like economic relations, weapon treaties or nuclear deal etc."
print "\nElse you can stay in our own country and solve internal affairs first."
print "\n\n1.You accept the invitation."
print "\n2.You decline the invitation."
b = int(raw_input("\n> "))
if b == 1:
print "\n\v\v\vGreat thought! It would not have been a good step to decline the invitation from a Superpower."
time.sleep(3)
print "\n\n\n'President Mark will meet you anytime from now. Sorry for inconvinience President %r' says Secretary " %f_name[0]
time.sleep(5)
print "\n\n\n\v\v\vPresident Mark is here!"
time.sleep(3)
print "\n\n\nPresident %r, Nice to meet you" %f_name[0]
time.sleep(3)
print "\nIt is good to know that your country is quite concerned about small countries neighbouring you."
time.sleep(4)
print "\nBut sometimes it is better to detach yourself from weak ones..."
time.sleep(2)
print "...and attach youself to more powerful nations."
time.sleep(3)
print "\n\nPress ENTER to continue..."
raw_input("")
print "\v\v\v\v\v'So here is a deal...'"
print "\n\n1. If you and your ally are ready to let us make army bases in you country, we may support you at war."
print "\n2. If you allow, while your ally deny We 'will' support you at war. Our soldiers will lead from front."
print "\n3. If you both deny, Your enemy will be showered with our benevolence."
print "\n\n\v\v1. You allow them."
print "2. You deny them"
c = int(raw_input("\n> "))
if c == 1:
print "\v\v\v'Great! Now let's see what Your ally has to say'"
time.sleep(3)
print "\nYour ally supported you in this decision. President Mark has built armybase in your country."
time.sleep(3)
print "\nPresident of 'Kamuri' has sent you a message. Press ENTER to read it."
raw_input("")
print "\n\n\v\v\vPresident we need help. Terrorists have attacked us. Help us!!"
print "\n\n1. You send army"
print "2. You ask Mark to help"
print "3. You ignore the problem and do not send Army."
d = int(raw_input("\n> "))
if d == 2:
print "Mark denies help. He had said that he 'may' help you at war."
time.sleep(3)
print "\n\nWhat will you do now?"
print "\n1. You send army"
print "2. You ignore the problem and do not send Army."
e = int(raw_input("> "))
if e == 1:
print "That's like an good ally!"
time.sleep(2)
print "Your army is ready to leave for Kamuri"
time.sleep(3)
print "ALERT!"
time.sleep(1)
print "ALERT!!"
time.sleep(1)
print "ALERT!!!"
time.sleep(2)
print "\n\nThere is massive flood in your country! Lots of lives are in danger!"
print "\nMessage from Cheif Minister of that flood-struck state. Press ENTER to see Message"
raw_input("")
print "\n\n\vPresident! We need Army support. Only trained personnels like Army men can help us"
print "\nHundreds of people are trapped here. Army needed immediately!"
print "\v\v\v\v1. You send your army to Kamuri."
print "2. You re-direct your army to rescue people from flood-struck state."
f = int(raw_input(""))
if f == 1:
print "\n\nInternational relations matters President %r! But your citizens are feeling unsafe in your country." %f_name[0]
time.sleep(2)
print "\nMisiters withdraw support and your government falls..."
time.sleep(2)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else:
print "\n\nGood decision to send army to rescue your people first."
print "\nArmy did fantastic job and saved hundreds of lives."
time.sleep(3)
print "\nYou become peoples favorite President!"
time.sleep(3)
print "\n\nBut Kamuri problem is unsolved yet!"
time.sleep(3)
print "Government is about to collapse. It would be a big threat to your country's security as well."
time.sleep(4)
print "\n1. Should we plan to offer an Armed force help?"
print "2. Or Negotitate with Terrorists."
time.sleep(3)
print "\nTerrorists want to contact you."
time.sleep(2)
print "\nThey have send you a message"
print "\nPress ENTER to see the message..."
raw_input("")
print "\v\v\nPresident %r if you ignore to help Kamuri, We will support you in next elections." %f_name[0]
print "People of our religion will support you. Secondly, we may ignore your country from our HIT LIST as well!!"
time.sleep(1)
print "\nYour options are:\n1. Should we plan to offer an Armed force help?"
print "2. Or Negotitate with Terrorists."
g = int(raw_input("\nTake your decision \n>"))
if g == 2:
print "\nPresident %r day by day conditions in Kamuri got worse." %f_name[0]
time.sleep(2)
print "\nKamuri Government was overthrown by Terrorists"
time.sleep(2)
print "\nYou even lost some reputation in World! News spread that you took this decision as you disbelieved your army!"
time.sleep(3)
print "You lost trust amongsts citizen and they voted against you!"
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
elif g == 1:
print "\nYou saved Kamuri. But back to back floods and warfare has made your economy weak"
time.sleep(5)
print "\nPresident Mark has come up with another deal"
time.sleep(3)
h = int(raw_input("\n\n1. You agree to meet him. \n2. You deny \n>"))
if h == 2:
print "\n\nSuperpower nation is upset now. He breaks offs economic ties and your economy crashes"
time.sleep(4)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else :
print "\v\v\v\v\vSo Here is the deal!"
print "\n\n1. If you allow us to make more armybases in your country. We WILL help you at any cost!"
print "2. If you deny, we break economic ties with you and your economy may crash!"
raw_input("\nPress ENTER to continue... ")
print "\n\nHere is a message from Minister of Scientific development"
time.sleep(4)
print "\n\n\nWe have developed special kind of rice, which is new to the world market."
print "\nWe may sell it to world market to stabalize our economy."
time.sleep(7)
print "\nBut..."
time.sleep(3)
print "\nWe are not sure about its success."
time.sleep(4)
i = int(raw_input("Take your decision - "))
if i == 2:
print "\n\nSuperPower got upset but our rice was successful invention!"
print "\nYou managed to survive..."
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU WIN!!"
else:
print "\nThis time you caught MARK! He had to help your country now because of 'will' which he had said in deal."
time.sleep(5)
print "\nAlso your rice got successful and Mark needed that rice to help his country"
time.sleep(4)
print "\nYou sold that rice to Mark with a deal that from now any of his Army movement won't be allowed without your consesus."
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU WIN!!!!!"
else:
print "Being Diplomatic!"
time.sleep(3)
print "Riots!!!!"
time.sleep(2)
print "Religious Minority in your country got upset and their protest have turned into riots. You LOSE!!"
elif d == 1:
print "That's like an good ally!"
time.sleep(2)
print "Your army is ready to leave for Kamuri"
time.sleep(3)
print "ALERT!"
time.sleep(1)
print "ALERT!!"
time.sleep(1)
print "ALERT!!!"
time.sleep(2)
print "\n\nThere is massive flood in your country! Lots of lives are in danger!"
print "\nMessage from Cheif Minister of that flood-struck state. Press ENTER to see Message"
raw_input("")
print "\n\n\vPresident! We need Army support. Only trained personnels like Army men can help us"
print "\nHundreds of people are trapped here. Army needed immediately!"
print "\v\v\v\v1. You send your army to Kamuri."
print "2. You re-direct your army to rescue people from flood-struck state."
f = int(raw_input("\n>"))
if f == 1:
print "\n\nInternational relations matters President %r! But your citizens are feeling unsafe in your country." %f_name[0]
time.sleep(2)
print "\nMisiters withdraw support and your government falls..."
time.sleep(2)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
elif f == 2:
print "\n\nGood decision to send army to rescue your people first."
print "\nArmy did fantastic job and saved hundreds of lives."
time.sleep(3)
print "\nYou become peoples favorite President!"
time.sleep(3)
print "\n\nBut Kamuri problem is unsolved yet!"
time.sleep(3)
print "Government is about to collapse. It would be a big threat to your country's security as well."
time.sleep(4)
print "\n1. Should we plan to offer an Armed force help?"
print "2. Or Negotitate with Terrorists."
time.sleep(3)
print "\nTerrorists want to contact you."
time.sleep(2)
print "\nThey have send you a message"
print "\nPress ENTER to see the message..."
raw_input("")
print "\v\v\nPresident %r if you ignore to help Kamuri, We will support you in next elections." %f_name[0]
print "People of our religion will support you. Secondly, we may ignore your country from our HIT LIST as well!!"
g = int(raw_input("\nTake your decision \n>"))
if g == 2:
print "\nPresident %r day by day conditions in Kamuri got worse." %f_name[0]
time.sleep(2)
print "\nKamuri Government was overthrown by Terrorists"
time.sleep(2)
print "\nYou even lost some reputation in World! News spread that you took this decision as you disbelieved your army!"
time.sleep(3)
print "You lost trust amongsts citizen and they voted against you!"
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
elif g == 1:
print "\nYou saved Kamuri. But back to back floods and warfare has made your economy weak"
time.sleep(5)
print "\nPresident Mark has come up with another deal"
time.sleep(3)
h = int(raw_input("\n\n1. You agree to meet him. \n2. You deny>"))
if h == 2:
print "\n\nSuperpower nation is upset now. He breaks offs economic ties and your economy crashes"
time.sleep(4)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else :
print "\v\v\v\v\vSo Here is the deal!"
print "\n\n1. If you allow us to make more armybases in your country. We WILL help you at any cost!"
print "2. If you deny, we break economic ties with you and your economy may crash!"
raw_input("\nPress ENTER to continue... ")
print "\n\nHere is a message from Minister of Scientific development"
time.sleep(4)
print "\n\n\nWe have developed special kind of rice, which is new to the world market."
print "\nWe may sell it to world market to stabalize our economy."
time.sleep(7)
print "\nBut..."
time.sleep(3)
print "\nWe are not sure about its success."
time.sleep(4)
i = int(raw_input("Take your decision - "))
if i == 2:
print "\n\nSuperPower got upset but our rice was successful invention!"
print "\nYou managed to survive..."
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU WIN!!"
else:
print "\nThis time you caught MARK! He had to help your country now because of 'will' which he had said in deal."
time.sleep(5)
print "\nAlso your rice got successful and Mark needed that rice to help his country"
time.sleep(4)
print "\nYou sold that rice to Mark with a deal that from now any of his Army movement won't be allowed without your consesus."
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU WIN!!!!!"
else :
print "Bye!"
else:
print "Being Diplomatic!"
time.sleep(3)
print "Riots!!!!"
time.sleep(2)
print "Religious Minority in your country got upset and their protest have turned into riots"
else :
print "'Ok President %r, Hope this decision won't cost you much!'" %f_name[0]
else :
print "Not a good decision to decline invitation from a superpower!"
print "\nPresident of 'Kamuri' has sent you a message. Press ENTER to read it."
raw_input("")
print "\n\n\v\v\vPresident we need help. Terrorists have attacked us. Help us!!"
print "\n\n1. You send army"
print "2. You ignore the problem and do not send Army."
d = int(raw_input("\n> "))
if d == 2:
print "Mark denies help. He had said that he 'may' help you at war."
time.sleep(3)
print "\n\nWhat will you do now?"
print "\n1. You send army"
print "2. You ignore the problem and do not send Army."
e = int(raw_input("> "))
if e == 1:
print "That's like an good ally!"
time.sleep(2)
print "Your army is ready to leave for Kamuri"
time.sleep(3)
print "ALERT!"
time.sleep(1)
print "ALERT!!"
time.sleep(1)
print "ALERT!!!"
time.sleep(2)
print "\n\nThere is massive flood in your country! Lots of lives are in danger!"
print "\nMessage from Cheif Minister of that flood-struck state. Press ENTER to see Message"
raw_input("")
print "\n\n\vPresident! We need Army support. Only trained personnels like Army men can help us"
print "\nHundreds of people are trapped here. Army needed immediately!"
print "\v\v\v\v1. You send your army to Kamuri."
print "2. You re-direct your army to rescue people from flood-struck state."
f = int(raw_input(""))
if f == 1:
print "\n\nInternational relations matters President %r! But your citizens are feeling unsafe in your country." %f_name[0]
time.sleep(2)
print "\nMisiters withdraw support and your government falls..."
time.sleep(2)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else:
print "\n\nGood decision to send army to rescue your people first."
print "\nArmy did fantastic job and saved hundreds of lives."
time.sleep(3)
print "\nYou become peoples favorite President!"
time.sleep(3)
print "\n\nBut Kamuri problem is unsolved yet!"
time.sleep(3)
print "Government is about to collapse. It would be a big threat to your country's security as well."
time.sleep(4)
print "\n1. Should we plan to offer an Armed force help?"
print "2. Or Negotitate with Terrorists."
time.sleep(3)
print "\nTerrorists want to contact you."
time.sleep(2)
print "\nThey have send you a message"
print "\nPress ENTER to see the message..."
raw_input("")
print "\v\v\nPresident %r if you ignore to help Kamuri, We will support you in next elections." %f_name[0]
print "People of our religion will support you. Secondly, we may ignore your country from our HIT LIST as well!!"
g = int(raw_input("\nTake your decision \n>"))
if g == 2:
print "\nNegotitation with terrorists wasn't a good idea President %r" %f_name[0]
time.sleep(2)
print "\nKamuri Government was overthrown by Terrorists"
time.sleep(2)
print "\nCitizen felt that their security was at threat and voted against you!"
time.sleep(3)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else:
print "\nYou saved Kamuri. Your country emerged as a Superpower"
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU WON!!!!!!!!"
else:
print "Being Diplomatic!"
time.sleep(3)
print "Riots!!!!"
time.sleep(2)
print "Religious Minority in your country got upset and their protest have turned into riots"
elif d == 1:
print "That's like an good ally!"
time.sleep(2)
print "Your army is ready to leave for Kamuri"
time.sleep(3)
print "ALERT!"
time.sleep(1)
print "ALERT!!"
time.sleep(1)
print "ALERT!!!"
time.sleep(2)
print "\n\nThere is massive flood in your country! Lots of lives are in danger!"
print "\nMessage from Cheif Minister of that flood-struck state. Press ENTER to see Message"
raw_input("")
print "\n\n\vPresident! We need Army support. Only trained personnels like Army men can help us"
print "\nHundreds of people are trapped here. Army needed immediately!"
print "\v\v\v\v1. You send your army to Kamuri."
print "2. You re-direct your army to rescue people from flood-struck state."
f = int(raw_input("\n>"))
if f == 1:
print "\n\nInternational relations matters President %r! But your citizens are feeling unsafe in your country." %f_name[0]
time.sleep(2)
print "\nMisiters withdraw support and your government falls..."
time.sleep(2)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else:
print "\n\nGood decision to send army to rescue your people first."
print "\nArmy did fantastic job and saved hundreds of lives."
time.sleep(3)
print "\nYou become peoples favorite President!"
time.sleep(3)
print "\n\nBut Kamuri problem is unsolved yet!"
time.sleep(3)
print "Government is about to collapse. It would be a big threat to your country's security as well."
time.sleep(4)
print "\n1. Should we plan to offer an Armed force help?"
print "2. Or Negotitate with Terrorists."
time.sleep(3)
print "\nTerrorists want to contact you."
time.sleep(2)
print "\nThey have send you a message"
print "\nPress ENTER to see the message..."
raw_input("")
print "\v\v\nPresident %r if you ignore to help Kamuri, We will support you in next elections." %f_name[0]
print "People of our religion will support you. Secondly, we may ignore your country from our HIT LIST as well!!"
g = int(raw_input("\nTake your decision \n>"))
if g == 2:
print "\nPresident %r day by day conditions in Kamuri got worse." %f_name[0]
time.sleep(2)
print "\nKamuri Government was overthrown by Terrorists"
time.sleep(2)
print "\nYou even lost some reputation in World! But terrorists ignored to attack your country!"
time.sleep(3)
print "This decision of yours gave some time to recover your country from Financial crisis"
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU SURVIVED!!"
elif g == 1:
print "\nYou saved Kamuri. But back to back floods and warfare has made your economy weak"
time.sleep(5)
print "\nPresident Mark has also cut off economic ties with your country"
time.sleep(5)
print "\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\v\vYOU LOSE!!"
else :
print "Bye!"
else:
print "Being Diplomatic!"
time.sleep(3)
print "Riots!!!!"
time.sleep(2)
print "Religious Minority in your country got upset and their protest have turned into riots"
|
apache-2.0
| 1,313,378,677,649,439,200
| 45.718821
| 173
| 0.653885
| false
| 2.88033
| false
| false
| false
|
fniephaus/alfred-dropbox
|
src/dropbox_filter.py
|
1
|
5886
|
import os
import sys
import time
from email.utils import parsedate
import config
from helpers import get_resource, get_hash, get_account_info, uid_exists
from dropbox import client
from workflow import Workflow, PasswordNotFound, ICON_TRASH
from workflow.background import run_in_background
def main(wf):
if wf.update_available:
wf.add_item("An update is available!",
autocomplete='workflow:update', valid=False)
user_input = wf.args[0]
command = query = ''
if len(user_input) > 0:
command = user_input.split()[0]
query = user_input[len(command) + 1:]
try:
wf.get_password('dropbox_access_tokens')
accounts = wf.cached_data(
'dropbox_accounts', data_func=get_account_info, max_age=360)
except PasswordNotFound:
accounts = None
if command == 'auth':
if query == '':
wf.add_item(
'Please enter your authorization code',
'If you don\'t have one, simply press enter.',
arg='url %s' % get_auth_url(), valid=True)
else:
wf.add_item(
'Authorize with "%s"' % query, 'Press enter to proceed',
arg='auth %s' % query, valid=True)
elif accounts is not None and command == 'remove':
for account in accounts:
wf.add_item(get_title(account), account[
'email'], arg='remove %s' % account['uid'], valid=True)
elif (accounts is not None and len(user_input) > 0 and
uid_exists(command, accounts)):
file_or_folder = get_file_or_folder(command, query)
if isinstance(file_or_folder, dict): # file
wf.add_item(
'Share', 'Copy link to clipboard',
arg='share %s %s' % (command, file_or_folder['path']),
icon='icons/folder_public.png', valid=True)
wf.add_item(
'Save to Downloads',
arg='download %s %s' % (command, file_or_folder['path']),
icon='icons/download.png', valid=True)
wf.add_item(
'Save to Desktop',
arg='desktop %s %s' % (command, file_or_folder['path']),
icon='icons/desktop.png', valid=True)
wf.add_item(
'Delete',
arg='delete %s %s' % (command, file_or_folder['path']),
icon=ICON_TRASH, valid=True)
elif isinstance(file_or_folder, list) and file_or_folder: # folder
if query and query != '/':
path = file_or_folder[0]['path'].split('/')
path = '/'.join(path[:-2])
wf.add_item(
'..', 'Change to parent directory',
icon='icons/folder.png',
autocomplete='%s %s/' % (command, path), valid=False)
for f in file_or_folder:
title = os.path.basename(f['path'])
subtitle = 'Modified: %s' % time.strftime(
'%Y-%m-%d %H:%M:%S', parsedate(f['modified']))
icon = 'icons/%s.png' % f['icon']
if not os.path.isfile(icon):
icon = 'icons/page_white.png'
if f['is_dir']:
title += '/'
wf.add_item(
title, subtitle, icon=icon,
autocomplete='%s %s/' % (command, f['path']),
valid=False)
else:
title += ' (%s)' % f['size']
wf.add_item(
title, subtitle, icon=icon,
autocomplete='%s %s' % (command, f['path']),
valid=False)
else:
wf.add_item(
'No files were found', 'Try a different request.', valid=False)
else:
if accounts is not None:
for account in accounts:
wf.add_item(get_title(account),
account['email'],
autocomplete='%s ' % account['uid'],
valid=False)
wf.add_item('Add another Dropbox account',
'', autocomplete='auth ', valid=False)
if accounts is not None and len(accounts) > 0:
wf.add_item('Remove an existing Dropbox account',
'', autocomplete='remove', valid=False)
wf.send_feedback()
def prefetch(wf, uid, path):
job_name = 'dropbox_prefetch_%s' % get_hash(uid, path)
cmd = ['/usr/bin/python', wf.workflowfile('dropbox_prefetch.py'), uid, path]
run_in_background(job_name, cmd)
def get_file_or_folder(uid, query):
path = '/' if query == '' else query
if len(path) > 1 and path[-1] == '/':
path = path[:-1]
prefetch(wf, uid, path)
def wrapper():
return get_resource(uid, path)
return wf.cached_data(get_hash(uid, path), wrapper, max_age=120)
def get_auth_url():
flow = client.DropboxOAuth2FlowNoRedirect(
config.APP_KEY, config.APP_SECRET)
return flow.start()
def get_title(account):
normal_use = account['quota_info']['normal']
shared_use = account['quota_info']['shared']
total_quota = account['quota_info']['quota']
total_used = round(100.0 * (normal_use + shared_use) / total_quota, 2)
return '%s (%s%% of %s used)' % (
account['display_name'], total_used,
sizeof(account['quota_info']['quota']))
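# Human-readable byte count, e.g. sizeof(123456789) -> '117.7 MB'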
def sizeof(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
if __name__ == '__main__':
wf = Workflow(
update_settings={'github_slug': 'fniephaus/alfred-dropbox'},
help_url='https://github.com/fniephaus/alfred-dropbox/issues'
)
log = wf.logger
sys.exit(wf.run(main))
|
mit
| -1,506,039,148,886,385,700
| 34.672727
| 80
| 0.51546
| false
| 3.880026
| false
| false
| false
|
alejo8591/maker
|
core/api/managers.py
|
1
|
2413
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of maker.
# License www.tree.io/license
from django.db import models
from django.contrib.auth.models import User
from maker.core.conf import settings
KEY_SIZE = 18
SECRET_SIZE = 32
CONSUMER_DB = getattr(settings, 'MAKER_API_CONSUMER_DB', 'default')
class KeyManager(models.Manager):
"""
Add support for random key/secret generation
"""
def generate_random_codes(self):
key = User.objects.make_random_password(length=KEY_SIZE)
secret = User.objects.make_random_password(length=SECRET_SIZE)
while self.filter(key__exact=key, secret__exact=secret).count():
secret = User.objects.make_random_password(length=SECRET_SIZE)
return key, secret
class ConsumerManager(KeyManager):
def create_consumer(self, name, description=None, user=None, using=CONSUMER_DB):
"""
Shortcut to create a consumer with random key/secret.
"""
consumer, created = self.using(using).get_or_create(name=name)
if user:
consumer.user = user
if description:
consumer.description = description
if created:
consumer.key, consumer.secret = self.generate_random_codes()
consumer.save()
return consumer
_default_consumer = None
class ResourceManager(models.Manager):
_default_resource = None
def get_default_resource(self, name):
"""
Add cache if you use a default resource.
"""
if not self._default_resource:
self._default_resource = self.get(name=name)
return self._default_resource
class TokenManager(KeyManager):
def create_token(self, consumer_id, token_type, timestamp, user=None, using=None):
"""
Shortcut to create a token with random key/secret.
"""
if using:
manager = self.using(using)
else:
manager = self
token, created = manager.get_or_create(consumer_id=consumer_id,
token_type=token_type,
timestamp=timestamp,
user=user)
if created:
token.key, token.secret = self.generate_random_codes()
token.save()
return token
|
mit
| 2,176,478,604,376,307,200
| 28.426829
| 86
| 0.594281
| false
| 4.255732
| false
| false
| false
|
wengzhiwen/Your-Vehicle-Status
|
main.py
|
1
|
1119
|
# coding=utf-8
import os
import helper
from google.appengine.ext.webapp import template
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import users
class MainHandler(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
nickName = user.nickname()
template_values = {
'nick_name': nickName,
}
path = os.path.join(os.path.dirname(__file__), 'main.html')
self.response.out.write(template.render(path, template_values))
else:
template_values = {
'message': '预期外的严重错误。',  # "Unexpected critical error."
}
path = os.path.join(os.path.dirname(__file__), 'error.html')
self.response.out.write(template.render(path, template_values))
def main():
application = webapp.WSGIApplication([('/main/', MainHandler)],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
mit
| 2,130,439,821,703,919,400
| 28.756757
| 75
| 0.563124
| false
| 4.092937
| false
| false
| false
|
SushiTee/teerace
|
teerace/accounts/forms.py
|
1
|
4377
|
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from accounts.models import UserProfile
from annoying.functions import get_config
from recaptcha_works.fields import RecaptchaField
class RegisterForm(forms.Form):
username = forms.RegexField(label="Username", regex=r'^\w+$', min_length=2,
max_length=30)
password1 = forms.CharField(label="Password", min_length=4,
widget=forms.PasswordInput(render_value=False),
help_text="At least 4 chars long")
password2 = forms.CharField(label="Password (again)", min_length=4,
widget=forms.PasswordInput(render_value=False))
email1 = forms.EmailField(label="E-mail address",
help_text="We won't share this to any 3rd-parties!")
email2 = forms.EmailField(label="E-mail address (again)")
if get_config('ENABLE_CAPTCHA', False):
if not (get_config('RECAPTCHA_PUBLIC_KEY', False) and
get_config('RECAPTCHA_PRIVATE_KEY', False)):
raise ImproperlyConfigured("You must define the RECAPTCHA_PUBLIC_KEY"
" and/or RECAPTCHA_PRIVATE_KEY setting in order to use reCAPTCHA.")
recaptcha = RecaptchaField(label="Human test", required=True)
def clean_username(self):
username = self.cleaned_data.get('username')
try:
user = User.objects.get(username__iexact=username)
del user
raise forms.ValidationError("Username is already taken")
except User.DoesNotExist:
pass
return username
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 != password2:
raise forms.ValidationError(
"You must type the same password each time")
return password2
def clean_email2(self):
email1 = self.cleaned_data.get('email1')
email2 = self.cleaned_data.get('email2')
if email1 != email2:
raise forms.ValidationError(
"You must type the same e-mail address each time")
return email2
def save(self):
return User.objects.create_user(self.cleaned_data.get('username'),
self.cleaned_data.get('email1'), self.cleaned_data.get('password1'))
class LoginForm(forms.Form):
username = forms.CharField(label="Username")
password = forms.CharField(label="Password",
widget=forms.PasswordInput(render_value=False))
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.user = None
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if not username or not password:
return self.cleaned_data
self.user = authenticate(username=username, password=password)
if self.user is None:
raise forms.ValidationError("Invalid username and/or password")
if not self.user.is_active:
raise forms.ValidationError("Your account has been disabled")
return self.cleaned_data
class SettingsUserForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name')
class SettingsProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('gender', 'country',)
class PasswordChangeForm(forms.Form):
old_password = forms.CharField(label="Old password",
widget=forms.PasswordInput(render_value=False))
new_password1 = forms.CharField(label="New password", min_length=4,
widget=forms.PasswordInput(render_value=False))
new_password2 = forms.CharField(label="New password (again)", min_length=4,
widget=forms.PasswordInput(render_value=False))
def __init__(self, *args, **kwargs):
self.current_user = kwargs.pop('current_user', None)
if self.current_user is None:
raise AttributeError("current_user missing")
super(PasswordChangeForm, self).__init__(*args, **kwargs)
def clean_old_password(self):
old_password = self.cleaned_data.get('old_password')
if not self.current_user.check_password(old_password):
raise forms.ValidationError(
"You have to type your old password correctly.")
return old_password
def clean_new_password2(self):
new_password1 = self.cleaned_data.get('new_password1')
new_password2 = self.cleaned_data.get('new_password2')
if new_password1 != new_password2:
raise forms.ValidationError(
"You must type the same password each time")
return new_password2
def save(self):
self.current_user.set_password(self.cleaned_data.get('new_password1'))
self.current_user.save()
|
bsd-3-clause
| -4,667,468,854,348,954,000
| 33.203125
| 76
| 0.739319
| false
| 3.400932
| true
| false
| false
|
papaiking/faceChecker_device
|
app/lib/search.py
|
1
|
3775
|
"""
@Author: Thuc VX<vxthuc@labsofthings.com>
@ORG: labsofthings.com
@date: 27 May 2017
Purpose: This package searches for users in a captured image.
It performs the following steps:
- Request a Linkedface token,
- Post the search image to Linkedface,
- Search for the users appearing in the image
"""
import time
import requests
import json
from init import Log
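# Typical usage (a sketch; the option/config objects and the file path below are illustrative
# only -- the config must provide the 'Server' and 'Linkedface' keys read in this class):
#   searcher = Search(options, config)
#   matches = searcher.searchUser('/path/to/captured_image.jpg')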
class Search:
def __init__(self, options, config):
self.options = options
self.config = config
self.linkedface_token = self.getLinkedface_token()
# Call to FaceChecker server to get Linkedface access token
def getLinkedface_token(self):
# Get and check token URL
token_url = self.config['Server'].get('FaceChecker_GET_LINKEDFACE_TOKEN')
if not( token_url ):
Log.error('Configuration: FaceChecker_GET_LINKEDFACE_TOKEN for URL to get Linkedface token is invalid')
return None
# Get and check server token
server_token = self.config['Server'].get('FaceChecker_TOKEN')
if not( server_token ):
Log.error('Configuration: FaceChecker_TOKEN to access server APIs is invalid')
return None
headers = {'x-access-token': server_token}
# Request to get Linkedface token
ret = requests.get(token_url, headers=headers)
if not(ret) or (ret.status_code != 200):
Log.error('Cannot request to server to get Linkedface token')
return None
data = ret.json()
# return Linkedface access token
return data.get('token')
# Check if image contain any face
def _validateFaceImage(self, captured_img):
# TODO here
return True
# Upload image for searching
def _uploadImage(self, captured_img):
imgFile = open(captured_img, 'rb')
if imgFile is None:
Log.error('Cannot open image file: ' + captured_img)
return None
upload_url = self.config['Linkedface'].get('LINKEDAFCE_postimg')
# Log.info('Post url: ' + upload_url + ', file: ' + captured_img)
files = { 'faceImage': imgFile }
res = requests.post( upload_url, files=files )
if res is not None and res.status_code==200:
#Log.info('Uploaded file: ' + res.text)
return res.json()
else:
Log.error('Error in uploading image for searching')
return None
"""
This function searches for users whose faces are similar to the face in the image.
It does the following steps:
- Check if this image is valid, contain face
- Upload image for searching
- Search and return result
"""
def searchUser(self, captured_img):
# Upload image for searching
uploaded_img = self._uploadImage(captured_img)
# Log.info('Uploaded image: ' + json.dumps(uploaded_img))
if uploaded_img is not None:
# Search for user in image
search_url = self.config['Linkedface'].get('LINKEDAFCE_search')
if search_url is None:
Log.error('Error in configuration for parameter: Linkedface.LINKEDAFCE_search')
search_url = search_url + uploaded_img.get('id')
headers = {'Authorization':'BEARER ' + self.linkedface_token}
#Log.info('Search URL: ' + search_url + ', header: ' + json.dumps(headers))
# Request for searching
res = requests.get(search_url, headers=headers)
if res.status_code == 200:
#Log.info('Search response: ' + res.text)
return res.json()
else:
Log.info('Error in searching user: ' + res.text)
return None
else:
return None
|
mit
| -2,242,247,548,680,306,700
| 33.009009
| 115
| 0.600795
| false
| 4.143798
| true
| false
| false
|
networkdynamics/zenlib
|
src/zen/drawing/ubigraph.py
|
1
|
12030
|
"""
The ``zen.drawing.ubigraph`` module provides support for rendering Zen graphs in the `Ubigraph visualization environment <http://ubietylab.net/ubigraph/>`_. The renderer will update the visualization in real time as changes are made to the underlying graph. Furthermore, edges and nodes can be visually highlighted.
The functionality of this module falls into two areas: rendering the topology of the graph and highlighting nodes and edges. All this functionality is
available through the :py:class:`zen.UbigraphRenderer` class.
Rendering a graph
=================
In order to render a graph, first construct the `UbigraphRenderer` and connect it to an Ubigraph server.
A simple use case involving a connection to a local Ubigraph server would look something like::
G = DiGraph()
ur = UbigraphRenderer('http://localhost:20738/RPC2')
ur.default_node_color = '#00ff00' # all nodes will be green
ur.graph = G
G.add_edge(1,2)
G.add_edge(2,3)
In this example, the graph is empty at first. Because the renderer registers as a graph event listener, the Ubigraph view
will be updated as nodes and edges are added.
Note that it is possible to change the way that nodes and edges will be rendered by default. Currently the following attributes
are supported:
* ``default_node_color``
* ``default_node_shape``
* ``default_edge_color``
* ``default_edge_width``
All these attributes assume values dictated by the `Ubigraph API <http://ubietylab.net/ubigraph/content/Docs/index.html>`_. Both undirected and directed graphs are
supported. Directed graphs will be rendered with directed edges - everything else is the same.
Node/Edge Highlighting
======================
Nodes and edges can be highlighted using the methods :py:meth:`zen.UbigraphRenderer.highlight_nodes`/:py:meth:`zen.UbigraphRenderer.highlight_nodes_` and :py:meth:`zen.UbigraphRenderer.highlight_edges`/:py:meth:`zen.UbigraphRenderer.highlight_edges_`. As always, the underscore allows use of either node/edge indices (with the underscore) or node/edge objects (without the underscore).
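For example, continuing the rendering example above (a minimal sketch)::

	ur.highlight_nodes([1])                  # highlight by node object
	ur.highlight_edges([(1,2)])              # highlight by edge endpoints
	ur.highlight_edges_([G.edge_idx(2,3)])   # highlight by edge index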
The UbigraphRenderer class
==========================
.. autoclass:: zen.UbigraphRenderer()
"""
import logging
import time
import xmlrpclib
from zen.graph import Graph
from zen.digraph import DiGraph
from zen.exceptions import ZenException  # raised by the kwargs check in __init__ (assumed import path)
logger = logging.getLogger(__name__)
class UbigraphRenderer(object):
"""
The UbigraphRenderer is constructed with a URL to the Ubigraph server it will connect to. Following this, the graph can be set using the ``.graph`` attribute.
"""
def __init__(self,url,**kwargs):
"""
Create an UbigraphRenderer instance that will render graph events to the server indicated in ``url``.
**Keyword Args**:
* ``graph [=None]`` (:py:class:`Graph` or :py:class:`DiGraph`): the graph that will be rendered. This can also be set using
the ``UbigraphRenderer.graph`` property.
* ``event_delay [=0]`` (float): the number of seconds that each event update call should wait. This is one way of
making the graph render more slowly. Of course, this also slows down the graph construction code itself. Use with care.
"""
graph = kwargs.pop('graph',None)
self._event_delay = kwargs.pop('event_delay',0)
if len(kwargs) > 0:
raise ZenException, 'Unexpected remaining arguments: %s' % kwargs.keys()
logger.debug('connecting to ubigraph server: %s' % url)
self.server = xmlrpclib.Server(url)
self.server_graph = self.server.ubigraph
self.highlighted_node_style = self.server_graph.new_vertex_style(0)
self.highlighted_edge_style = self.server_graph.new_edge_style(0)
self.default_node_color = '#0000bb'
self.default_node_shape = 'sphere'
self.default_edge_color = '#ffffff'
self.default_edge_width = '1.0'
self.highlighted_node_color = '#bb0000'
self.highlighted_node_shape = 'sphere'
self.highlighted_edge_color = '#ffff00'
self.highlighted_edge_width = '6.0'
# now that everything is setup, if a graph was provided, apply it!
self.graph = graph
def __graph(self,graph=None):
if graph is None:
return self._graph
else:
self.server_graph.clear()
####
# reapply defaults to the server
# set the default styles
self.default_node_color = self._default_node_color
self.default_node_shape = self._default_node_shape
self.default_edge_color = self._default_edge_color
self.default_edge_width = self._default_edge_width
if type(graph) == DiGraph:
self.server_graph.set_edge_style_attribute(0, 'arrow', 'true')
# create and set the highlighted styles
self.highlighted_node_style = self.server_graph.new_vertex_style(0)
self.highlighted_edge_style = self.server_graph.new_edge_style(0)
self.highlighted_node_color = self._hlight_node_color
self.highlighted_node_shape = self._hlight_node_shape
self.highlighted_edge_color = self._hlight_edge_color
self.highlighted_edge_width = self._hlight_edge_width
# zero out highlighted anything
self._highlighted_edges = set()
self._highlighted_nodes = set()
####
# initialize graph stuff
self._graph = graph
self.node_map = {}
self.edge_map = {}
self._graph.add_listener(self)
#####
# build up the graph as it currently exists
# briefly suspend the event delay
actual_event_delay = self._event_delay
self._event_delay = 0
for nidx,nobj,data in self._graph.nodes_iter_(obj=True,data=True):
self.node_added(nidx,nobj,data)
for eidx,data,weight in self._graph.edges_iter_(data=True,weight=True):
uidx,vidx = self._graph.endpoints_(eidx)
self.edge_added(eidx,uidx,vidx,data,weight)
# put the event delay back in place
self._event_delay = actual_event_delay
graph = property( __graph, __graph)
def __inner_default_node_color(self,color=None):
"""
If a color is given, the default node color is changed. Otherwise, the default color is returned.
"""
if color is not None:
self.server_graph.set_vertex_style_attribute(0, 'color', color)
self._default_node_color = color
else:
return self._default_node_color
def __inner_default_node_shape(self,shape=None):
"""
If a shape is given, the default node shape is changed. Otherwise, the default shape is returned.
"""
logger.debug('entering inner default node shape with %s' % shape)
if shape is not None:
self.server_graph.set_vertex_style_attribute(0, 'shape', shape)
self._default_node_shape = shape
else:
return self._default_node_shape
def __inner_default_edge_color(self,color=None):
"""
If a shape is given, the default edge color is changed. Otherwise, the default color is returned.
"""
if color is not None:
self.server_graph.set_edge_style_attribute(0, 'color', color)
self._default_edge_color = color
else:
return self._default_edge_color
def __inner_default_edge_width(self,width=None):
"""
If a width (string) is given, the default edge width is changed. Otherwise, the default width is returned.
"""
if width is not None:
self.server_graph.set_edge_style_attribute(0, 'width', width)
self._default_edge_width = width
else:
return self._default_edge_width
default_node_color = property(__inner_default_node_color, __inner_default_node_color)
default_node_shape = property(__inner_default_node_shape, __inner_default_node_shape)
default_edge_color = property(__inner_default_edge_color, __inner_default_edge_color)
default_edge_width = property(__inner_default_edge_width, __inner_default_edge_width)
def __inner_hlight_node_color(self,color=None):
"""
If a color is given, the highlighted node color is changed. Otherwise, the highlighted color is returned.
"""
if color is not None:
self.server_graph.set_vertex_style_attribute(self.highlighted_node_style, 'color', color)
self._hlight_node_color = color
else:
return self._hlight_node_color
def __inner_hlight_node_shape(self,shape=None):
"""
If a shape is given, the hlight node shape is changed. Otherwise, the hlight shape is returned.
"""
logger.debug('entering inner hlight node shape with %s' % shape)
if shape is not None:
self.server_graph.set_vertex_style_attribute(self.highlighted_node_style, 'shape', shape)
self._hlight_node_shape = shape
else:
return self._hlight_node_shape
def __inner_hlight_edge_color(self,color=None):
"""
If a shape is given, the hlight edge color is changed. Otherwise, the hlight color is returned.
"""
if color is not None:
self.server_graph.set_edge_style_attribute(self.highlighted_edge_style, 'color', color)
self._hlight_edge_color = color
else:
return self._hlight_edge_color
def __inner_hlight_edge_width(self,width=None):
"""
If a width (string) is given, the hlight edge width is changed. Otherwise, the hlight width is returned.
"""
if width is not None:
self.server_graph.set_edge_style_attribute(self.highlighted_edge_style, 'width', width)
self._hlight_edge_width = width
else:
return self._hlight_edge_width
highlighted_node_color = property(__inner_hlight_node_color, __inner_hlight_node_color)
highlighted_node_shape = property(__inner_hlight_node_shape, __inner_hlight_node_shape)
highlighted_edge_color = property(__inner_hlight_edge_color, __inner_hlight_edge_color)
highlighted_edge_width = property(__inner_hlight_edge_width, __inner_hlight_edge_width)
def node_added(self,nidx,nobj,data):
# skip nodes that have already been seen
if nidx in self.node_map:
logger.warn('node %d cannot be added. A mapping already exists.' % nidx)
return
logger.debug('registering node %d with the server' % nidx)
self.node_map[nidx] = self.server_graph.new_vertex()
self.server_graph.set_vertex
time.sleep(self._event_delay)
return
def node_removed(self,nidx,nobj):
if nidx in self.node_map:
logger.debug('removing node %d from the server.' % nidx)
self.server_graph.remove_vertex(self.node_map[nidx])
del self.node_map[nidx]
time.sleep(self._event_delay)
else:
logger.warn('node %d cannot be removed. No mapping exists.' % nidx)
def edge_added(self,eidx,uidx,vidx,data,weight):
# skip nodes that have already been seen
if eidx in self.edge_map:
logger.warn('edge %d cannot be added. A mapping already exists.' % eidx)
return
logger.debug('registering edge %d with the server' % eidx)
self.edge_map[eidx] = self.server_graph.new_edge(self.node_map[uidx],self.node_map[vidx])
time.sleep(self._event_delay)
return
def edge_removed(self,eidx,uidx,vidx):
if eidx in self.edge_map:
logger.debug('removing edge %d from the server.' % eidx)
self.server_graph.remove_edge(self.edge_map[eidx])
del self.edge_map[eidx]
time.sleep(self._event_delay)
else:
logger.warn('edge %d cannot be removed. No mapping exists.' % eidx)
def highlight_edges_(self,edges):
for eidx in edges:
if eidx not in self._highlighted_edges:
self.server_graph.change_edge_style(self.edge_map[eidx], self.highlighted_edge_style)
self._highlighted_edges.add(eidx)
return
def highlight_nodes_(self,nodes):
for nidx in nodes:
if nidx not in self._highlighted_nodes:
self.server_graph.change_vertex_style(self.node_map[nidx], self.highlighted_node_style)
self._highlighted_nodes.add(nidx)
return
def highlight_edges(self,edges):
self.highlight_edges_(map(lambda x: self._graph.edge_idx(*x),edges))
def highlight_nodes(self,nodes):
self.highlight_nodes_(map(lambda x: self._graph.node_idx(x),nodes))
if __name__ == '__main__':
import zen
import time
logging.basicConfig(level=logging.DEBUG)
G = zen.DiGraph()
ur = UbigraphRenderer('http://localhost:20738/RPC2')
ur.default_node_shape = 'sphere'
ur.default_node_color = '#1100dd'
ur.graph = G
e1 = G.add_edge(1,2)
time.sleep(1)
e2 = G.add_edge(2,3)
time.sleep(1)
e3 = G.add_edge(3,4)
time.sleep(1)
e4 = G.add_edge(1,4)
ur.highlight_edges([(1,2),(2,3)])
ur.highlight_nodes([1])
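    # Optional continuation of this demo (the colour and width values below are
    # illustrative, not defaults of the renderer): the highlighted_* properties
    # defined above can be tuned before calling the highlight methods, e.g.
    #   ur.highlighted_node_color = '#ff0000'
    #   ur.highlighted_edge_color = '#ffaa00'
    #   ur.highlighted_edge_width = '3.0'
    #   ur.highlight_edges([(3,4)])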
|
bsd-3-clause
| -3,296,160,238,034,257,400
| 34.385294
| 386
| 0.705653
| false
| 3.156652
| false
| false
| false
|
cjbe/artiqDrivers
|
artiqDrivers/devices/coherentDds/driver.py
|
1
|
6338
|
import logging
import serial
import math
import time
logger = logging.getLogger(__name__)
class CoherentDds:
ser = None;
lsbAmp = 1.0 / 16383 # 0x3fff is maximum amplitude
lsbPhase = 360.0 / 65536 # Degrees per LSB.
def __init__(self, addr, clockFreq, baudrate=115200, internal_clock=False,
incoherent_channels=[False, False, False, False]):
# addr : serial port name
# clockFreq : clock frequency in Hz
# internal_clock: if true, use internal 1 GHz clock
        # incoherent_channels: list of four booleans; True disables coherence for that channel
self.ser = serial.Serial(addr, baudrate=baudrate)
self.lsbFreq = clockFreq / (2**32);
self.clockFreq = clockFreq
self.disableCoherenceMode(*incoherent_channels)
# Write a trivial pulse shape to /disable/ pulse shaping (the VGA is always at max)
self.setPulseShape(0, [1])
self.setPulseShape(1, [1])
self.setPulseShape(2, [1])
self.setPulseShape(3, [1])
if internal_clock:
self.setClockSource(clock_internal=True)
def read_spi_word(self):
self.send("getSpiWord?\n")
line = self.ser.readline().decode().strip()
return int(line, 16)
def get_lsb_freq(self):
return self.lsbFreq
def send(self, data):
self.ser.write(data.encode())
def identity(self):
"""Returns a string representing the firmware name and version"""
self.send('idn?\n')
return self.ser.readline().decode().strip()
def resetPhase(self):
self.send('resetPhase\n');
def setProfile(self, channel, profile, freq, phase=0.0, amp=1.0):
"""Sets a DDS profile frequency (Hz), phase (degrees), and amplitude (full-scale).
phase defaults to 0 and amplitude defaults to 1"""
if amp < 0 or amp > 1:
raise ValueError("DDS amplitude must be between 0 and 1")
        if freq < 0 or freq > 450e6: # This should be dependent on the clock frequency
raise ValueError("DDS frequency must be between 0 and 450 MHz")
ampWord = int(round( amp * 0x3fff ))
phaseWord = int(round( (phase % 360) / 360.0 * 0xffff ))
freqWord = int(round( freq / self.lsbFreq ))
self.setProfileWords(channel, profile, freqWord, phaseWord, ampWord)
def setProfileWords(self, channel, profile, freq, phase, amp): # Freq, phase, amp are all in units of lsb
profile = int(profile) # have to do this, because artiq uses a special artiq.integer
if channel < 0 or channel > 3 or not isinstance(channel, int):
raise ValueError("DDS channel should be an integer between 0 and 3")
if profile < 0 or profile > 7 or not isinstance(profile, int):
raise ValueError("DDS profile should be an integer between 0 and 7")
if amp > 0x3fff or amp < 0 or not isinstance(amp, int):
raise ValueError("DDS amplitude word should be an integer between 0 and 0x3fff")
if phase > 0xffff or phase < 0 or not isinstance(phase, int):
raise ValueError("DDS phase word should be an integer between 0 and 0xffff")
if freq < 0 or freq > 0xffffffff or not isinstance(freq, int):
raise ValueError("DDS frequency word should be an integer between 0 and 0xffffffff")
self.send('setProfile {} {} {} {} {}\n'.format( channel, profile, freq, phase, amp) );
def reset(self):
self.send('reset\n');
time.sleep(50e-3);
def disableCoherenceMode(self, ch0=False, ch1=False, ch2=False, ch3=False):
self.send('setDisableCoherence {:d} {:d} {:d} {:d}\n'.\
format(ch0,ch1,ch2,ch3))
self.ser.readline()
def setPulseShape(self, shapeChannel, shapeVec):
if shapeChannel < 0 or shapeChannel > 3 or not isinstance(shapeChannel, int):
raise ValueError("DDS pulse shape channel should be an integer between 0 and 3")
if len(shapeVec) < 1 or len(shapeVec) > 2048:
raise ValueError("DDS pulse shape array length should be between 1 and 2048")
quantisedShapeVec = []
for el in shapeVec:
quantisedEl = round(el*0x3fff)
if quantisedEl < 0 or quantisedEl > 0x3fff:
raise ValueError("DDS pulse shape points should all be between 0.0 and 1.0")
quantisedShapeVec.append(quantisedEl)
self.send('setPulseShape {}\n'.format(shapeChannel))
for i in range(len(quantisedShapeVec)):
self.send('%d' % quantisedShapeVec[i]);
if i != len(quantisedShapeVec)-1:
self.send(',');
self.send('\n');
def setSensiblePulseShape(self, duration, shapeChannel=0):
"""Sets a sensible looking pulse shape with total duration 'duration' seconds. The duration must be between 0 and 10us"""
if duration > 10e-6 or duration < 0.2e-6:
raise ValueError("DDS pulse shape duration must be between 0.2us and 10us")
shapeVec = []
i_max = round(duration*250e6/2) # /2 because clock used is divided by 2, 250MHz is DDS sync clk
for i in range(i_max):
y = 0.209*math.log10( (math.sin((1+i)/float(i_max+1)*math.pi/2))**4 ) + 1
if y < 0:
y = 0
shapeVec.append(y)
self.setPulseShape(shapeChannel, shapeVec)
def setClockSource(self, clock_internal=False):
"""Choose between external clock (default) and internal 1 GHz source"""
self.send('setClockSource {:d}\n'.format(clock_internal))
self.ser.readline()
self.ser.readline()
def ping(self):
return True
class CoherentDdsSim:
def __init__(self):
pass
def identity(self):
return "coherentdds simulation"
def resetPhase(self):
logger.warning("Resetting phase")
pass
def setProfile(self, channel, profile, freq, phase=0.0, amp=1.0):
logger.warning("Setting ch:p {}:{} to freq={}, phase={}, amp={}".format(channel,profile,freq,phase,amp))
pass
def setProfileWords(self, channel, profile, freq, phase, amp): # Freq, phase, amp are all in units of lsb
pass
def reset(self):
pass
def setPulseShape(self, shapeChannel, shapeVec):
pass
def ping(self):
return True
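# A minimal usage sketch. The serial port name and clock frequency below are
# illustrative assumptions, not values defined by this driver.
if __name__ == "__main__":
    dds = CoherentDds("/dev/ttyUSB0", clockFreq=1e9)
    print(dds.identity())
    # Profile 0 on channel 0: 200 MHz, 90 degrees phase, full-scale amplitude
    dds.setProfile(0, 0, 200e6, phase=90.0, amp=1.0)
    # Apply a 2 us smooth pulse-shape envelope on channel 0
    dds.setSensiblePulseShape(2e-6, shapeChannel=0)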
|
gpl-3.0
| -1,789,794,188,135,591,000
| 38.36646
| 129
| 0.622436
| false
| 3.74144
| false
| false
| false
|
enixdark/im-r-e-d-i-s
|
flask-cook/my_app/catalog/forms.py
|
1
|
2193
|
from flask_wtf import Form
from wtforms import TextField,DecimalField,SelectField
from decimal import Decimal
from wtforms.validators import InputRequired,NumberRange,Optional
from models import Category,Product
from wtforms.validators import ValidationError
from wtforms.widgets import html_params,Select, HTMLString
from wtforms import FileField
def check_duplicate_category(case_sensitive=True):
    def _check_duplicate(form, field):
        if case_sensitive:
res = Category.query.filter(
Category.name.like('%' + field.data + '%')
).first()
else:
res = Category.query.filter(
Category.name.ilike('%' + field.data + '%')
).first()
if res:
raise ValidationError('Category named %s already exists' % field.data)
return _check_duplicate
class CustomCategoryInput(Select):
def __call__(self,field,**kwargs):
kwargs.setdefault('id',field.id)
html = []
for val, label, selected in field.iter_choices():
html.append(
'<input type="radio" %s> %s' % (html_params(name=field.name,value=val,checked=selected,**kwargs)
,label)
)
return HTMLString(''.join(html))
class CategoryField(SelectField):
"""docstring for CategoryField"""
widget = CustomCategoryInput()
def iter_choices(self):
categories = [(c.id,c.name) for c in Category.query.all()]
for value,label in categories:
yield (value,label,self.coerce(value) == self.data)
def pre_validate(self,form):
# import ipdb; ipdb.set_trace()
for (v,_) in [(c.id,c.name) for c in Category.query.all()]:
if self.data == v:
break
else:
raise ValueError(self.gettext('Not a valid choice'))
return super(CategoryField,self).pre_validate(form)
class NameForm(Form):
name = TextField('Name', validators=[InputRequired()])
class ProductForm(NameForm):
price = DecimalField('Price',validators=[
InputRequired(),NumberRange(min=Decimal('0.0'))
])
category = CategoryField('Category',validators=[InputRequired()],coerce=int)
company = SelectField('Company',validators=[Optional()])
# company = SelectField('Company')
image_path = FileField('Product image')
class CategoryForm(NameForm):
name = TextField('Name', validators=[
InputRequired(),check_duplicate_category()
])
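# A minimal usage sketch, assuming the surrounding Flask application exposes an
# `app` object (as in the cookbook-style layout this module belongs to) and that
# the database holds a Category with id 1; the request data is illustrative.
if __name__ == '__main__':
    from my_app import app

    with app.test_request_context(method='POST',
                                  data={'name': 'Phone', 'price': '199.99',
                                        'category': '1', 'company': ''}):
        form = ProductForm(csrf_enabled=False)
        form.company.choices = [('', '---')]
        if form.validate_on_submit():
            print 'valid:', form.name.data, form.price.data
        else:
            print 'errors:', form.errors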
|
mit
| -5,309,016,394,700,146,000
| 30.782609
| 100
| 0.720474
| false
| 3.368664
| false
| false
| false
|
arokem/MRS-old
|
MRS/qc.py
|
1
|
2437
|
"""
quality control for MRS data
"""
import os
import os.path as op
import nibabel as nib
import numpy as np
import nipype.pipeline.engine as pe
from nipype.interfaces import fsl
def motioncheck(ref_file, end_file, out_path=None, thres=5.0):
"""
Checks motion between structural scans of the same modality.
Ideally obtained at the beginning and end of a scanning session.
Parameters
----------
ref_file: nifti file
Nifti file of first localizer acquired at the beginning of the session
    end_file: nifti file
        Nifti file of the localizer acquired at the end of the session
thres: float
threshold in mm of maximum allowed motion. Default 5mm
Returns
-------
rms : float
root mean square of xyz translation
passed: boolean
indicates if motion passed threshold: 1 if passed, 0 if failed.
"""
ref = nib.load(ref_file)
end = nib.load(end_file)
ref_data = ref.get_data()
end_data = end.get_data()
# Check if same affine space. modality must be the same to use realign,
# and prescription must be the same to deduce motion
ref_aff=ref.get_affine()
end_aff=end.get_affine()
if np.array_equal(ref_aff, end_aff):
print 'affines match'
else:
raise ValueError("Affines of start and end images do not match")
# save only axials
refax = ref_data[:, :, :, 0, np.newaxis]
endax = end_data[:, :, :, 0, np.newaxis]
    if out_path is None:
        out_path = op.dirname(ref_file)
refax_img = nib.Nifti1Image(refax, ref_aff)
nib.save(refax_img, op.join(out_path, 'refax.nii.gz'))
endax_img = nib.Nifti1Image(endax, ref_aff)
nib.save(endax_img, op.join(out_path, 'endax.nii.gz'))
# realignment
ref_file = op.join(out_path, 'refax.nii.gz')
in_file = op.join(out_path, 'endax.nii.gz')
mat_file = op.join(out_path, 'mat.nii.gz')
mcflt = fsl.MCFLIRT(in_file=in_file, ref_file=ref_file, save_mats=True,
cost='mutualinfo')
res = mcflt.run()
print('realignment affine matrix saved in mat_file: %s'
%res.outputs.mat_file)
aff_file=res.outputs.mat_file
aff = np.loadtxt(aff_file, dtype=float)
# compute RMS as indicator of motion
rel=aff[0:3, 3]
rms = np.sqrt(np.mean(rel**2))
if rms>=thres:
passed=False
else:
passed=True
return rms, passed
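# A minimal usage sketch; the file paths below are illustrative placeholders,
# not data shipped with this module.
if __name__ == "__main__":
    rms, passed = motioncheck('/data/sess01/localizer_start.nii.gz',
                              '/data/sess01/localizer_end.nii.gz',
                              out_path='/data/sess01/qc', thres=5.0)
    print 'RMS translation: %.2f mm, passed threshold: %s' % (rms, passed)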
|
mit
| -6,128,460,252,175,409,000
| 26.382022
| 78
| 0.629873
| false
| 3.177314
| false
| false
| false
|
3liz/QuickOSM
|
QuickOSM/ui/base_overpass_panel.py
|
1
|
9703
|
"""Panel OSM Queries based on Overpass base class."""
import io
from qgis.core import (
Qgis,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsGeometry,
QgsProject,
QgsRectangle,
QgsVectorLayer,
)
from qgis.PyQt.QtWidgets import QCompleter, QDialog
from QuickOSM.core.exceptions import MissingLayerUI, NoSelectedFeatures
from QuickOSM.core.utilities.tools import nominatim_file
from QuickOSM.definitions.gui import Panels
from QuickOSM.definitions.osm import QueryLanguage, QueryType
from QuickOSM.qgis_plugin_tools.tools.i18n import tr
from QuickOSM.ui.base_processing_panel import BaseProcessingPanel
__copyright__ = 'Copyright 2019, 3Liz'
__license__ = 'GPL version 3'
__email__ = 'info@3liz.org'
class BaseOverpassPanel(BaseProcessingPanel):
"""Panel OSM Processing base class.
    This panel will have a run button.
This is a kind of virtual class.
"""
def __init__(self, dialog: QDialog):
super().__init__(dialog)
self.last_places = []
def setup_panel(self):
"""Function to set custom UI for some panels."""
super().setup_panel()
self.dialog.advanced_panels[self.panel].setSaveCollapsedState(False)
self.dialog.advanced_panels[self.panel].setCollapsed(True)
self.dialog.action_oql[self.panel].setEnabled(False)
def query_language_xml(self):
self.dialog.query_language[self.panel] = QueryLanguage.XML
self.dialog.action_oql[self.panel].setEnabled(True)
self.dialog.action_xml[self.panel].setEnabled(False)
def query_language_oql(self):
self.dialog.query_language[self.panel] = QueryLanguage.OQL
self.dialog.action_xml[self.panel].setEnabled(True)
self.dialog.action_oql[self.panel].setEnabled(False)
def query_language_updated(self):
if self.dialog.query_language[Panels.Query] != self.dialog.query_language[Panels.QuickQuery]:
self.dialog.query_language[Panels.Query] = self.dialog.query_language[Panels.QuickQuery]
if self.dialog.query_language[Panels.Query] == QueryLanguage.OQL:
self.dialog.action_xml[Panels.Query].setEnabled(True)
self.dialog.action_oql[Panels.Query].setEnabled(False)
elif self.dialog.query_language[Panels.Query] == QueryLanguage.XML:
self.dialog.action_oql[Panels.Query].setEnabled(True)
self.dialog.action_xml[Panels.Query].setEnabled(False)
def init_nominatim_autofill(self):
"""Open the nominatim file and start setting up the auto-completion."""
        # Clear any existing completer first, to avoid duplicates when a new one is added.
for line_edit in self.dialog.places_edits.values():
line_edit.setCompleter(None)
user_file = nominatim_file()
with io.open(user_file, 'r', encoding='utf8') as f:
self.last_places = []
for line in f:
self.last_places.append(line.rstrip('\n'))
nominatim_completer = QCompleter(self.last_places)
for line_edit in self.dialog.places_edits.values():
line_edit.setCompleter(nominatim_completer)
line_edit.completer().setCompletionMode(
QCompleter.PopupCompletion)
@staticmethod
def sort_nominatim_places(existing_places: list, place: str) -> list:
"""Helper to sort and limit results of saved nominatim places."""
if place in existing_places:
existing_places.pop(existing_places.index(place))
existing_places.insert(0, place)
existing_places = list(dict.fromkeys(existing_places))
return existing_places[:10]
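    # Illustrative behaviour of sort_nominatim_places (place names are made up):
    #   sort_nominatim_places(['Paris', 'Lyon'], 'Lyon') -> ['Lyon', 'Paris']
    #   sort_nominatim_places(['Paris', 'Lyon'], 'Nice') -> ['Nice', 'Paris', 'Lyon']
    # The latest place always moves to the front and the result is capped at ten
    # unique entries.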
def write_nominatim_file(self, panel: Panels):
"""Write new nominatim value in the file.
:param panel: The panel to use so as to fetch the nominatim value.
:type panel: Panels
"""
value = self.dialog.places_edits[panel].text()
new_list = self.sort_nominatim_places(self.last_places, value)
user_file = nominatim_file()
try:
with io.open(user_file, 'w', encoding='utf8') as f:
for item in new_list:
if item:
f.write('{}\n'.format(item))
except UnicodeDecodeError:
# The file is corrupted ?
# Remove all old places
with io.open(user_file, 'w', encoding='utf8') as f:
f.write('\n')
self.init_nominatim_autofill()
def _core_query_type_updated(self, combo_query_type, widget, spinbox=None, checkbox=None):
"""Enable/disable the extent/layer widget."""
current = combo_query_type.currentData()
if combo_query_type.count() == 2:
# Query tab, widget is the layer selector
if current == 'layer':
widget.setVisible(True)
layer = self.dialog.layers_buttons[self.panel].currentLayer()
if isinstance(layer, QgsVectorLayer):
checkbox.setVisible(True)
else:
checkbox.setVisible(False)
checkbox.setChecked(False)
else:
widget.setVisible(False)
checkbox.setVisible(False)
checkbox.setChecked(False)
else:
# Quick query tab, widget is the stacked widget
if current in ['in', 'around']:
widget.setCurrentIndex(0)
spinbox.setVisible(current == 'around')
elif current in ['layer']:
widget.setCurrentIndex(1)
layer = self.dialog.layers_buttons[self.panel].currentLayer()
if isinstance(layer, QgsVectorLayer):
checkbox.setVisible(True)
else:
checkbox.setVisible(False)
checkbox.setChecked(False)
elif current in ['canvas', 'attributes']:
widget.setCurrentIndex(2)
# TODO remove
def _start_process(self):
"""Make some stuff before launching the process."""
self.dialog.button_show_query.setDisabled(True)
self.dialog.button_generate_query.setDisabled(True)
super()._start_process()
# TODO remove
def _end_process(self):
"""Make some stuff after the process."""
self.dialog.button_show_query.setDisabled(False)
self.dialog.button_generate_query.setDisabled(False)
super()._end_process()
def end_query(self, num_layers):
"""Display the message at the end of the query.
:param num_layers: Number of layers which have been loaded.
        :type num_layers: int
"""
if num_layers:
text = tr(
'Successful query, {} layer(s) has been loaded.').format(
num_layers)
self.dialog.set_progress_text(text)
self.dialog.display_message_bar(text, level=Qgis.Success, duration=5)
else:
self.dialog.set_progress_text(tr('No result'))
self.dialog.display_message_bar(
tr('Successful query, but no result.'),
level=Qgis.Warning, duration=7)
def gather_values(self):
properties = super().gather_values()
place = self.dialog.places_edits[self.panel].text()
if place == '':
place = None
properties['place'] = place
query_type = self.dialog.query_type_buttons[self.panel].currentData()
if query_type in ['in', 'around']:
place = self.dialog.places_edits[self.panel].text()
properties['place'] = place
properties['bbox'] = None
elif query_type in ['canvas', 'layer']:
if query_type == 'canvas':
geom_extent = self.dialog.iface.mapCanvas().extent()
source_crs = self.dialog.iface.mapCanvas().mapSettings().destinationCrs()
elif query_type == 'layer':
# Else if a layer is checked
layer = self.dialog.layers_buttons[self.panel].currentLayer()
if not layer:
raise MissingLayerUI
if self.dialog.selection_features[self.panel].isChecked() \
and isinstance(layer, QgsVectorLayer):
geom_extent = layer.boundingBoxOfSelected()
if geom_extent == QgsRectangle(0, 0, 0, 0):
raise NoSelectedFeatures
else:
geom_extent = layer.extent()
source_crs = layer.crs()
else:
raise NotImplementedError
# noinspection PyArgumentList
geom_extent = QgsGeometry.fromRect(geom_extent)
epsg_4326 = QgsCoordinateReferenceSystem('EPSG:4326')
# noinspection PyArgumentList
crs_transform = QgsCoordinateTransform(
source_crs, epsg_4326, QgsProject.instance())
geom_extent.transform(crs_transform)
properties['bbox'] = geom_extent.boundingBox()
properties['place'] = None
else:
properties['bbox'] = None
if query_type == 'in':
properties['query_type'] = QueryType.InArea
elif query_type == 'around':
properties['query_type'] = QueryType.AroundArea
elif query_type == 'canvas':
properties['query_type'] = QueryType.BBox
elif query_type == 'layer':
properties['query_type'] = QueryType.BBox
elif query_type == 'attributes':
properties['query_type'] = QueryType.NotSpatial
else:
raise NotImplementedError
return properties
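# Shape of the dictionary returned by gather_values() for a 'canvas' query, in
# addition to whatever the parent class gathers (values are illustrative):
#   {'place': None, 'bbox': <QgsRectangle reprojected to EPSG:4326>,
#    'query_type': QueryType.BBox, ...}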
|
gpl-2.0
| 7,361,262,418,288,470,000
| 38.283401
| 101
| 0.601154
| false
| 4.209544
| false
| false
| false
|
hzlf/openbroadcast
|
website/apps/alibrary/migrations/0099_auto__add_distributor.py
|
1
|
56270
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Distributor'
db.create_table('alibrary_distributor', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('legacy_id', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('migrated', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('uuid', self.gf('django.db.models.fields.CharField')(max_length=36, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=400)),
('slug', self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=50, separator=u'-', blank=True, populate_from='name', overwrite=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=50)),
('country', self.gf('django_countries.fields.CountryField')(max_length=2, null=True, blank=True)),
('address', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('email_main', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
('description', self.gf('lib.fields.extra.MarkdownTextField')(null=True, blank=True)),
('first_placeholder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Placeholder'], null=True)),
('created', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateField')(auto_now=True, blank=True)),
('parent', self.gf('mptt.fields.TreeForeignKey')(blank=True, related_name='label_children', null=True, to=orm['alibrary.Distributor'])),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='distributors_owner', null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='distributors_creator', null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('publisher', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='distributors_publisher', null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
('type', self.gf('django.db.models.fields.CharField')(default='unknown', max_length=12)),
('d_tags', self.gf('tagging.fields.TagField')(null=True)),
('description_html', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('alibrary', ['Distributor'])
def backwards(self, orm):
# Deleting model 'Distributor'
db.delete_table('alibrary_distributor')
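    # Typical invocation for this South schema migration (standard South
    # workflow, not something defined in this file):
    #   python manage.py migrate alibrary 0099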
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'alibrary.apilookup': {
'Meta': {'ordering': "('created',)", 'object_name': 'APILookup'},
'api_data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'provider': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50'}),
'ressource_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'alibrary.artist': {
'Meta': {'ordering': "('name',)", 'object_name': 'Artist'},
'aliases': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'aliases_rel_+'", 'null': 'True', 'to': "orm['alibrary.Artist']"}),
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'disable_editing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disable_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disambiguation': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_folder'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'listed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Artist']", 'through': "orm['alibrary.ArtistMembership']", 'symmetrical': 'False'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'professions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Profession']", 'through': "orm['alibrary.ArtistProfessions']", 'symmetrical': 'False'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.artistmembership': {
'Meta': {'object_name': 'ArtistMembership'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_child'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_parent'", 'to': "orm['alibrary.Artist']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_membership_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.artistplugin': {
'Meta': {'object_name': 'ArtistPlugin', 'db_table': "'cmsplugin_artistplugin'", '_ormbases': ['cms.CMSPlugin']},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'alibrary.artistprofessions': {
'Meta': {'object_name': 'ArtistProfessions'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Profession']"})
},
'alibrary.daypart': {
'Meta': {'ordering': "('day', 'time_start')", 'object_name': 'Daypart'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'day': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '1', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_end': ('django.db.models.fields.TimeField', [], {}),
'time_start': ('django.db.models.fields.TimeField', [], {})
},
'alibrary.distributor': {
'Meta': {'ordering': "('name',)", 'object_name': 'Distributor'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_main': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'label_children'", 'null': 'True', 'to': "orm['alibrary.Distributor']"}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '12'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.format': {
'Meta': {'ordering': "('format', 'version')", 'object_name': 'Format'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_price': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'base'", 'max_length': '10'})
},
'alibrary.label': {
'Meta': {'ordering': "('name',)", 'object_name': 'Label'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disable_editing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disable_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_main': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'label_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labelcode': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'listed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'label_children'", 'null': 'True', 'to': "orm['alibrary.Label']"}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '12'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.license': {
'Meta': {'ordering': "('name',)", 'object_name': 'License'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'license_children'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'restricted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'c3cb9777-e3ff-4e41-8cf2-0b19c0e7b258'", 'max_length': '36'})
},
'alibrary.licensetranslation': {
'Meta': {'ordering': "('language_code',)", 'unique_together': "(('language_code', 'master'),)", 'object_name': 'LicenseTranslation', 'db_table': "'alibrary_license_translation'"},
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '15', 'blank': 'True'}),
'license_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['alibrary.License']"}),
'name_translated': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'alibrary.media': {
'Meta': {'ordering': "('tracknumber',)", 'object_name': 'Media'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_artist'", 'null': 'True', 'to': "orm['alibrary.Artist']"}),
'base_bitrate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'base_duration': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'base_filesize': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'base_format': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'base_samplerate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'conversion_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'echoprint_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Artist']", 'null': 'True', 'through': "orm['alibrary.MediaExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isrc': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_license'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'lock': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '1'}),
'master': ('django.db.models.fields.files.FileField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'master_sha1': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'mediatype': ('django.db.models.fields.CharField', [], {'default': "'track'", 'max_length': '12'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_release'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['alibrary.Release']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tracknumber': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.mediaextraartists': {
'Meta': {'ordering': "('profession__name', 'artist__name')", 'object_name': 'MediaExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_media'", 'to': "orm['alibrary.Media']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.mediaformat': {
'Meta': {'ordering': "('name',)", 'object_name': 'Mediaformat'},
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'alibrary.mediaplugin': {
'Meta': {'object_name': 'MediaPlugin', 'db_table': "'cmsplugin_mediaplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'headline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"})
},
'alibrary.playlist': {
'Meta': {'ordering': "('-updated',)", 'object_name': 'Playlist'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'dayparts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'daypart_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Daypart']"}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '12', 'null': 'True'}),
'edit_mode': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.PlaylistItem']", 'null': 'True', 'through': "orm['alibrary.PlaylistItemPlaylist']", 'blank': 'True'}),
'main_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'seasons': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'season_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Season']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_duration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'weather': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'weather_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Weather']"})
},
'alibrary.playlistitem': {
'Meta': {'object_name': 'PlaylistItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.playlistitemplaylist': {
'Meta': {'object_name': 'PlaylistItemPlaylist'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cue_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'cue_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_cross': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.PlaylistItem']"}),
'playlist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Playlist']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.playlistmedia': {
'Meta': {'object_name': 'PlaylistMedia'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cue_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'cue_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_cross': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"}),
'playlist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Playlist']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.profession': {
'Meta': {'ordering': "('name',)", 'object_name': 'Profession'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'alibrary.relation': {
'Meta': {'ordering': "('url',)", 'object_name': 'Relation'},
'action': ('django.db.models.fields.CharField', [], {'default': "'information'", 'max_length': '50'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'default': "'generic'", 'max_length': '50'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'})
},
'alibrary.release': {
'Meta': {'ordering': "('-created',)", 'object_name': 'Release'},
'asin': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'catalognumber': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'cover_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_cover_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Artist']", 'null': 'True', 'through': "orm['alibrary.ReleaseExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_folder'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_label'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['alibrary.Label']"}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_license'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'main_format': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Mediaformat']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'media': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'to': "orm['alibrary.Media']", 'through': "orm['alibrary.ReleaseMedia']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'pressings': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'release_country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'releasedate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'releasedate_approx': ('django_date_extensions.fields.ApproximateDateField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'releasestatus': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'releasetype': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'totaltracks': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.releaseextraartists': {
'Meta': {'object_name': 'ReleaseExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_release'", 'to': "orm['alibrary.Release']"})
},
'alibrary.releasemedia': {
'Meta': {'object_name': 'ReleaseMedia'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Release']"})
},
'alibrary.releaseplugin': {
'Meta': {'object_name': 'ReleasePlugin', 'db_table': "'cmsplugin_releaseplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Release']"})
},
'alibrary.releaserelations': {
'Meta': {'object_name': 'ReleaseRelations'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_relation_relation'", 'to': "orm['alibrary.Relation']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_relation_release'", 'to': "orm['alibrary.Release']"})
},
'alibrary.season': {
'Meta': {'ordering': "('-name',)", 'object_name': 'Season'},
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'alibrary.weather': {
'Meta': {'ordering': "('-name',)", 'object_name': 'Weather'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'arating.vote': {
'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['alibrary']
|
gpl-3.0
| -5,774,735,863,854,991,000
| 98.243386
| 240
| 0.560459
| false
| 3.58819
| false
| false
| false
|
craigbruce/awacs
|
awacs/elasticache.py
|
1
|
2765
|
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action
service_name = 'AWS ElastiCache'
prefix = 'elasticache'
AddTagsToResource = Action(prefix, 'AddTagsToResource')
AuthorizeCacheSecurityGroupIngress = \
Action(prefix, 'AuthorizeCacheSecurityGroupIngress')
CopySnapshot = Action(prefix, 'CopySnapshot')
CreateCacheCluster = Action(prefix, 'CreateCacheCluster')
CreateCacheParameterGroup = Action(prefix, 'CreateCacheParameterGroup')
CreateCacheSecurityGroup = Action(prefix, 'CreateCacheSecurityGroup')
CreateCacheSubnetGroup = Action(prefix, 'CreateCacheSubnetGroup')
CreateReplicationGroup = Action(prefix, 'CreateReplicationGroup')
CreateSnapshot = Action(prefix, 'CreateSnapshot')
DeleteCacheCluster = Action(prefix, 'DeleteCacheCluster')
DeleteCacheParameterGroup = Action(prefix, 'DeleteCacheParameterGroup')
DeleteCacheSecurityGroup = Action(prefix, 'DeleteCacheSecurityGroup')
DeleteCacheSubnetGroup = Action(prefix, 'DeleteCacheSubnetGroup')
DeleteReplicationGroup = Action(prefix, 'DeleteReplicationGroup')
DeleteSnapshot = Action(prefix, 'DeleteSnapshot')
DescribeCacheClusters = Action(prefix, 'DescribeCacheClusters')
DescribeCacheEngineVersions = \
Action(prefix, 'DescribeCacheEngineVersions')
DescribeCacheParameterGroups = \
Action(prefix, 'DescribeCacheParameterGroups')
DescribeCacheParameters = Action(prefix, 'DescribeCacheParameters')
DescribeCacheSecurityGroups = \
Action(prefix, 'DescribeCacheSecurityGroups')
DescribeCacheSubnetGroups = \
Action(prefix, 'DescribeCacheSubnetGroups')
DescribeEngineDefaultParameters = \
Action(prefix, 'DescribeEngineDefaultParameters')
DescribeEvents = Action(prefix, 'DescribeEvents')
DescribeReplicationGroups = Action(prefix, 'DescribeReplicationGroups')
DescribeReservedCacheNodes = \
Action(prefix, 'DescribeReservedCacheNodes')
DescribeReservedCacheNodesOfferings = \
Action(prefix, 'DescribeReservedCacheNodesOfferings')
DescribeSnapshots = Action(prefix, 'DescribeSnapshots')
ListTagsForResource = Action(prefix, 'ListTagsForResource')
ModifyCacheCluster = Action(prefix, 'ModifyCacheCluster')
ModifyCacheParameterGroup = Action(prefix, 'ModifyCacheParameterGroup')
ModifyCacheSubnetGroup = Action(prefix, 'ModifyCacheSubnetGroup')
ModifyReplicationGroup = Action(prefix, 'ModifyReplicationGroup')
PurchaseReservedCacheNodesOffering = \
Action(prefix, 'PurchaseReservedCacheNodesOffering')
RebootCacheCluster = Action(prefix, 'RebootCacheCluster')
RemoveTagsFromResource = Action(prefix, 'RemoveTagsFromResource')
ResetCacheParameterGroup = Action(prefix, 'ResetCacheParameterGroup')
RevokeCacheSecurityGroupIngress = \
Action(prefix, 'RevokeCacheSecurityGroupIngress')
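# A minimal usage sketch: combining a few of the Action constants above into a
# read-only IAM policy. It assumes awacs.aws provides Allow, Policy and
# Statement (as in current awacs releases); the statement itself is only an
# illustration, not a recommended policy.
if __name__ == '__main__':
    from awacs.aws import Allow, Policy, Statement

    read_only = Statement(
        Effect=Allow,
        Action=[DescribeCacheClusters, DescribeEvents, ListTagsForResource],
        Resource=['*'],
    )
    print(Policy(Version='2012-10-17', Statement=[read_only]).to_json())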
|
bsd-2-clause
| 2,653,114,337,146,885,000
| 47.508772
| 71
| 0.824955
| false
| 4.227829
| false
| false
| false
|
christabor/MoAL
|
MOAL/languages/formal_language_theory/grammars/context_sensitive.py
|
1
|
4856
|
# -*- coding: utf-8 -*-
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
from MOAL.helpers.display import prnt
from random import choice
from MOAL.languages.formal_language_theory.grammars.context_free \
import ContextFreeGrammar
DEBUG = True if __name__ == '__main__' else False
class ContextSensitiveGrammar(ContextFreeGrammar):
DEBUG = True
def __init__(self):
self.rule_divider = ':'
super(ContextSensitiveGrammar, self).__init__()
self.DEBUG = ContextSensitiveGrammar.DEBUG
@staticmethod
def get_substr_match(rule, string):
"""Return the index of the last matching item of the two strings.
        e.g. index = 4 for '[ab]cd' and '[ab]zd', since the first four
        characters '[ab]' match. If the index is the same as the length
        then the strings simply match.
"""
        if not len(rule) <= len(string):
raise Exception('Invalid string.')
# Degenerate case
if rule == string:
return len(rule)
rule = list(rule)
string = list(string)
index = 0
for k, letter in enumerate(rule):
if rule[k] != string[k]:
return index
else:
index += 1
return index
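    # Illustrative behaviour, mirroring the asserts exercised at the bottom of
    # this module: get_substr_match('a b c', 'a b c') returns 5 (a full match),
    # while get_substr_match('a C', 'a C d EE') returns 3, the length of the
    # longest matching prefix.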
@staticmethod
def simple_rule(rule, string=''):
_rule = list(rule)
_string = list(string)
if ContextSensitiveGrammar.DEBUG:
print('simple rule: {} and string: {}'.format(_rule, _string))
# We only replace tokens that match a rule.
# The rest remain unchanged.
for k, char in enumerate(string):
if char in _rule:
_string[k] = ''.join(string).replace(' ', '')
# Replace the token with the rules' string
_string[k] = ContextSensitiveGrammar.simple_rule(
rule, string=_string)
ret = ''.join(_string)
if ContextSensitiveGrammar.DEBUG:
prnt('simple rule retval: ', ret)
return ret
def _evaluate(self, groups, evaluation=''):
for group in groups:
left, right = group
evaluation += ''.join(right)
return evaluation
def evaluate(self, tokens=None, evaluation=''):
"""A basic parser for a custom attribute grammar.
One thing to note is that ambiguous grammars need to be iterated over,
since the duplicate rules can't be mapped via dictionary key.
Unambiguous grammars are therefore more performant,
because the lookup is O(1) vs. O(N).
"""
if tokens is None:
if hasattr(self, 'tokens'):
tokens = self.tokens
else:
raise ContextFreeGrammar.InvalidTokenSet
expressions = [r[0] for r in self.rules]
tokens = [r[1] for r in self.rules]
groups = [[
expressions[k],
tokens[k].split(' ')] for k, _ in enumerate(tokens)
]
prnt('Groups', groups)
evaluation = self._evaluate(groups, evaluation='')
new_tokens = list(evaluation)
for token in new_tokens:
for expression in expressions:
if token in list(expression):
token = self._evaluate(groups, evaluation=evaluation)
if ContextSensitiveGrammar.DEBUG:
print('Final evaluation in `evaluate`: {}'.format(
evaluation, ''.join(new_tokens)))
return evaluation
if DEBUG:
with Section('Grammar parser - basic'):
"""https://en.wikipedia.org/wiki/Context-sensitive_grammar#Examples"""
_csg = ContextSensitiveGrammar
csg_rules = [
'S => a b c',
'S => a S B c',
'c B => W B',
'W B => W X',
'W X => B X',
'B X => B c',
'b B => b b',
]
csg = ContextSensitiveGrammar()
csg.set_rules(csg_rules)
tokens = [choice(
['S', 'S', 'c B', 'W B', 'W X', 'B X', 'b B']) for _ in range(4)]
prnt('Tokens:', tokens)
csg.evaluate(tokens=tokens)
csg.evaluate(tokens=['S'])
# Testing/staticmethods
_csg.simple_rule('S', 'aSaSbb$')
_csg.simple_rule('X', 'aBcXaa')
csg.evaluate(tokens=['S', 'B', 'B X'])
assert len('a b c') == _csg.get_substr_match('a b c', 'a b c')
assert len('a C d') == _csg.get_substr_match('a C d', 'a C d EE')
assert len('a C') == _csg.get_substr_match('a C', 'a C d EE')
assert len('a C d E') == _csg.get_substr_match('a C d E', 'a C d EE')
assert not len('a C d') == _csg.get_substr_match('a C d E', 'a C d EE')
assert not len('a C d') == _csg.get_substr_match('a c d', 'a C d')
|
apache-2.0
| 4,727,653,786,450,231,000
| 33.935252
| 79
| 0.545923
| false
| 3.900402
| false
| false
| false
|
SnowWalkerJ/quantlib
|
quant/data/wind/tables/sindexperformance.py
|
1
|
3151
|
from ....common.db.sql import VARCHAR, Numeric as NUMBER, DateTime as DATETIME, Column, BaseModel, CLOB, DATE
VARCHAR2 = VARCHAR
class SIndexPerformance(BaseModel):
"""
    4.89 China Stock Index Performance
    Attributes
    ----------
    object_id: VARCHAR2(100)
        Object ID
    s_info_windcode: VARCHAR2(40)
        Wind code
    trade_dt: VARCHAR2(8)
        Trade date
    pct_chg_recent1m: NUMBER(20,6)
        Percentage change over the last month
    pct_chg_recent3m: NUMBER(20,6)
        Percentage change over the last 3 months
    pct_chg_recent6m: NUMBER(20,6)
        Percentage change over the last 6 months
    pct_chg_recent1y: NUMBER(20,6)
        Percentage change over the last year
    pct_chg_recent2y: NUMBER(20,6)
        Percentage change over the last 2 years
    pct_chg_recent3y: NUMBER(20,6)
        Percentage change over the last 3 years
    pct_chg_recent4y: NUMBER(20,6)
        Percentage change over the last 4 years
    pct_chg_recent5y: NUMBER(20,6)
        Percentage change over the last 5 years
    pct_chg_recent6y: NUMBER(20,6)
        Percentage change over the last 6 years
    pct_chg_thisweek: NUMBER(20,6)
        Percentage change week-to-date
    pct_chg_thismonth: NUMBER(20,6)
        Percentage change month-to-date
    pct_chg_thisquarter: NUMBER(20,6)
        Percentage change quarter-to-date
    pct_chg_thisyear: NUMBER(20,6)
        Percentage change year-to-date
    si_pct_chg: NUMBER(20,6)
        Percentage change since index publication
    annualyeild: NUMBER(20,6)
        Annualized return
    std_dev_6m: NUMBER(20,6)
        6-month standard deviation
    std_dev_1y: NUMBER(20,6)
        1-year standard deviation
    std_dev_2y: NUMBER(20,6)
        2-year standard deviation
    std_dev_3y: NUMBER(20,6)
        3-year standard deviation
    sharpratio_6m: NUMBER(20,6)
        6-month Sharpe ratio
    sharpratio_1y: NUMBER(20,6)
        1-year Sharpe ratio
    sharpratio_2y: NUMBER(20,6)
        2-year Sharpe ratio
    sharpratio_3y: NUMBER(20,6)
        3-year Sharpe ratio
    opdate: DATETIME
        opdate
    opmode: VARCHAR(1)
        opmode
"""
__tablename__ = "SIndexPerformance"
object_id = Column(VARCHAR2(100), primary_key=True)
s_info_windcode = Column(VARCHAR2(40))
trade_dt = Column(VARCHAR2(8))
pct_chg_recent1m = Column(NUMBER(20,6))
pct_chg_recent3m = Column(NUMBER(20,6))
pct_chg_recent6m = Column(NUMBER(20,6))
pct_chg_recent1y = Column(NUMBER(20,6))
pct_chg_recent2y = Column(NUMBER(20,6))
pct_chg_recent3y = Column(NUMBER(20,6))
pct_chg_recent4y = Column(NUMBER(20,6))
pct_chg_recent5y = Column(NUMBER(20,6))
pct_chg_recent6y = Column(NUMBER(20,6))
pct_chg_thisweek = Column(NUMBER(20,6))
pct_chg_thismonth = Column(NUMBER(20,6))
pct_chg_thisquarter = Column(NUMBER(20,6))
pct_chg_thisyear = Column(NUMBER(20,6))
si_pct_chg = Column(NUMBER(20,6))
annualyeild = Column(NUMBER(20,6))
std_dev_6m = Column(NUMBER(20,6))
std_dev_1y = Column(NUMBER(20,6))
std_dev_2y = Column(NUMBER(20,6))
std_dev_3y = Column(NUMBER(20,6))
sharpratio_6m = Column(NUMBER(20,6))
sharpratio_1y = Column(NUMBER(20,6))
sharpratio_2y = Column(NUMBER(20,6))
sharpratio_3y = Column(NUMBER(20,6))
opdate = Column(DATETIME)
opmode = Column(VARCHAR(1))
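# A minimal query sketch, assuming an engine and session are configured for
# the underlying Wind database elsewhere in the project; the connection URL
# and the '000300.SH' index code below are placeholders for illustration only.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('oracle://user:password@wind')  # placeholder DSN
    session = sessionmaker(bind=engine)()
    latest = (
        session.query(SIndexPerformance)
        .filter(SIndexPerformance.s_info_windcode == '000300.SH')
        .order_by(SIndexPerformance.trade_dt.desc())
        .limit(5)
        .all()
    )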
|
gpl-3.0
| 941,308,491,787,101,700
| 28.091837
| 109
| 0.593476
| false
| 2.229085
| false
| false
| false
|
joeyoung658/A-Level_2016-18
|
Challenges/Hangman/Everyones/Samuel/hangman.py
|
1
|
2594
|
""" Hangman Game (v1.0)
Name: samuel armstrong
Date:
"""
import random
count=0
def load_file(filename):
""" Function to return a word list from a plain text file;
Note: You will need to open the file, read and append
each line to an array (or list), close the file and
then return the array.
"""
word_list = []
with open(filename, "r") as file:
for line in file:
word_list.append(line.replace("\n", "").lower())
return word_list
def select_word(word_list):
""" Function to return a random word from an array of words;
Note: You will need to import the random module and use
random.randint() to select a random index in the array.
"""
rand = random.randint(0, len(word_list)-1)
word = (word_list[rand])
return word
def find_character(char, word,):
global count
""" Function to return the position(s) of a character in word;
Note: This should return the index of the character as an integer,
if the character is not found, it should return a value of -1
"""
index = 0
while index < len(word):
index = word.find(char, index)
if index == -1:
print ("letter not found within the word")
break
index = index + 1
        print('your character was found at position: ', index)
count = count + 1
## cant append to list correctly
## indices=[]
## indices.append(word.find(char, index))
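# A small sketch of the index-collecting idea that the commented-out lines
# above gave up on: gather every position of the guessed letter in one pass.
# It is not wired into the game loop below; it is only an illustration.
def find_all_positions(char, word):
    return [i for i, letter in enumerate(word) if letter == char]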
def main():
global count
""" Note: This is your main function and should contain your game loop.
"""
i= input ("would you like to play a game y/n: ").lower()
while i== "y":
fudge = "word_list.txt"
attempts_remaining = 10
word_list = load_file("word_list.txt")
word = select_word (word_list)
print ("the word is" , len(word), " letters long")
while attempts_remaining !=0:
char = input ("letter: ")
char_pos = find_character (char, word,)
attempts_remaining = attempts_remaining -1
print ("attempts remaining: ",attempts_remaining)
if count == len(word):
print ("well done you have got all the letter the word was",word)
i= input ("would you like to play a game y/n: ").lower()
break
print ("game over")
if __name__ == "__main__":
main()
|
gpl-3.0
| -9,055,671,597,323,212,000
| 29.880952
| 81
| 0.543177
| false
| 4.183871
| false
| false
| false
|
Mnk3y/plugin.video.lastship
|
lastship.py
|
1
|
10683
|
# -*- coding: UTF-8 -*-
"""
Lastship Add-on (C) 2017
Credits to Placenta and Covenant; our thanks go to their creators
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Addon Name: Lastship
# Addon id: plugin.video.lastship
# Addon Provider: LastShip
import urlparse,sys,urllib
from resources.lib.modules import control
from resources.lib.modules import cache
from resources.lib.modules import views
from resources.lib.modules import playcount
from resources.lib.modules import trailer
from resources.lib.modules import trakt
from resources.lib.modules import sources
from resources.lib.modules import downloader
from resources.lib.modules import libtools
from resources.lib.indexers import navigator
from resources.lib.indexers import movies
from resources.lib.indexers import channels
from resources.lib.indexers import tvshows
from resources.lib.indexers import episodes
import xbmcgui
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
action = params.get('action')
name = params.get('name')
title = params.get('title')
year = params.get('year')
imdb = params.get('imdb')
tvdb = params.get('tvdb')
tmdb = params.get('tmdb')
season = params.get('season')
episode = params.get('episode')
tvshowtitle = params.get('tvshowtitle')
premiered = params.get('premiered')
url = params.get('url')
image = params.get('image')
meta = params.get('meta')
select = params.get('select')
query = params.get('query')
source = params.get('source')
content = params.get('content')
windowedtrailer = params.get('windowedtrailer')
windowedtrailer = int(windowedtrailer) if windowedtrailer in ("0","1") else 0
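# For example, a call such as
#   plugin://plugin.video.lastship/?action=movies&url=popular
# reaches this script with sys.argv[2] == '?action=movies&url=popular', so the
# params dict built above becomes {'action': 'movies', 'url': 'popular'} and
# the dispatch chain below routes it to movies.movies().get(url).
# (The 'popular' url value is only an illustrative placeholder.)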
if action == None:
cache.cache_version_check()
navigator.navigator().root()
elif action == 'newsNavigator':
navigator.navigator().news()
elif action == 'movieNavigator':
navigator.navigator().movies()
elif action == 'movieliteNavigator':
navigator.navigator().movies(lite=True)
elif action == 'mymovieNavigator':
navigator.navigator().mymovies()
elif action == 'mymovieliteNavigator':
navigator.navigator().mymovies(lite=True)
elif action == 'tvNavigator':
navigator.navigator().tvshows()
elif action == 'tvliteNavigator':
navigator.navigator().tvshows(lite=True)
elif action == 'mytvNavigator':
navigator.navigator().mytvshows()
elif action == 'mytvliteNavigator':
navigator.navigator().mytvshows(lite=True)
elif action == 'downloadNavigator':
navigator.navigator().downloads()
elif action == 'libraryNavigator':
navigator.navigator().library()
elif action == 'toolNavigator':
navigator.navigator().tools()
elif action == 'searchNavigator':
if not control.setting('search.quick') == '0':
searchSelect = xbmcgui.Dialog().select(control.lang(32010).encode('utf-8'),
[
control.lang(32001).encode('utf-8'),
control.lang(32002).encode('utf-8'),
control.lang(32029).encode('utf-8'),
control.lang(32030).encode('utf-8')
])
if searchSelect == 0:
movies.movies().search()
movies.movies().search_new()
elif searchSelect == 1:
tvshows.tvshows().search()
tvshows.tvshows().search_new()
elif searchSelect == 2:
movies.movies().person()
elif searchSelect == 3:
tvshows.tvshows().person()
else:
pass
else:
navigator.navigator().search()
elif action == 'viewsNavigator':
navigator.navigator().views()
elif action == 'clearCache':
navigator.navigator().clearCache()
elif action == 'clearCacheSearch':
navigator.navigator().clearCacheSearch()
elif action == 'clearCacheAll':
navigator.navigator().clearCacheAll()
elif action == 'clearCacheMeta':
navigator.navigator().clearCacheMeta()
elif action == 'infoCheck':
navigator.navigator().infoCheck('')
elif action == 'movies':
movies.movies().get(url)
elif action == 'moviePage':
movies.movies().get(url)
elif action == 'movieWidget':
movies.movies().widget()
elif action == 'movieSearch':
movies.movies().search()
elif action == 'movieSearchnew':
movies.movies().search_new()
elif action == 'movieSearchterm':
movies.movies().search_term(name)
elif action == 'moviePerson':
movies.movies().person()
elif action == 'movieGenres':
movies.movies().genres()
elif action == 'movieLanguages':
movies.movies().languages()
elif action == 'movieCertificates':
movies.movies().certifications()
elif action == 'movieYears':
movies.movies().years()
elif action == 'moviePersons':
movies.movies().persons(url)
elif action == 'movieUserlists':
movies.movies().userlists()
elif action == 'channels':
channels.channels().get()
elif action == 'tvshows':
tvshows.tvshows().get(url)
elif action == 'tvshowPage':
tvshows.tvshows().get(url)
elif action == 'tvSearch':
tvshows.tvshows().search()
elif action == 'tvSearchnew':
tvshows.tvshows().search_new()
elif action == 'tvSearchterm':
tvshows.tvshows().search_term(name)
elif action == 'tvPerson':
tvshows.tvshows().person()
elif action == 'tvGenres':
tvshows.tvshows().genres()
elif action == 'tvNetworks':
tvshows.tvshows().networks()
elif action == 'tvLanguages':
tvshows.tvshows().languages()
elif action == 'tvCertificates':
tvshows.tvshows().certifications()
elif action == 'tvPersons':
tvshows.tvshows().persons(url)
elif action == 'tvUserlists':
tvshows.tvshows().userlists()
elif action == 'seasons':
episodes.seasons().get(tvshowtitle, year, imdb, tvdb)
elif action == 'episodes':
episodes.episodes().get(tvshowtitle, year, imdb, tvdb, season, episode)
elif action == 'calendar':
episodes.episodes().calendar(url)
elif action == 'tvWidget':
episodes.episodes().widget()
elif action == 'calendars':
episodes.episodes().calendars()
elif action == 'episodeUserlists':
episodes.episodes().userlists()
elif action == 'refresh':
control.refresh()
elif action == 'queueItem':
control.queueItem()
elif action == 'openSettings':
control.openSettings(query)
elif action == 'artwork':
control.artwork()
elif action == 'addView':
views.addView(content)
elif action == 'moviePlaycount':
playcount.movies(imdb, query)
elif action == 'episodePlaycount':
playcount.episodes(imdb, tvdb, season, episode, query)
elif action == 'tvPlaycount':
playcount.tvshows(name, imdb, tvdb, season, query)
elif action == 'trailer':
trailer.trailer().play(name, url, windowedtrailer)
elif action == 'traktManager':
trakt.manager(name, imdb, tvdb, content)
elif action == 'authTrakt':
trakt.authTrakt()
elif action == 'urlResolver':
try: import urlresolver
except: pass
urlresolver.display_settings()
elif action == 'download':
import json
try: downloader.download(name, image, sources.sources().sourcesResolve(json.loads(source)[0], True))
except: pass
elif action == 'play':
sources.sources().play(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, meta, select)
elif action == 'addItem':
sources.sources().addItem(title)
elif action == 'playItem':
sources.sources().playItem(title, source)
elif action == 'alterSources':
sources.sources().alterSources(url, meta)
elif action == 'clearSources':
sources.sources().clearSources()
elif action == 'random':
rtype = params.get('rtype')
if rtype == 'movie':
rlist = movies.movies().get(url, create_directory=False)
r = sys.argv[0]+"?action=play"
elif rtype == 'episode':
rlist = episodes.episodes().get(tvshowtitle, year, imdb, tvdb, season, create_directory=False)
r = sys.argv[0]+"?action=play"
elif rtype == 'season':
rlist = episodes.seasons().get(tvshowtitle, year, imdb, tvdb, create_directory=False)
r = sys.argv[0]+"?action=random&rtype=episode"
elif rtype == 'show':
rlist = tvshows.tvshows().get(url, create_directory=False)
r = sys.argv[0]+"?action=random&rtype=season"
from random import randint
import json
try:
rand = randint(1,len(rlist))-1
for p in ['title','year','imdb','tvdb','season','episode','tvshowtitle','premiered','select']:
if rtype == "show" and p == "tvshowtitle":
try: r += '&'+p+'='+urllib.quote_plus(rlist[rand]['title'])
except: pass
else:
try: r += '&'+p+'='+urllib.quote_plus(rlist[rand][p])
except: pass
try: r += '&meta='+urllib.quote_plus(json.dumps(rlist[rand]))
except: r += '&meta='+urllib.quote_plus("{}")
if rtype == "movie":
try: control.infoDialog(rlist[rand]['title'], control.lang(32536).encode('utf-8'), time=30000)
except: pass
elif rtype == "episode":
try: control.infoDialog(rlist[rand]['tvshowtitle']+" - Season "+rlist[rand]['season']+" - "+rlist[rand]['title'], control.lang(32536).encode('utf-8'), time=30000)
except: pass
control.execute('RunPlugin(%s)' % r)
except:
control.infoDialog(control.lang(32537).encode('utf-8'), time=8000)
elif action == 'movieToLibrary':
libtools.libmovies().add(name, title, year, imdb, tmdb)
elif action == 'moviesToLibrary':
libtools.libmovies().range(url)
elif action == 'moviesToLibrarySilent':
libtools.libmovies().silent(url)
elif action == 'tvshowToLibrary':
libtools.libtvshows().add(tvshowtitle, year, imdb, tvdb)
elif action == 'tvshowsToLibrary':
libtools.libtvshows().range(url)
elif action == 'tvshowsToLibrarySilent':
libtools.libtvshows().silent(url)
elif action == 'updateLibrary':
libtools.libepisodes().update(query)
elif action == 'service':
libtools.libepisodes().service()
|
gpl-3.0
| -6,966,446,947,023,699,000
| 26.748052
| 174
| 0.65403
| false
| 3.611562
| false
| false
| false
|
Freeseer/freeseer
|
src/freeseer/plugins/output/videopreview/widget.py
|
1
|
1906
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
freeseer - vga/presentation capture software
Copyright (C) 2013 Free and Open Source Software Learning Centre
http://fosslc.org
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For support, questions, suggestions or any other inquiries, visit:
http://wiki.github.com/Freeseer/freeseer/
@author: Thanh Ha
'''
from PyQt4.QtGui import QComboBox
from PyQt4.QtGui import QFormLayout
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QWidget
class ConfigWidget(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
layout = QFormLayout()
self.setLayout(layout)
# Preview
self.previewLabel = QLabel("Preview")
self.previewComboBox = QComboBox()
self.previewComboBox.addItem("autovideosink")
self.previewComboBox.addItem("ximagesink")
self.previewComboBox.addItem("xvimagesink")
self.previewComboBox.addItem("gconfvideosink")
layout.addRow(self.previewLabel, self.previewComboBox)
# Leaky Queue
# Allows user to set queue in video to be leaky - required to work with RTMP streaming plugin
self.leakyQueueLabel = QLabel("Leaky Queue")
self.leakyQueueComboBox = QComboBox()
layout.addRow(self.leakyQueueLabel, self.leakyQueueComboBox)
|
gpl-3.0
| 1,840,719,251,519,555,600
| 31.862069
| 101
| 0.730325
| false
| 3.921811
| false
| false
| false
|
bstroebl/xplanplugin
|
HandleDb.py
|
1
|
2613
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
XPlan
A QGIS plugin
Fachschale XPlan für XPlanung
-------------------
begin : 2011-03-08
copyright : (C) 2011 by Bernhard Stroebl, KIJ/DV
email : bernhard.stroebl@jena.de
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import object
from qgis.PyQt import QtCore, QtSql
from qgis.gui import *
from qgis.core import *
class DbHandler(object):
    '''class to handle a QtSql.QSqlDatabase connection to a PostgreSQL server'''
def __init__(self, iface, tools):
self.iface = iface
self.tools = tools
self.db = None
def dbConnect(self, thisPassword = None):
s = QtCore.QSettings( "XPlanung", "XPlanung-Erweiterung" )
service = ( s.value( "service", "" ) )
host = ( s.value( "host", "" ) )
port = ( s.value( "port", "5432" ) )
database = ( s.value( "dbname", "" ) )
authcfg = s.value( "authcfg", "" )
username, passwd, authcfg = self.tools.getAuthUserNamePassword(authcfg)
if authcfg == None:
username = ( s.value( "uid", "" ) )
passwd = ( s.value( "pwd", "" ) )
if thisPassword:
passwd = thisPassword
# connect to DB
db = QtSql.QSqlDatabase.addDatabase ("QPSQL", "XPlanung")
db.setHostName(host)
db.setPort(int(port))
db.setDatabaseName(database)
db.setUserName(username)
db.setPassword(passwd)
        db.authcfg = authcfg # for DDIM
ok2 = db.open()
if not ok2:
self.iface.messageBar().pushMessage("Fehler", \
u"Konnte keine Verbindung mit der Datenbank aufbauen", \
level=Qgis.Critical)
return None
else:
return db
def dbDisconnect(self, db):
db.close()
db = None
|
gpl-2.0
| 514,960,653,646,335,300
| 34.767123
| 81
| 0.458445
| false
| 4.287356
| false
| false
| false
|
ipfire/ddns
|
src/ddns/system.py
|
1
|
12221
|
#!/usr/bin/python3
###############################################################################
# #
# ddns - A dynamic DNS client for IPFire #
# Copyright (C) 2012 IPFire development team #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import base64
import re
import ssl
import socket
import urllib.request
import urllib.parse
import urllib.error
from .__version__ import CLIENT_VERSION
from .errors import *
from .i18n import _
# Initialize the logger.
import logging
logger = logging.getLogger("ddns.system")
logger.propagate = 1
class DDNSSystem(object):
"""
The DDNSSystem class adds a layer of abstraction
between the ddns software and the system.
"""
# The default useragent.
USER_AGENT = "IPFireDDNSUpdater/%s" % CLIENT_VERSION
def __init__(self, core):
# Connection to the core of the program.
self.core = core
# Address cache.
self.__addresses = {}
# Find out on which distribution we are running.
self.distro = self._get_distro_identifier()
logger.debug(_("Running on distribution: %s") % self.distro)
@property
def proxy(self):
proxy = self.core.settings.get("proxy")
# Strip http:// at the beginning.
if proxy and proxy.startswith("http://"):
proxy = proxy[7:]
return proxy
def get_local_ip_address(self, proto):
ip_address = self._get_local_ip_address(proto)
# Check if the IP address is usable and only return it then
if self._is_usable_ip_address(proto, ip_address):
return ip_address
def _get_local_ip_address(self, proto):
# Legacy code for IPFire 2.
if self.distro == "ipfire-2" and proto == "ipv4":
try:
with open("/var/ipfire/red/local-ipaddress") as f:
return f.readline()
except IOError as e:
# File not found
if e.errno == 2:
return
raise
# XXX TODO
raise NotImplementedError
def _guess_external_ip_address(self, url, timeout=10):
"""
Sends a request to an external web server
to determine the current default IP address.
"""
try:
response = self.send_request(url, timeout=timeout)
# If the server could not be reached, we will return nothing.
except DDNSNetworkError:
return
if not response.code == 200:
return
match = re.search(b"^Your IP address is: (.*)$", response.read())
if match is None:
return
return match.group(1).decode()
def guess_external_ip_address(self, family, **kwargs):
if family == "ipv6":
url = "https://checkip6.dns.lightningwirelabs.com"
elif family == "ipv4":
url = "https://checkip4.dns.lightningwirelabs.com"
else:
raise ValueError("unknown address family")
return self._guess_external_ip_address(url, **kwargs)
def send_request(self, url, method="GET", data=None, username=None, password=None, timeout=30):
assert method in ("GET", "POST")
# Add all arguments in the data dict to the URL and escape them properly.
if method == "GET" and data:
query_args = self._format_query_args(data)
data = None
if "?" in url:
url = "%s&%s" % (url, query_args)
else:
url = "%s?%s" % (url, query_args)
logger.debug("Sending request (%s): %s" % (method, url))
if data:
logger.debug(" data: %s" % data)
req = urllib.request.Request(url, data=data)
if username and password:
basic_auth_header = self._make_basic_auth_header(username, password)
req.add_header("Authorization", "Basic %s" % basic_auth_header.decode())
# Set the user agent.
req.add_header("User-Agent", self.USER_AGENT)
# All requests should not be cached anywhere.
req.add_header("Pragma", "no-cache")
# Set the upstream proxy if needed.
if self.proxy:
logger.debug("Using proxy: %s" % self.proxy)
# Configure the proxy for this request.
req.set_proxy(self.proxy, "http")
assert req.get_method() == method
logger.debug(_("Request header:"))
for k, v in req.headers.items():
logger.debug(" %s: %s" % (k, v))
try:
resp = urllib.request.urlopen(req, timeout=timeout)
# Log response header.
logger.debug(_("Response header (Status Code %s):") % resp.code)
for k, v in resp.info().items():
logger.debug(" %s: %s" % (k, v))
# Return the entire response object.
return resp
except urllib.error.HTTPError as e:
# Log response header.
logger.debug(_("Response header (Status Code %s):") % e.code)
for k, v in e.hdrs.items():
logger.debug(" %s: %s" % (k, v))
# 400 - Bad request
if e.code == 400:
raise DDNSRequestError(e.reason)
# 401 - Authorization Required
# 403 - Forbidden
elif e.code in (401, 403):
raise DDNSAuthenticationError(e.reason)
# 404 - Not found
# Either the provider has changed the API, or
# there is an error on the server
elif e.code == 404:
raise DDNSNotFound(e.reason)
# 429 - Too Many Requests
elif e.code == 429:
raise DDNSTooManyRequests(e.reason)
# 500 - Internal Server Error
elif e.code == 500:
raise DDNSInternalServerError(e.reason)
# 503 - Service Unavailable
elif e.code == 503:
raise DDNSServiceUnavailableError(e.reason)
# Raise all other unhandled exceptions.
raise
except urllib.error.URLError as e:
if e.reason:
# Handle SSL errors
if isinstance(e.reason, ssl.SSLError):
e = e.reason
if e.reason == "CERTIFICATE_VERIFY_FAILED":
raise DDNSCertificateError
# Raise all other SSL errors
raise DDNSSSLError(e.reason)
# Name or service not known
if e.reason.errno == -2:
raise DDNSResolveError
# Network Unreachable (e.g. no IPv6 access)
if e.reason.errno == 101:
raise DDNSNetworkUnreachableError
# Connection Refused
elif e.reason.errno == 111:
raise DDNSConnectionRefusedError
# No route to host
elif e.reason.errno == 113:
raise DDNSNoRouteToHostError(req.host)
# Raise all other unhandled exceptions.
raise
except socket.timeout as e:
logger.debug(_("Connection timeout"))
raise DDNSConnectionTimeoutError
def _format_query_args(self, data):
args = []
for k, v in data.items():
arg = "%s=%s" % (k, urllib.parse.quote(v))
args.append(arg)
return "&".join(args)
def _make_basic_auth_header(self, username, password):
authstring = "%s:%s" % (username, password)
# Encode authorization data in base64.
authstring = base64.b64encode(authstring.encode())
return authstring
def get_address(self, proto):
"""
Returns the current IP address for
the given IP protocol.
"""
try:
return self.__addresses[proto]
# IP is currently unknown and needs to be retrieved.
except KeyError:
self.__addresses[proto] = address = \
self._get_address(proto)
return address
def _get_address(self, proto):
assert proto in ("ipv6", "ipv4")
# IPFire 2 does not support IPv6.
if self.distro == "ipfire-2" and proto == "ipv6":
return
# Check if the external IP address should be guessed from
# a remote server.
guess_ip = self.core.settings.get("guess_external_ip", "true")
guess_ip = guess_ip in ("true", "yes", "1")
# Get the local IP address.
local_ip_address = None
if not guess_ip:
try:
local_ip_address = self.get_local_ip_address(proto)
except NotImplementedError:
logger.warning(_("Falling back to check the IP address with help of a public server"))
# If no local IP address could be determined, we will fall back to the guess
# it with help of an external server...
if not local_ip_address:
local_ip_address = self.guess_external_ip_address(proto)
return local_ip_address
def _is_usable_ip_address(self, proto, address):
"""
Returns True is the local IP address is usable
for dynamic DNS (i.e. is not a RFC1918 address or similar).
"""
if proto == "ipv4":
# This is not the most perfect solution to match
# these addresses, but instead of pulling in an entire
# library to handle the IP addresses better, we match
# with regular expressions instead.
matches = (
# RFC1918 address space
r"^10\.\d+\.\d+\.\d+$",
r"^192\.168\.\d+\.\d+$",
r"^172\.(1[6-9]|2[0-9]|31)\.\d+\.\d+$",
# Dual Stack Lite address space
r"^100\.(6[4-9]|[7-9][0-9]|1[01][0-9]|12[0-7])\.\d+\.\d+$",
)
for match in matches:
m = re.match(match, address)
if m is None:
continue
# Found a match. IP address is not usable.
return False
# In all other cases, return OK.
return True
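        # The library-based alternative alluded to above would look roughly
        # like this sketch (it assumes the stdlib ipaddress module, which this
        # file deliberately avoids pulling in):
        #
        #     addr = ipaddress.ip_address(address)
        #     usable = not (addr.is_private or addr.is_loopback or addr.is_link_local)
        #
        # Note that the Dual Stack Lite range (100.64.0.0/10) matched above may
        # not be covered by is_private, depending on the Python version.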
def resolve(self, hostname, proto=None):
addresses = []
if proto is None:
family = 0
elif proto == "ipv6":
family = socket.AF_INET6
elif proto == "ipv4":
family = socket.AF_INET
else:
raise ValueError("Protocol not supported: %s" % proto)
# Resolve the host address.
try:
response = socket.getaddrinfo(hostname, None, family)
except socket.gaierror as e:
# Name or service not known
if e.errno == -2:
return []
# Temporary failure in name resolution
elif e.errno == -3:
raise DDNSResolveError(hostname)
# No record for requested family available (e.g. no AAAA)
elif e.errno == -5:
return []
raise
# Handle responses.
for family, socktype, proto, canonname, sockaddr in response:
# IPv6
if family == socket.AF_INET6:
address, port, flow_info, scope_id = sockaddr
# Only use the global scope.
if not scope_id == 0:
continue
# IPv4
elif family == socket.AF_INET:
address, port = sockaddr
# Ignore everything else...
else:
continue
            # Add to the response list if it is not already in there.
if address not in addresses:
addresses.append(address)
return addresses
def _get_distro_identifier(self):
"""
Returns a unique identifier for the distribution
we are running on.
"""
os_release = self.__parse_os_release()
if os_release:
return os_release
system_release = self.__parse_system_release()
if system_release:
return system_release
# If nothing else could be found, we return
# just "unknown".
return "unknown"
def __parse_os_release(self):
"""
Tries to parse /etc/os-release and
returns a unique distribution identifier
if the file exists.
"""
try:
f = open("/etc/os-release", "r")
except IOError as e:
# File not found
if e.errno == 2:
return
raise
os_release = {}
with f:
for line in f.readlines():
m = re.match(r"^([A-Z\_]+)=(.*)$", line)
if m is None:
continue
os_release[m.group(1)] = m.group(2)
try:
return "%(ID)s-%(VERSION_ID)s" % os_release
except KeyError:
return
def __parse_system_release(self):
"""
Tries to parse /etc/system-release and
returns a unique distribution identifier
if the file exists.
"""
try:
f = open("/etc/system-release", "r")
except IOError as e:
# File not found
if e.errno == 2:
return
raise
with f:
# Read first line
line = f.readline()
# Check for IPFire systems
m = re.match(r"^IPFire (\d).(\d+)", line)
if m:
return "ipfire-%s" % m.group(1)
|
gpl-3.0
| -9,210,014,487,743,441,000
| 25.452381
| 96
| 0.617544
| false
| 3.375967
| false
| false
| false
|
mandeepjadon/python-game
|
battleship.py
|
1
|
1203
|
from random import randint
board = []
for x in range(5):
board.append(["O"] * 5)
def print_board(board):
for row in board:
print " ".join(row)
print "Let's play Battleship!"
print_board(board)
def random_row(board):
return randint(0, len(board) - 1)
def random_col(board):
return randint(0, len(board[0]) - 1)
ship_row = random_row(board)
ship_col = random_col(board)
# Everything from here on should go in your for loop!
# Be sure to indent four spaces!
for turn in range(4):
guess_row = int(raw_input("Guess Row:"))
guess_col = int(raw_input("Guess Col:"))
if guess_row == ship_row and guess_col == ship_col:
print "Congratulations! You sunk my battleship!"
break
else:
if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4):
print "Oops, that's not even in the ocean."
elif(board[guess_row][guess_col] == "X"):
print "You guessed that one already."
else:
print "You missed my battleship!"
board[guess_row][guess_col] = "X"
print_board(board)
if turn==3:
print "Game Over !!"
else :
print "turn number ",turn+1
|
mit
| -3,335,248,240,708,133,000
| 25.733333
| 80
| 0.594347
| false
| 3.242588
| false
| false
| false
|
TheAlgorithms/Python
|
bit_manipulation/binary_twos_complement.py
|
1
|
1121
|
# Information on 2's complement: https://en.wikipedia.org/wiki/Two%27s_complement
def twos_complement(number: int) -> str:
"""
Take in a negative integer 'number'.
Return the two's complement representation of 'number'.
>>> twos_complement(0)
'0b0'
>>> twos_complement(-1)
'0b11'
>>> twos_complement(-5)
'0b1011'
>>> twos_complement(-17)
'0b101111'
>>> twos_complement(-207)
'0b100110001'
>>> twos_complement(1)
Traceback (most recent call last):
...
ValueError: input must be a negative integer
"""
if number > 0:
raise ValueError("input must be a negative integer")
binary_number_length = len(bin(number)[3:])
twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
twos_complement_number = (
(
"1"
+ "0" * (binary_number_length - len(twos_complement_number))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
|
mit
| 2,760,411,776,246,601,700
| 25.069767
| 81
| 0.580731
| false
| 3.366366
| false
| false
| false
|
jobiols/odoo-argentina
|
l10n_ar_account/models/res_company.py
|
1
|
1884
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api
from openerp.addons.account_document.models.res_company import ResCompany
localizations = ResCompany._localization_selection
localizations.append(('argentina', 'Argentina'))
ResCompany._localization_selection = localizations
class ResCompany(models.Model):
_inherit = "res.company"
gross_income_number = fields.Char(
related='partner_id.gross_income_number',
string='Gross Income'
)
gross_income_type = fields.Selection(
related='partner_id.gross_income_type',
string='Gross Income'
)
gross_income_jurisdiction_ids = fields.Many2many(
related='partner_id.gross_income_jurisdiction_ids',
)
start_date = fields.Date(
related='partner_id.start_date',
)
afip_responsability_type_id = fields.Many2one(
related='partner_id.afip_responsability_type_id',
)
company_requires_vat = fields.Boolean(
related='afip_responsability_type_id.company_requires_vat',
readonly=True,
)
# use globally as default so that if child companies are created they
# also use this as default
tax_calculation_rounding_method = fields.Selection(
default='round_globally',
)
@api.onchange('localization')
def change_localization(self):
if self.localization == 'argentina' and not self.country_id:
self.country_id = self.env.ref('base.ar')
# TODO ver si lo movemos a account_document
# journal_ids = fields.One2many(
# 'account.journal',
# 'company_id',
# 'Journals'
# )
|
agpl-3.0
| 8,990,268,929,781,817,000
| 34.54717
| 78
| 0.612527
| false
| 3.908714
| false
| false
| false
|
CiscoSystems/nova
|
nova/compute/utils.py
|
1
|
19238
|
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute-related Utilities and helpers."""
import itertools
import re
import string
import traceback
from oslo.config import cfg
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova.network import model as network_model
from nova import notifications
from nova.objects import instance as instance_obj
from nova.objects import instance_fault as instance_fault_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log
from nova.openstack.common import timeutils
from nova import rpc
from nova import utils
from nova.virt import driver
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
LOG = log.getLogger(__name__)
def exception_to_dict(fault):
"""Converts exceptions to a dict for use in notifications."""
#TODO(johngarbutt) move to nova/exception.py to share with wrap_exception
code = 500
if hasattr(fault, "kwargs"):
code = fault.kwargs.get('code', 500)
# get the message from the exception that was thrown
# if that does not exist, use the name of the exception class itself
try:
message = fault.format_message()
# These exception handlers are broad so we don't fail to log the fault
# just because there is an unexpected error retrieving the message
except Exception:
try:
message = unicode(fault)
except Exception:
message = None
if not message:
message = fault.__class__.__name__
# NOTE(dripton) The message field in the database is limited to 255 chars.
# MySQL silently truncates overly long messages, but PostgreSQL throws an
# error if we don't truncate it.
u_message = unicode(message)[:255]
fault_dict = dict(exception=fault)
fault_dict["message"] = u_message
fault_dict["code"] = code
return fault_dict
def _get_fault_details(exc_info, error_code):
details = ''
if exc_info and error_code == 500:
tb = exc_info[2]
if tb:
details = ''.join(traceback.format_tb(tb))
return unicode(details)
def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
"""Adds the specified fault to the database."""
fault_obj = instance_fault_obj.InstanceFault(context=context)
fault_obj.host = CONF.host
fault_obj.instance_uuid = instance['uuid']
fault_obj.update(exception_to_dict(fault))
code = fault_obj.code
fault_obj.details = _get_fault_details(exc_info, code)
fault_obj.create()
def pack_action_start(context, instance_uuid, action_name):
values = {'action': action_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'user_id': context.user_id,
'project_id': context.project_id,
'start_time': context.timestamp}
return values
def pack_action_finish(context, instance_uuid):
values = {'instance_uuid': instance_uuid,
'request_id': context.request_id,
'finish_time': timeutils.utcnow()}
return values
def pack_action_event_start(context, instance_uuid, event_name):
values = {'event': event_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'start_time': timeutils.utcnow()}
return values
def pack_action_event_finish(context, instance_uuid, event_name, exc_val=None,
exc_tb=None):
values = {'event': event_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'finish_time': timeutils.utcnow()}
if exc_tb is None:
values['result'] = 'Success'
else:
values['result'] = 'Error'
values['message'] = str(exc_val)
values['traceback'] = ''.join(traceback.format_tb(exc_tb))
return values
def get_device_name_for_instance(context, instance, bdms, device):
"""Validates (or generates) a device name for instance.
This method is a wrapper for get_next_device_name that gets the list
of used devices and the root device from a block device mapping.
"""
mappings = block_device.instance_block_mapping(instance, bdms)
return get_next_device_name(instance, mappings.values(),
mappings['root'], device)
def default_device_names_for_instance(instance, root_device_name,
*block_device_lists):
"""Generate missing device names for an instance."""
dev_list = [bdm.device_name
for bdm in itertools.chain(*block_device_lists)
if bdm.device_name]
if root_device_name not in dev_list:
dev_list.append(root_device_name)
for bdm in itertools.chain(*block_device_lists):
dev = bdm.device_name
if not dev:
dev = get_next_device_name(instance, dev_list,
root_device_name)
bdm.device_name = dev
bdm.save()
dev_list.append(dev)
def get_next_device_name(instance, device_name_list,
root_device_name=None, device=None):
"""Validates (or generates) a device name for instance.
If device is not set, it will generate a unique device appropriate
for the instance. It uses the root_device_name (if provided) and
the list of used devices to find valid device names. If the device
name is valid but applicable to a different backend (for example
/dev/vdc is specified but the backend uses /dev/xvdc), the device
name will be converted to the appropriate format.
"""
req_prefix = None
req_letter = None
if device:
try:
req_prefix, req_letter = block_device.match_device(device)
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=device)
if not root_device_name:
root_device_name = block_device.DEFAULT_ROOT_DEV_NAME
try:
prefix = block_device.match_device(root_device_name)[0]
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=root_device_name)
# NOTE(vish): remove this when xenapi is setting default_root_device
if driver.compute_driver_matches('xenapi.XenAPIDriver'):
prefix = '/dev/xvd'
if req_prefix != prefix:
LOG.debug("Using %(prefix)s instead of %(req_prefix)s",
{'prefix': prefix, 'req_prefix': req_prefix})
used_letters = set()
for device_path in device_name_list:
letter = block_device.strip_prefix(device_path)
# NOTE(vish): delete numbers in case we have something like
# /dev/sda1
        letter = re.sub(r"\d+", "", letter)
used_letters.add(letter)
# NOTE(vish): remove this when xenapi is properly setting
# default_ephemeral_device and default_swap_device
if driver.compute_driver_matches('xenapi.XenAPIDriver'):
flavor = flavors.extract_flavor(instance)
if flavor['ephemeral_gb']:
used_letters.add('b')
if flavor['swap']:
used_letters.add('c')
if not req_letter:
req_letter = _get_unused_letter(used_letters)
if req_letter in used_letters:
raise exception.DevicePathInUse(path=device)
return prefix + req_letter
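# Illustrative sketch (editor's addition, not part of the original module):
# assuming a libvirt-style (non-Xen) driver, a root disk of /dev/vda that is
# already in use and no requested device, the next free name is /dev/vdb.
def _example_next_device_name(instance):
    return get_next_device_name(instance, ['/dev/vda'], '/dev/vda')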
def _get_unused_letter(used_letters):
doubles = [first + second for second in string.ascii_lowercase
for first in string.ascii_lowercase]
all_letters = set(list(string.ascii_lowercase) + doubles)
letters = list(all_letters - used_letters)
# NOTE(vish): prepend ` so all shorter sequences sort first
letters.sort(key=lambda x: x.rjust(2, '`'))
return letters[0]
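# Illustrative sketch (editor's addition, not part of the original module):
# the backtick padding makes every single letter sort before every double
# letter, so 'c' is picked before 'aa'.
def _example_unused_letter():
    assert _get_unused_letter({'a', 'b'}) == 'c'
    assert _get_unused_letter(set(string.ascii_lowercase)) == 'aa'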
def get_image_metadata(context, image_service, image_id, instance):
# If the base image is still available, get its metadata
try:
image = image_service.show(context, image_id)
except Exception as e:
LOG.warning(_("Can't access image %(image_id)s: %(error)s"),
{"image_id": image_id, "error": e}, instance=instance)
image_system_meta = {}
else:
flavor = flavors.extract_flavor(instance)
image_system_meta = utils.get_system_metadata_from_image(image, flavor)
# Get the system metadata from the instance
system_meta = utils.instance_sys_meta(instance)
# Merge the metadata from the instance with the image's, if any
system_meta.update(image_system_meta)
# Convert the system metadata to image metadata
return utils.get_image_from_system_metadata(system_meta)
def notify_usage_exists(notifier, context, instance_ref, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
"""Generates 'exists' notification for an instance for usage auditing
purposes.
:param notifier: a messaging.Notifier
:param current_period: if True, this will generate a usage for the
current usage period; if False, this will generate a usage for the
previous audit period.
:param ignore_missing_network_data: if True, log any exceptions generated
while getting network info; if False, raise the exception.
:param system_metadata: system_metadata DB entries for the instance,
if not None. *NOTE*: Currently unused here in trunk, but needed for
potential custom modifications.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification if not None.
"""
audit_start, audit_end = notifications.audit_period_bounds(current_period)
bw = notifications.bandwidth_usage(instance_ref, audit_start,
ignore_missing_network_data)
if system_metadata is None:
system_metadata = utils.instance_sys_meta(instance_ref)
# add image metadata to the notification:
image_meta = notifications.image_meta(system_metadata)
extra_info = dict(audit_period_beginning=str(audit_start),
audit_period_ending=str(audit_end),
bandwidth=bw, image_meta=image_meta)
if extra_usage_info:
extra_info.update(extra_usage_info)
notify_about_instance_usage(notifier, context, instance_ref, 'exists',
system_metadata=system_metadata, extra_usage_info=extra_info)
def notify_about_instance_usage(notifier, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, fault=None):
"""Send a notification about an instance.
:param notifier: a messaging.Notifier
:param event_suffix: Event type like "delete.start" or "exists"
:param network_info: Networking information, if provided.
:param system_metadata: system_metadata DB entries for the instance,
if provided.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification.
"""
if not extra_usage_info:
extra_usage_info = {}
usage_info = notifications.info_from_instance(context, instance,
network_info, system_metadata, **extra_usage_info)
if fault:
# NOTE(johngarbutt) mirrors the format in wrap_exception
fault_payload = exception_to_dict(fault)
LOG.debug(fault_payload["message"], instance=instance,
exc_info=True)
usage_info.update(fault_payload)
if event_suffix.endswith("error"):
method = notifier.error
else:
method = notifier.info
method(context, 'compute.instance.%s' % event_suffix, usage_info)
def notify_about_aggregate_update(context, event_suffix, aggregate_payload):
"""Send a notification about aggregate update.
:param event_suffix: Event type like "create.start" or "create.end"
:param aggregate_payload: payload for aggregate update
"""
aggregate_identifier = aggregate_payload.get('aggregate_id', None)
if not aggregate_identifier:
aggregate_identifier = aggregate_payload.get('name', None)
if not aggregate_identifier:
LOG.debug("No aggregate id or name specified for this "
"notification and it will be ignored")
return
notifier = rpc.get_notifier(service='aggregate',
host=aggregate_identifier)
notifier.info(context, 'aggregate.%s' % event_suffix, aggregate_payload)
def notify_about_host_update(context, event_suffix, host_payload):
"""Send a notification about host update.
:param event_suffix: Event type like "create.start" or "create.end"
:param host_payload: payload for host update. It is a dict and there
should be at least the 'host_name' key in this
dict.
"""
host_identifier = host_payload.get('host_name')
if not host_identifier:
LOG.warn(_("No host name specified for the notification of "
"HostAPI.%s and it will be ignored"), event_suffix)
return
notifier = rpc.get_notifier(service='api', host=host_identifier)
notifier.info(context, 'HostAPI.%s' % event_suffix, host_payload)
def get_nw_info_for_instance(instance):
if isinstance(instance, instance_obj.Instance):
if instance.info_cache is None:
return network_model.NetworkInfo.hydrate([])
return instance.info_cache.network_info
# FIXME(comstud): Transitional while we convert to objects.
info_cache = instance['info_cache'] or {}
nw_info = info_cache.get('network_info') or []
if not isinstance(nw_info, network_model.NetworkInfo):
nw_info = network_model.NetworkInfo.hydrate(nw_info)
return nw_info
def has_audit_been_run(context, conductor, host, timestamp=None):
begin, end = utils.last_completed_audit_period(before=timestamp)
task_log = conductor.task_log_get(context, "instance_usage_audit",
begin, end, host)
if task_log:
return True
else:
return False
def start_instance_usage_audit(context, conductor, begin, end, host,
num_instances):
conductor.task_log_begin_task(context, "instance_usage_audit", begin,
end, host, num_instances,
"Instance usage audit started...")
def finish_instance_usage_audit(context, conductor, begin, end, host, errors,
message):
conductor.task_log_end_task(context, "instance_usage_audit", begin, end,
host, errors, message)
def usage_volume_info(vol_usage):
def null_safe_str(s):
return str(s) if s else ''
tot_refreshed = vol_usage['tot_last_refreshed']
curr_refreshed = vol_usage['curr_last_refreshed']
if tot_refreshed and curr_refreshed:
last_refreshed_time = max(tot_refreshed, curr_refreshed)
elif tot_refreshed:
last_refreshed_time = tot_refreshed
else:
# curr_refreshed must be set
last_refreshed_time = curr_refreshed
usage_info = dict(
volume_id=vol_usage['volume_id'],
tenant_id=vol_usage['project_id'],
user_id=vol_usage['user_id'],
availability_zone=vol_usage['availability_zone'],
instance_id=vol_usage['instance_uuid'],
last_refreshed=null_safe_str(last_refreshed_time),
reads=vol_usage['tot_reads'] + vol_usage['curr_reads'],
read_bytes=vol_usage['tot_read_bytes'] +
vol_usage['curr_read_bytes'],
writes=vol_usage['tot_writes'] + vol_usage['curr_writes'],
write_bytes=vol_usage['tot_write_bytes'] +
vol_usage['curr_write_bytes'])
return usage_info
def get_reboot_type(task_state, current_power_state):
"""Checks if the current instance state requires a HARD reboot."""
if current_power_state != power_state.RUNNING:
return 'HARD'
soft_types = [task_states.REBOOT_STARTED, task_states.REBOOT_PENDING,
task_states.REBOOTING]
reboot_type = 'SOFT' if task_state in soft_types else 'HARD'
return reboot_type
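# Illustrative sketch (editor's addition, not part of the original module): a
# soft reboot is only honoured while the instance is actually running; any
# other power state escalates the request to a HARD reboot.
def _example_reboot_type():
    assert get_reboot_type(task_states.REBOOTING, power_state.RUNNING) == 'SOFT'
    assert get_reboot_type(task_states.REBOOTING, power_state.SHUTDOWN) == 'HARD'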
class EventReporter(object):
"""Context manager to report instance action events."""
def __init__(self, context, conductor, event_name, *instance_uuids):
self.context = context
self.conductor = conductor
self.event_name = event_name
self.instance_uuids = instance_uuids
def __enter__(self):
for uuid in self.instance_uuids:
event = pack_action_event_start(self.context, uuid,
self.event_name)
self.conductor.action_event_start(self.context, event)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for uuid in self.instance_uuids:
event = pack_action_event_finish(self.context, uuid,
self.event_name, exc_val, exc_tb)
self.conductor.action_event_finish(self.context, event)
return False
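# Illustrative sketch (editor's addition, not part of the original module):
# wrapping an operation so start and finish (or error) action events are
# recorded for each instance uuid. The event name below is made up.
def _example_event_reporter(context, conductor_api, instance):
    with EventReporter(context, conductor_api, 'compute_example_event',
                       instance['uuid']):
        pass  # do the real work here; exceptions are recorded, then re-raised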
def periodic_task_spacing_warn(config_option_name):
"""Decorator to warn about an upcoming breaking change in methods which
use the @periodic_task decorator.
Some methods using the @periodic_task decorator specify spacing=0 or
None to mean "do not call this method", but the decorator itself uses
0/None to mean "call at the default rate".
Starting with the K release the Nova methods will be changed to conform
to the Oslo decorator. This decorator should be present wherever a
spacing value from user-supplied config is passed to @periodic_task, and
there is also a check to skip the method if the value is zero. It will
log a warning if the spacing value from config is 0/None.
"""
# TODO(gilliard) remove this decorator, its usages and the early returns
# near them after the K release.
def wrapper(f):
if (hasattr(f, "_periodic_spacing") and
(f._periodic_spacing == 0 or f._periodic_spacing is None)):
LOG.warning(_("Value of 0 or None specified for %s."
" This behaviour will change in meaning in the K release, to"
" mean 'call at the default rate' rather than 'do not call'."
" To keep the 'do not call' behaviour, use a negative value."),
config_option_name)
return f
return wrapper
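# Illustrative sketch (editor's addition, not part of the original module):
# the decorator only inspects the _periodic_spacing attribute that the
# @periodic_task decorator sets, so its effect can be shown directly. The
# config option name below is hypothetical.
def _example_spacing_warn():
    def _task(self, context):
        pass
    _task._periodic_spacing = 0  # what spacing=0 from config would produce
    # Wrapping logs the K-release behaviour-change warning and returns _task.
    return periodic_task_spacing_warn("example_interval_option")(_task)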
|
apache-2.0
| 2,790,141,694,222,280,700
| 36.870079
| 79
| 0.647001
| false
| 4.060363
| false
| false
| false
|
googlefonts/color-fonts
|
config/more_samples-glyf_colr_1.py
|
1
|
6716
|
"""Compile samples that are infeasible or difficult by svg compilation.
"""
import datetime
from pathlib import Path
from fontTools import fontBuilder
from fontTools import ttLib
from fontTools.colorLib import builder as colorBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.ttLib.tables._g_l_y_f import Glyph
import sys
from typing import Any, Mapping, NamedTuple, Optional
from fontTools.ttLib.tables import otTables as ot
from nanoemoji.colors import css_colors, Color
from fontTools.misc.transform import Transform
_UPEM = 1000
_ASCENT = 950
_DESCENT = 250
_FAMILY = "More COLR v1 Samples"
_STYLE = "Regular"
_PALETTE = {} # <3 mutable globals
class SampleGlyph(NamedTuple):
glyph_name: str
accessor: str
advance: int
glyph: Glyph
colr: Optional[Mapping[str, Any]] = None
def _cpal(color_str):
color = Color.fromstring(color_str).to_ufo_color()
if color not in _PALETTE:
_PALETTE[color] = len(_PALETTE)
return _PALETTE[color]
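# Illustrative sketch (editor's addition, not part of the original script):
# repeated colors reuse a single CPAL slot, so every paint referencing "red"
# ends up pointing at the same palette index.
def _example_palette_reuse():
    first = _cpal("red")
    assert _cpal("red") == first  # same index on every call
    return first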
def _sample_sweep():
glyph_name = "sweep"
pen = TTGlyphPen(None)
pen.moveTo((100, 500))
pen.qCurveTo((500, 1000), (900, 500))
pen.qCurveTo((500, 0), (100, 500))
pen.closePath()
colr = {
"Format": ot.PaintFormat.PaintGlyph,
"Glyph": glyph_name,
"Paint": {
"Format": ot.PaintFormat.PaintSweepGradient,
"ColorLine": {
"ColorStop": [
(0.0, _cpal("red")),
(0.5, _cpal("yellow")),
(1.0, _cpal("red")),
]
},
"centerX": 500,
"centerY": 500,
"startAngle": 0,
"endAngle": 360,
},
}
return SampleGlyph(
glyph_name=glyph_name, accessor="c", advance=_UPEM, glyph=pen.glyph(), colr=colr
)
def _sample_colr_glyph():
glyph_name = "transformed_sweep"
# Paint the sweep shifted and rotated
colr = {
"Format": ot.PaintFormat.PaintTranslate,
"dx": 250,
"dy": 0,
"Paint": {
"Format": ot.PaintFormat.PaintRotate,
"centerX": _UPEM / 2,
"centerY": _UPEM / 2,
"angle": 60,
"Paint": {
"Format": ot.PaintFormat.PaintColrGlyph,
"Glyph": "sweep",
},
},
}
pen = TTGlyphPen(None)
pen.moveTo((0, 0))
pen.lineTo((_UPEM, _UPEM))
pen.endPath()
return SampleGlyph(
glyph_name=glyph_name, accessor="t", advance=_UPEM, glyph=pen.glyph(), colr=colr
)
def _sample_composite_colr_glyph():
glyph_name = "composite_colr_glyph"
# Scale down the sweep and use it to cut a hole in the sweep
# Transforms combine f(g(x)); build up backwards
t = Transform(dx=-500, dy=-500) # move to origin
t = Transform(xx=0.75, yy=0.75).transform(t)
t = Transform(dx=500, dy=500).transform(t)
t = tuple(t)
colr = {
"Format": ot.PaintFormat.PaintComposite,
"CompositeMode": "SRC_OUT",
"SourcePaint": {
"Format": ot.PaintFormat.PaintColrGlyph,
"Glyph": "sweep",
},
"BackdropPaint": {
"Format": ot.PaintFormat.PaintTransform,
"Paint": {
"Format": ot.PaintFormat.PaintColrGlyph,
"Glyph": "sweep",
},
"Transform": t,
},
}
pen = TTGlyphPen(None)
pen.moveTo((0, 0))
pen.lineTo((_UPEM, _UPEM))
pen.endPath()
return SampleGlyph(
glyph_name=glyph_name, accessor="o", advance=_UPEM, glyph=pen.glyph(), colr=colr
)
def _gradient_stops_repeat(first_stop, second_stop, accessor_char):
glyph_name = f"linear_repeat_{first_stop}_{second_stop}"
pen = TTGlyphPen(None)
pen.moveTo((100, 250))
pen.lineTo((100, 950))
pen.lineTo((900, 950))
pen.lineTo((900, 250))
pen.closePath()
colr = {
"Format": ot.PaintFormat.PaintGlyph,
"Glyph": glyph_name,
"Paint": {
"Format": ot.PaintFormat.PaintLinearGradient,
"ColorLine": {
"ColorStop": [
(first_stop, _cpal("red")),
(second_stop, _cpal("blue")),
],
"Extend": ot.ExtendMode.REPEAT,
},
"x0": 100,
"y0": 250,
"x1": 900,
"y1": 250,
"x2": 100,
"y2": 300,
},
}
return SampleGlyph(
glyph_name=glyph_name,
accessor=accessor_char,
advance=_UPEM,
glyph=pen.glyph(),
colr=colr,
)
def main():
assert len(sys.argv) == 2
build_dir = Path(sys.argv[1])
build_dir.mkdir(exist_ok=True)
out_file = (build_dir / _FAMILY.replace(" ", "")).with_suffix(".ttf")
version = datetime.datetime.now().isoformat()
names = {
"familyName": _FAMILY,
"styleName": _STYLE,
"uniqueFontIdentifier": " ".join((_FAMILY, version)),
"fullName": " ".join((_FAMILY, _STYLE)),
"version": version,
"psName": "-".join((_FAMILY.replace(" ", ""), _STYLE)),
}
glyphs = [
SampleGlyph(glyph_name=".notdef", accessor="", advance=600, glyph=Glyph()),
SampleGlyph(glyph_name=".null", accessor="", advance=0, glyph=Glyph()),
_sample_sweep(),
_sample_colr_glyph(),
_sample_composite_colr_glyph(),
_gradient_stops_repeat(0, 1, "p"),
_gradient_stops_repeat(0.2, 0.8, "q"),
_gradient_stops_repeat(0, 1.5, "r"),
_gradient_stops_repeat(0.5, 1.5, "s"),
]
fb = fontBuilder.FontBuilder(_UPEM)
fb.setupGlyphOrder([g.glyph_name for g in glyphs])
fb.setupCharacterMap(
{ord(g.accessor): g.glyph_name for g in glyphs if len(g.accessor) == 1}
)
fb.setupGlyf({g.glyph_name: g.glyph for g in glyphs})
fb.setupHorizontalMetrics({g.glyph_name: (_UPEM, g.glyph.xMin) for g in glyphs})
fb.setupHorizontalHeader(ascent=_ASCENT, descent=-_DESCENT)
fb.setupOS2(sTypoAscender=_ASCENT, usWinAscent=_ASCENT, usWinDescent=_DESCENT)
fb.setupNameTable(names)
fb.setupPost()
fb.font["head"].xMin = 0
fb.font["head"].yMin = -_DESCENT
fb.font["head"].xMax = _UPEM
fb.font["head"].yMax = _ASCENT
fb.font["OS/2"].fsType = 0
fb.font["OS/2"].version = 4
fb.font["OS/2"].fsSelection |= 1 << 7
fb.font["hhea"].advanceWidthMax = _UPEM
fb.font["COLR"] = colorBuilder.buildCOLR(
{g.glyph_name: g.colr for g in glyphs if g.colr}
)
fb.font["CPAL"] = colorBuilder.buildCPAL([list(_PALETTE)])
fb.save(out_file)
print(f"Wrote {out_file}")
if __name__ == "__main__":
main()
|
apache-2.0
| -2,336,679,535,853,925,400
| 26.983333
| 88
| 0.558368
| false
| 3.284108
| false
| false
| false
|
donspaulding/adspygoogle
|
examples/adspygoogle/adwords/v201302/optimization/get_placement_ideas.py
|
1
|
2587
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example retrieves urls that have content keywords related to a given
website.
Tags: TargetingIdeaService.get
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
PAGE_SIZE = 100
def main(client):
# Initialize appropriate service.
targeting_idea_service = client.GetTargetingIdeaService(version='v201302')
# Construct selector object and retrieve related placements.
offset = 0
url = 'http://mars.google.com'
selector = {
'searchParameters': [{
'xsi_type': 'RelatedToUrlSearchParameter',
'urls': [url],
'includeSubUrls': 'false'
}],
'ideaType': 'PLACEMENT',
'requestType': 'IDEAS',
'requestedAttributeTypes': ['SAMPLE_URL'],
'paging': {
'startIndex': str(offset),
'numberResults': str(PAGE_SIZE)
}
}
more_pages = True
while more_pages:
page = targeting_idea_service.Get(selector)[0]
# Display results.
if 'entries' in page:
for result in page['entries']:
result = result['data'][0]['value']
print ('Related content keywords were found at \'%s\' url.'
% result['value'])
print
print ('Total urls found with content keywords related to keywords at '
'\'%s\': %s' % (url, page['totalNumEntries']))
else:
print 'No content keywords were found at \'%s\'.' % url
offset += PAGE_SIZE
selector['paging']['startIndex'] = str(offset)
more_pages = offset < int(page['totalNumEntries'])
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client)
|
apache-2.0
| 1,005,778,109,689,792,100
| 29.435294
| 77
| 0.637031
| false
| 3.884384
| false
| false
| false
|
danakj/chromium
|
mojo/public/tools/bindings/generators/mojom_cpp_generator.py
|
1
|
20410
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates C++ source files from a mojom.Module."""
import mojom.generate.generator as generator
import mojom.generate.module as mojom
import mojom.generate.pack as pack
from mojom.generate.template_expander import UseJinja
_kind_to_cpp_type = {
mojom.BOOL: "bool",
mojom.INT8: "int8_t",
mojom.UINT8: "uint8_t",
mojom.INT16: "int16_t",
mojom.UINT16: "uint16_t",
mojom.INT32: "int32_t",
mojom.UINT32: "uint32_t",
mojom.FLOAT: "float",
mojom.INT64: "int64_t",
mojom.UINT64: "uint64_t",
mojom.DOUBLE: "double",
}
_kind_to_cpp_literal_suffix = {
mojom.UINT8: "U",
mojom.UINT16: "U",
mojom.UINT32: "U",
mojom.FLOAT: "f",
mojom.UINT64: "ULL",
}
# TODO(rockot): Get rid of these globals. This requires some refactoring of the
# generator library code so that filters can use the generator as context.
_current_typemap = {}
_for_blink = False
_use_new_wrapper_types = False
# TODO(rockot, yzshen): The variant handling is kind of a hack currently. Make
# it right.
_variant = None
# NOTE(editor): the "struct_size" filter below references _HEADER_SIZE, which
# is otherwise undefined in this file; it is assumed to be the standard 8-byte
# mojo struct header (num_bytes + version).
_HEADER_SIZE = 8
class _NameFormatter(object):
"""A formatter for the names of kinds or values."""
def __init__(self, token, variant):
self._token = token
self._variant = variant
def Format(self, separator, prefixed=False, internal=False,
include_variant=False, add_same_module_namespaces=False):
parts = []
if self._ShouldIncludeNamespace(add_same_module_namespaces):
if prefixed:
parts.append("")
parts.extend(self._GetNamespace())
if include_variant and self._variant:
parts.append(self._variant)
parts.extend(self._GetName(internal))
return separator.join(parts)
def FormatForCpp(self, add_same_module_namespaces=False, internal=False):
return self.Format(
"::", prefixed=True,
add_same_module_namespaces=add_same_module_namespaces,
internal=internal, include_variant=True)
def FormatForMojom(self):
return self.Format(".", add_same_module_namespaces=True)
def _MapKindName(self, token, internal):
if not internal:
return token.name
if (mojom.IsStructKind(token) or mojom.IsUnionKind(token) or
mojom.IsInterfaceKind(token) or mojom.IsEnumKind(token)):
return token.name + "_Data"
return token.name
def _GetName(self, internal):
name = []
if internal:
name.append("internal")
if self._token.parent_kind:
name.append(self._MapKindName(self._token.parent_kind, internal))
# Both variable and enum constants are constructed like:
# Namespace::Struct::CONSTANT_NAME
# For enums, CONSTANT_NAME is EnumName::ENUM_VALUE.
if isinstance(self._token, mojom.EnumValue):
name.extend([self._token.enum.name, self._token.name])
else:
name.append(self._MapKindName(self._token, internal))
return name
def _ShouldIncludeNamespace(self, add_same_module_namespaces):
return add_same_module_namespaces or self._token.imported_from
def _GetNamespace(self):
if self._token.imported_from:
return NamespaceToArray(self._token.imported_from["namespace"])
elif hasattr(self._token, "module"):
return NamespaceToArray(self._token.module.namespace)
return []
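# Illustrative sketch (editor's addition, not part of the original generator),
# assuming a struct "Foo" defined in a module with namespace "sample.ns", not
# imported, with no variant selected:
#   _NameFormatter(foo, None).FormatForCpp()               -> "Foo"
#   _NameFormatter(foo, None).FormatForCpp(
#       add_same_module_namespaces=True)                   -> "::sample::ns::Foo"
#   _NameFormatter(foo, None).FormatForCpp(internal=True)  -> "internal::Foo_Data"
#   _NameFormatter(foo, None).FormatForMojom()             -> "sample.ns.Foo"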
def ConstantValue(constant):
return ExpressionToText(constant.value, kind=constant.kind)
# TODO(yzshen): Revisit the default value feature. It was designed prior to
# custom type mapping.
def DefaultValue(field):
if field.default:
if mojom.IsStructKind(field.kind):
assert field.default == "default"
if not IsTypemappedKind(field.kind):
return "%s::New()" % GetNameForKind(field.kind)
return ExpressionToText(field.default, kind=field.kind)
if not _use_new_wrapper_types:
if mojom.IsArrayKind(field.kind) or mojom.IsMapKind(field.kind):
return "nullptr";
if mojom.IsStringKind(field.kind):
return "" if _for_blink else "nullptr"
return ""
def NamespaceToArray(namespace):
return namespace.split(".") if namespace else []
def GetNameForKind(kind, internal=False):
return _NameFormatter(kind, _variant).FormatForCpp(internal=internal)
def GetQualifiedNameForKind(kind, internal=False):
return _NameFormatter(kind, _variant).FormatForCpp(
internal=internal, add_same_module_namespaces=True)
def GetFullMojomNameForKind(kind):
return _NameFormatter(kind, _variant).FormatForMojom()
def IsTypemappedKind(kind):
return hasattr(kind, "name") and \
GetFullMojomNameForKind(kind) in _current_typemap
def IsNativeOnlyKind(kind):
return (mojom.IsStructKind(kind) or mojom.IsEnumKind(kind)) and \
kind.native_only
def GetNativeTypeName(typemapped_kind):
return _current_typemap[GetFullMojomNameForKind(typemapped_kind)]["typename"]
def GetCppPodType(kind):
if mojom.IsStringKind(kind):
return "char*"
return _kind_to_cpp_type[kind]
def GetCppWrapperType(kind):
def _AddOptional(type_name):
pattern = "WTF::Optional<%s>" if _for_blink else "base::Optional<%s>"
return pattern % type_name
if IsTypemappedKind(kind):
type_name = GetNativeTypeName(kind)
if (mojom.IsNullableKind(kind) and
not _current_typemap[GetFullMojomNameForKind(kind)][
"nullable_is_same_type"]):
type_name = _AddOptional(type_name)
return type_name
if mojom.IsEnumKind(kind):
return GetNameForKind(kind)
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
return "%sPtr" % GetNameForKind(kind)
if mojom.IsArrayKind(kind):
pattern = None
if _use_new_wrapper_types:
pattern = "WTF::Vector<%s>" if _for_blink else "std::vector<%s>"
if mojom.IsNullableKind(kind):
pattern = _AddOptional(pattern)
else:
pattern = "mojo::WTFArray<%s>" if _for_blink else "mojo::Array<%s>"
return pattern % GetCppWrapperType(kind.kind)
if mojom.IsMapKind(kind):
pattern = None
if _use_new_wrapper_types:
pattern = ("WTF::HashMap<%s, %s>" if _for_blink else
"std::unordered_map<%s, %s>")
if mojom.IsNullableKind(kind):
pattern = _AddOptional(pattern)
else:
pattern = "mojo::WTFMap<%s, %s>" if _for_blink else "mojo::Map<%s, %s>"
return pattern % (GetCppWrapperType(kind.key_kind),
GetCppWrapperType(kind.value_kind))
if mojom.IsInterfaceKind(kind):
return "%sPtr" % GetNameForKind(kind)
if mojom.IsInterfaceRequestKind(kind):
return "%sRequest" % GetNameForKind(kind.kind)
if mojom.IsAssociatedInterfaceKind(kind):
return "%sAssociatedPtrInfo" % GetNameForKind(kind.kind)
if mojom.IsAssociatedInterfaceRequestKind(kind):
return "%sAssociatedRequest" % GetNameForKind(kind.kind)
if mojom.IsStringKind(kind):
if _for_blink:
return "WTF::String"
if not _use_new_wrapper_types:
return "mojo::String"
type_name = "std::string"
return _AddOptional(type_name) if mojom.IsNullableKind(kind) else type_name
if mojom.IsGenericHandleKind(kind):
return "mojo::ScopedHandle"
if mojom.IsDataPipeConsumerKind(kind):
return "mojo::ScopedDataPipeConsumerHandle"
if mojom.IsDataPipeProducerKind(kind):
return "mojo::ScopedDataPipeProducerHandle"
if mojom.IsMessagePipeKind(kind):
return "mojo::ScopedMessagePipeHandle"
if mojom.IsSharedBufferKind(kind):
return "mojo::ScopedSharedBufferHandle"
if not kind in _kind_to_cpp_type:
raise Exception("Unrecognized kind %s" % kind.spec)
return _kind_to_cpp_type[kind]
def IsMoveOnlyKind(kind):
if IsTypemappedKind(kind):
if mojom.IsEnumKind(kind):
return False
return _current_typemap[GetFullMojomNameForKind(kind)]["move_only"]
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
return True
if mojom.IsArrayKind(kind):
return IsMoveOnlyKind(kind.kind) if _use_new_wrapper_types else True
if mojom.IsMapKind(kind):
return IsMoveOnlyKind(kind.value_kind) if _use_new_wrapper_types else True
if mojom.IsAnyHandleOrInterfaceKind(kind):
return True
return False
def IsCopyablePassByValue(kind):
if not IsTypemappedKind(kind):
return False
return _current_typemap[GetFullMojomNameForKind(kind)][
"copyable_pass_by_value"]
def ShouldPassParamByValue(kind):
return ((not mojom.IsReferenceKind(kind)) or IsMoveOnlyKind(kind) or
IsCopyablePassByValue(kind))
def GetCppWrapperParamType(kind):
cpp_wrapper_type = GetCppWrapperType(kind)
return (cpp_wrapper_type if ShouldPassParamByValue(kind)
else "const %s&" % cpp_wrapper_type)
def GetCppDataViewType(kind):
if mojom.IsEnumKind(kind):
return GetNameForKind(kind)
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
return "%sDataView" % GetNameForKind(kind)
if mojom.IsArrayKind(kind):
return "mojo::ArrayDataView<%s>" % GetCppDataViewType(kind.kind)
if mojom.IsMapKind(kind):
return ("mojo::MapDataView<%s, %s>" % (GetCppDataViewType(kind.key_kind),
GetCppDataViewType(kind.value_kind)))
if mojom.IsStringKind(kind):
return "mojo::StringDataView"
return GetCppWrapperType(kind)
def GetCppFieldType(kind):
if mojom.IsStructKind(kind):
return ("mojo::internal::Pointer<%s>" %
GetNameForKind(kind, internal=True))
if mojom.IsUnionKind(kind):
return "%s" % GetNameForKind(kind, internal=True)
if mojom.IsArrayKind(kind):
return ("mojo::internal::Pointer<mojo::internal::Array_Data<%s>>" %
GetCppFieldType(kind.kind))
if mojom.IsMapKind(kind):
return ("mojo::internal::Pointer<mojo::internal::Map_Data<%s, %s>>" %
(GetCppFieldType(kind.key_kind), GetCppFieldType(kind.value_kind)))
if mojom.IsInterfaceKind(kind):
return "mojo::internal::Interface_Data"
if mojom.IsInterfaceRequestKind(kind):
return "mojo::internal::Handle_Data"
if mojom.IsAssociatedInterfaceKind(kind):
return "mojo::internal::AssociatedInterface_Data"
if mojom.IsAssociatedInterfaceRequestKind(kind):
return "mojo::internal::AssociatedInterfaceRequest_Data"
if mojom.IsEnumKind(kind):
return "int32_t"
if mojom.IsStringKind(kind):
return "mojo::internal::Pointer<mojo::internal::String_Data>"
if mojom.IsAnyHandleKind(kind):
return "mojo::internal::Handle_Data"
return _kind_to_cpp_type[kind]
def GetCppUnionFieldType(kind):
if mojom.IsUnionKind(kind):
return ("mojo::internal::Pointer<%s>" % GetNameForKind(kind, internal=True))
return GetCppFieldType(kind)
def GetUnionGetterReturnType(kind):
if mojom.IsReferenceKind(kind):
return "%s&" % GetCppWrapperType(kind)
return GetCppWrapperType(kind)
def GetUnmappedTypeForSerializer(kind):
if mojom.IsEnumKind(kind):
return GetQualifiedNameForKind(kind)
if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
return "%sPtr" % GetQualifiedNameForKind(kind)
if mojom.IsArrayKind(kind):
return "mojo::Array<%s>" % GetUnmappedTypeForSerializer(kind.kind)
if mojom.IsMapKind(kind):
return "mojo::Map<%s, %s>" % (
GetUnmappedTypeForSerializer(kind.key_kind),
GetUnmappedTypeForSerializer(kind.value_kind))
if mojom.IsInterfaceKind(kind):
return "%sPtr" % GetQualifiedNameForKind(kind)
if mojom.IsInterfaceRequestKind(kind):
return "%sRequest" % GetQualifiedNameForKind(kind.kind)
if mojom.IsAssociatedInterfaceKind(kind):
return "%sAssociatedPtrInfo" % GetQualifiedNameForKind(kind.kind)
if mojom.IsAssociatedInterfaceRequestKind(kind):
return "%sAssociatedRequest" % GetQualifiedNameForKind(kind.kind)
if mojom.IsStringKind(kind):
return "mojo::String"
if mojom.IsGenericHandleKind(kind):
return "mojo::ScopedHandle"
if mojom.IsDataPipeConsumerKind(kind):
return "mojo::ScopedDataPipeConsumerHandle"
if mojom.IsDataPipeProducerKind(kind):
return "mojo::ScopedDataPipeProducerHandle"
if mojom.IsMessagePipeKind(kind):
return "mojo::ScopedMessagePipeHandle"
if mojom.IsSharedBufferKind(kind):
return "mojo::ScopedSharedBufferHandle"
return _kind_to_cpp_type[kind]
def TranslateConstants(token, kind):
if isinstance(token, mojom.NamedValue):
return _NameFormatter(token, _variant).FormatForCpp()
if isinstance(token, mojom.BuiltinValue):
if token.value == "double.INFINITY" or token.value == "float.INFINITY":
return "INFINITY";
if token.value == "double.NEGATIVE_INFINITY" or \
token.value == "float.NEGATIVE_INFINITY":
return "-INFINITY";
if token.value == "double.NAN" or token.value == "float.NAN":
return "NAN";
if (kind is not None and mojom.IsFloatKind(kind)):
return token if token.isdigit() else token + "f";
# Per C++11, 2.14.2, the type of an integer literal is the first of the
# corresponding list in Table 6 in which its value can be represented. In this
# case, the list for decimal constants with no suffix is:
# int, long int, long long int
# The standard considers a program ill-formed if it contains an integer
# literal that cannot be represented by any of the allowed types.
#
# As it turns out, MSVC doesn't bother trying to fall back to long long int,
# so the integral constant -2147483648 causes it grief: it decides to
# represent 2147483648 as an unsigned integer, and then warns that the unary
# minus operator doesn't make sense on unsigned types. Doh!
if kind == mojom.INT32 and token == "-2147483648":
return "(-%d - 1) /* %s */" % (
2**31 - 1, "Workaround for MSVC bug; see https://crbug.com/445618")
return "%s%s" % (token, _kind_to_cpp_literal_suffix.get(kind, ""))
def ExpressionToText(value, kind=None):
return TranslateConstants(value, kind)
def RequiresContextForDataView(kind):
for field in kind.fields:
if mojom.IsReferenceKind(field.kind):
return True
return False
def ShouldInlineStruct(struct):
# TODO(darin): Base this on the size of the wrapper class.
if len(struct.fields) > 4:
return False
for field in struct.fields:
if mojom.IsReferenceKind(field.kind) and not mojom.IsStringKind(field.kind):
return False
return True
def ShouldInlineUnion(union):
return not any(
mojom.IsReferenceKind(field.kind) and not mojom.IsStringKind(field.kind)
for field in union.fields)
def GetContainerValidateParamsCtorArgs(kind):
if mojom.IsStringKind(kind):
expected_num_elements = 0
element_is_nullable = False
key_validate_params = "nullptr"
element_validate_params = "nullptr"
enum_validate_func = "nullptr"
elif mojom.IsMapKind(kind):
expected_num_elements = 0
element_is_nullable = False
key_validate_params = GetNewContainerValidateParams(mojom.Array(
kind=kind.key_kind))
element_validate_params = GetNewContainerValidateParams(mojom.Array(
kind=kind.value_kind))
enum_validate_func = "nullptr"
else: # mojom.IsArrayKind(kind)
expected_num_elements = generator.ExpectedArraySize(kind) or 0
element_is_nullable = mojom.IsNullableKind(kind.kind)
key_validate_params = "nullptr"
element_validate_params = GetNewContainerValidateParams(kind.kind)
if mojom.IsEnumKind(kind.kind):
enum_validate_func = ("%s::Validate" %
GetQualifiedNameForKind(kind.kind, internal=True))
else:
enum_validate_func = "nullptr"
if enum_validate_func == "nullptr":
if key_validate_params == "nullptr":
return "%d, %s, %s" % (expected_num_elements,
"true" if element_is_nullable else "false",
element_validate_params)
else:
return "%s, %s" % (key_validate_params, element_validate_params)
else:
return "%d, %s" % (expected_num_elements, enum_validate_func)
def GetNewContainerValidateParams(kind):
if (not mojom.IsArrayKind(kind) and not mojom.IsMapKind(kind) and
not mojom.IsStringKind(kind)):
return "nullptr"
return "new mojo::internal::ContainerValidateParams(%s)" % (
GetContainerValidateParamsCtorArgs(kind))
class Generator(generator.Generator):
cpp_filters = {
"constant_value": ConstantValue,
"cpp_wrapper_param_type": GetCppWrapperParamType,
"cpp_data_view_type": GetCppDataViewType,
"cpp_field_type": GetCppFieldType,
"cpp_union_field_type": GetCppUnionFieldType,
"cpp_pod_type": GetCppPodType,
"cpp_union_getter_return_type": GetUnionGetterReturnType,
"cpp_wrapper_type": GetCppWrapperType,
"default_value": DefaultValue,
"expression_to_text": ExpressionToText,
"get_container_validate_params_ctor_args":
GetContainerValidateParamsCtorArgs,
"get_name_for_kind": GetNameForKind,
"get_pad": pack.GetPad,
"get_qualified_name_for_kind": GetQualifiedNameForKind,
"has_callbacks": mojom.HasCallbacks,
"has_sync_methods": mojom.HasSyncMethods,
"requires_context_for_data_view": RequiresContextForDataView,
"should_inline": ShouldInlineStruct,
"should_inline_union": ShouldInlineUnion,
"is_array_kind": mojom.IsArrayKind,
"is_enum_kind": mojom.IsEnumKind,
"is_integral_kind": mojom.IsIntegralKind,
"is_native_only_kind": IsNativeOnlyKind,
"is_any_handle_or_interface_kind": mojom.IsAnyHandleOrInterfaceKind,
"is_associated_kind": mojom.IsAssociatedKind,
"is_map_kind": mojom.IsMapKind,
"is_nullable_kind": mojom.IsNullableKind,
"is_object_kind": mojom.IsObjectKind,
"is_string_kind": mojom.IsStringKind,
"is_struct_kind": mojom.IsStructKind,
"is_typemapped_kind": IsTypemappedKind,
"is_union_kind": mojom.IsUnionKind,
"passes_associated_kinds": mojom.PassesAssociatedKinds,
"struct_size": lambda ps: ps.GetTotalSize() + _HEADER_SIZE,
"stylize_method": generator.StudlyCapsToCamel,
"under_to_camel": generator.UnderToCamel,
"unmapped_type_for_serializer": GetUnmappedTypeForSerializer,
}
def GetExtraTraitsHeaders(self):
extra_headers = set()
for entry in self.typemap.itervalues():
extra_headers.update(entry.get("traits_headers", []))
return list(extra_headers)
def GetExtraPublicHeaders(self):
extra_headers = set()
for entry in self.typemap.itervalues():
extra_headers.update(entry.get("public_headers", []))
return list(extra_headers)
def GetJinjaExports(self):
return {
"module": self.module,
"namespace": self.module.namespace,
"namespaces_as_array": NamespaceToArray(self.module.namespace),
"imports": self.module.imports,
"kinds": self.module.kinds,
"enums": self.module.enums,
"structs": self.GetStructs(),
"unions": self.GetUnions(),
"interfaces": self.GetInterfaces(),
"variant": self.variant,
"extra_traits_headers": self.GetExtraTraitsHeaders(),
"extra_public_headers": self.GetExtraPublicHeaders(),
"for_blink": self.for_blink,
"use_new_wrapper_types": self.use_new_wrapper_types,
"export_attribute": self.export_attribute,
"export_header": self.export_header,
}
@staticmethod
def GetTemplatePrefix():
return "cpp_templates"
@classmethod
def GetFilters(cls):
return cls.cpp_filters
@UseJinja("module.h.tmpl")
def GenerateModuleHeader(self):
return self.GetJinjaExports()
@UseJinja("module-internal.h.tmpl")
def GenerateModuleInternalHeader(self):
return self.GetJinjaExports()
@UseJinja("module.cc.tmpl")
def GenerateModuleSource(self):
return self.GetJinjaExports()
def GenerateFiles(self, args):
global _current_typemap
_current_typemap = self.typemap
global _for_blink
_for_blink = self.for_blink
global _use_new_wrapper_types
_use_new_wrapper_types = self.use_new_wrapper_types
global _variant
_variant = self.variant
suffix = "-%s" % self.variant if self.variant else ""
self.Write(self.GenerateModuleHeader(),
self.MatchMojomFilePath("%s%s.h" % (self.module.name, suffix)))
self.Write(self.GenerateModuleInternalHeader(),
self.MatchMojomFilePath("%s%s-internal.h" % (self.module.name, suffix)))
self.Write(self.GenerateModuleSource(),
self.MatchMojomFilePath("%s%s.cc" % (self.module.name, suffix)))
|
bsd-3-clause
| -230,045,905,145,959,070
| 36.449541
| 80
| 0.69706
| false
| 3.394312
| false
| false
| false
|
kittiu/sale-workflow
|
sale_sourced_by_line/tests/test_sale_is_delivered.py
|
1
|
3494
|
# -*- coding: utf-8 -*-
# Copyright 2014 Camptocamp SA - Yannick Vaucher
# Copyright 2017 Eficent Business and IT Consulting Services S.L.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
class TestSaleIsDelivered(TransactionCase):
"""Check the _get_shipped method of Sale Order. """
def test_sale_no_proc(self):
"""False when no procurement on both sale.order.line"""
self.assertFalse(self.sale.shipped)
def test_sale_no_proc_one_service(self):
"""False when, no procurement on both line but one is service"""
self.sale_line1.product_id = self.service_product
self.assertFalse(self.sale.shipped)
def test_sale_no_proc_all_services(self):
"""True when, no procurement on both lines but both are services"""
self.sale_line1.product_id = self.service_product
self.sale_line2.product_id = self.service_product
self.assertTrue(self.sale.shipped)
def test_sale_not_all_proc(self):
"""False, when one line with and one without procurement done"""
self.sale_line1.procurement_group_id = self.proc_group1
self.proc1.state = 'done'
self.assertFalse(self.sale.shipped)
def test_sale_proc_and_service(self):
"""True when, one line with procurement done and one line for service
"""
self.sale_line1.procurement_group_id = self.proc_group1
self.proc1.state = 'done'
self.sale_line2.product_id = self.service_product
self.assertTrue(self.sale.shipped)
def test_sale_partially_delivered(self):
"""False when, all lines with procurement, one is partially delivered
"""
self.sale_line1.procurement_group_id = self.proc_group1
self.sale_line2.procurement_group_id = self.proc_group2
self.proc1.state = 'done'
self.proc2.state = 'running'
self.assertFalse(self.sale.shipped)
def test_sale_is_delivered(self):
"""True, when both line have a done procurement"""
self.sale_line1.procurement_group_id = self.proc_group1
self.sale_line2.procurement_group_id = self.proc_group2
self.proc1.state = 'done'
self.proc2.state = 'done'
self.assertTrue(self.sale.shipped)
def setUp(self):
"""Setup a Sale Order with 2 lines.
And prepare procurements
I use Model.new to get a model instance that is not saved to the
database, but has working methods.
"""
super(TestSaleIsDelivered, self).setUp()
so = self.env['sale.order']
sol = self.env['sale.order.line']
product = self.env['product.product']
procurement = self.env['procurement.order']
procurement_group = self.env['procurement.group']
self.sale = so.new()
self.sale_line1 = sol.new()
self.sale_line2 = sol.new()
self.sale_line1.order_id = self.sale
self.sale_line2.order_id = self.sale
self.sale.order_line = sol.browse([self.sale_line1.id,
self.sale_line2.id])
self.proc1 = procurement.new()
self.proc_group1 = procurement_group.new()
self.proc_group1.procurement_ids = self.proc1
self.proc2 = procurement.new()
self.proc_group2 = procurement_group.new()
self.proc_group2.procurement_ids = self.proc2
self.service_product = product.new({'type': 'service'})
|
agpl-3.0
| -9,047,940,310,241,662,000
| 36.978261
| 77
| 0.641671
| false
| 3.635796
| true
| false
| false
|
scorpionis/docklet
|
src/env.py
|
1
|
2460
|
import os
def getenv(key):
if key == "CLUSTER_NAME":
return os.environ.get("CLUSTER_NAME", "docklet-vc")
elif key == "FS_PREFIX":
return os.environ.get("FS_PREFIX", "/opt/docklet")
elif key == "CLUSTER_SIZE":
return int(os.environ.get("CLUSTER_SIZE", 1))
elif key == "CLUSTER_NET":
return os.environ.get("CLUSTER_NET", "172.16.0.1/16")
elif key == "CONTAINER_CPU":
return int(os.environ.get("CONTAINER_CPU", 100000))
elif key == "CONTAINER_DISK":
return int(os.environ.get("CONTAINER_DISK", 1000))
elif key == "CONTAINER_MEMORY":
return int(os.environ.get("CONTAINER_MEMORY", 1000))
elif key == "DISKPOOL_SIZE":
return int(os.environ.get("DISKPOOL_SIZE", 5000))
elif key == "ETCD":
return os.environ.get("ETCD", "localhost:2379")
elif key == "NETWORK_DEVICE":
return os.environ.get("NETWORK_DEVICE", "eth0")
elif key == "MASTER_IP":
return os.environ.get("MASTER_IP", "0.0.0.0")
elif key == "MASTER_PORT":
return int(os.environ.get("MASTER_PORT", 9000))
elif key == "WORKER_PORT":
return int(os.environ.get("WORKER_PORT", 9001))
elif key == "PROXY_PORT":
return int(os.environ.get("PROXY_PORT", 8000))
elif key == "PROXY_API_PORT":
return int(os.environ.get("PROXY_API_PORT", 8001))
elif key == "WEB_PORT":
return int(os.environ.get("WEB_PORT", 8888))
elif key == "PORTAL_URL":
return os.environ.get("PORTAL_URL",
"http://"+getenv("MASTER_IP") + ":" + str(getenv("PROXY_PORT")))
elif key == "LOG_LEVEL":
return os.environ.get("LOG_LEVEL", "DEBUG")
elif key == "LOG_LIFE":
return int(os.environ.get("LOG_LIFE", 10))
elif key == "WEB_LOG_LEVEL":
return os.environ.get("WEB_LOG_LEVEL", "DEBUG")
elif key == "STORAGE":
return os.environ.get("STORAGE", "file")
elif key =="EXTERNAL_LOGIN":
return os.environ.get("EXTERNAL_LOGIN", "False")
elif key =="EMAIL_FROM_ADDRESS":
return os.environ.get("EMAIL_FROM_ADDRESS", "")
elif key =="ADMIN_EMAIL_ADDRESS":
return os.environ.get("ADMIN_EMAIL_ADDRESS", "")
elif key =="DATA_QUOTA":
return os.environ.get("DATA_QUOTA", "False")
elif key =="DATA_QUOTA_CMD":
return os.environ.get("DATA_QUOTA_CMD", "gluster volume quota docklet-volume limit-usage %s %s")
else:
return os.environ[key]
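# Illustrative sketch (editor's addition, not part of the original module):
# defaults apply until the variable is exported; keys without a default fall
# through to os.environ and raise KeyError when unset.
def _example_getenv_defaults():
    assert getenv("WEB_PORT") == 8888 or "WEB_PORT" in os.environ
    return getenv("CLUSTER_NAME")  # "docklet-vc" unless overridden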
|
bsd-3-clause
| 8,593,028,398,002,000,000
| 41.413793
| 104
| 0.595122
| false
| 3.324324
| false
| false
| false
|
martinzlocha/mad
|
mad/settings.py
|
1
|
3790
|
"""
Django settings for mad project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config.SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config.DEBUG
ALLOWED_HOSTS = config.ALLOWED_HOSTS
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'portal',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mad.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mad.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
if 'RDS_DB_NAME' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
if config.DEBUG:
# Allows static files to be outside of an app
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
'static/',
]
else:
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
mit
| 7,744,071,093,783,556,000
| 25.319444
| 91
| 0.656992
| false
| 3.568738
| false
| false
| false
|
fdroidtravis/fdroidserver
|
fdroidserver/scanner.py
|
1
|
20098
|
#!/usr/bin/env python3
#
# scanner.py - part of the FDroid server tools
# Copyright (C) 2010-13, Ciaran Gultnieks, ciaran@ciarang.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import imghdr
import json
import os
import re
import sys
import traceback
from argparse import ArgumentParser
import logging
import itertools
from . import _
from . import common
from . import metadata
from .exception import BuildException, VCSException
config = None
options = None
DEFAULT_JSON_PER_BUILD = {'errors': [], 'warnings': [], 'infos': []} # type: ignore
json_per_build = DEFAULT_JSON_PER_BUILD
MAVEN_URL_REGEX = re.compile(r"""\smaven\s*{.*?(?:setUrl|url)\s*=?\s*(?:uri)?\(?\s*["']?([^\s"']+)["']?[^}]*}""",
re.DOTALL)
CODE_SIGNATURES = {
# The `apkanalyzer dex packages` output looks like this:
# M d 1 1 93 <packagename> <other stuff>
# The first column has P/C/M/F for package, class, method or field
# The second column has x/k/r/d for removed, kept, referenced and defined.
# We already filter for defined only in the apkanalyzer call. 'r' will be
# for things referenced but not distributed in the apk.
exp: re.compile(r'.[\s]*d[\s]*[0-9]*[\s]*[0-9*][\s]*[0-9]*[\s]*' + exp, re.IGNORECASE) for exp in [
r'(com\.google\.firebase[^\s]*)',
r'(com\.google\.android\.gms[^\s]*)',
r'(com\.google\.android\.play\.core[^\s]*)',
r'(com\.google\.tagmanager[^\s]*)',
r'(com\.google\.analytics[^\s]*)',
r'(com\.android\.billing[^\s]*)',
]
}
# Common known non-free blobs (always lower case):
NON_FREE_GRADLE_LINES = {
exp: re.compile(r'.*' + exp, re.IGNORECASE) for exp in [
r'flurryagent',
r'paypal.*mpl',
r'admob.*sdk.*android',
r'google.*ad.*view',
r'google.*admob',
r'google.*play.*services',
r'com.google.android.play:core.*',
r'androidx.work:work-gcm',
r'crittercism',
r'heyzap',
r'jpct.*ae',
r'youtube.*android.*player.*api',
r'bugsense',
r'crashlytics',
r'ouya.*sdk',
r'libspen23',
r'firebase',
r'''["']com.facebook.android['":]''',
r'cloudrail',
r'com.tencent.bugly',
r'appcenter-push',
]
}
def get_gradle_compile_commands(build):
compileCommands = ['compile',
'provided',
'apk',
'implementation',
'api',
'compileOnly',
'runtimeOnly']
buildTypes = ['', 'release']
flavors = ['']
if build.gradle and build.gradle != ['yes']:
flavors += build.gradle
commands = [''.join(c) for c in itertools.product(flavors, buildTypes, compileCommands)]
return [re.compile(r'\s*' + c, re.IGNORECASE) for c in commands]
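# Illustrative sketch (editor's addition, not part of the original module):
# with a hypothetical product flavor "prod", the generated patterns match
# dependency lines such as "implementation", "releaseApi" or
# "prodReleaseImplementation" at the start of a build.gradle line.
def _example_gradle_commands():
    build = metadata.Build()
    build.gradle = ['prod']
    commands = get_gradle_compile_commands(build)
    line = "    prodReleaseImplementation 'com.example:lib:1.0'"
    return any(regex.match(line) for regex in commands)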
def scan_binary(apkfile):
"""Scan output of apkanalyzer for known non-free classes
apkanalyzer produces useful output when it can run, but it does
not support all recent JDK versions, and also some DEX versions,
    so we cannot count on it to always produce useful output or even
to run without exiting with an error.
"""
logging.info(_('Scanning APK with apkanalyzer for known non-free classes.'))
result = common.SdkToolsPopen(["apkanalyzer", "dex", "packages", "--defined-only", apkfile], output=False)
if result.returncode != 0:
logging.warning(_('scanner not cleanly run apkanalyzer: %s') % result.output)
problems = 0
for suspect, regexp in CODE_SIGNATURES.items():
matches = regexp.findall(result.output)
if matches:
for m in set(matches):
logging.debug("Found class '%s'" % m)
problems += 1
if problems:
logging.critical("Found problems in %s" % apkfile)
return problems
def scan_source(build_dir, build=metadata.Build()):
"""Scan the source code in the given directory (and all subdirectories)
and return the number of fatal problems encountered
"""
count = 0
whitelisted = [
'firebase-jobdispatcher', # https://github.com/firebase/firebase-jobdispatcher-android/blob/master/LICENSE
'com.firebaseui', # https://github.com/firebase/FirebaseUI-Android/blob/master/LICENSE
'geofire-android' # https://github.com/firebase/geofire-java/blob/master/LICENSE
]
def is_whitelisted(s):
return any(wl in s for wl in whitelisted)
def suspects_found(s):
for n, r in NON_FREE_GRADLE_LINES.items():
if r.match(s) and not is_whitelisted(s):
yield n
allowed_repos = [re.compile(r'^https://' + re.escape(repo) + r'/*') for repo in [
'repo1.maven.org/maven2', # mavenCentral()
'jcenter.bintray.com', # jcenter()
'jitpack.io',
'www.jitpack.io',
'repo.maven.apache.org/maven2',
'oss.jfrog.org/artifactory/oss-snapshot-local',
'oss.sonatype.org/content/repositories/snapshots',
'oss.sonatype.org/content/repositories/releases',
'oss.sonatype.org/content/groups/public',
'clojars.org/repo', # Clojure free software libs
's3.amazonaws.com/repo.commonsware.com', # CommonsWare
'plugins.gradle.org/m2', # Gradle plugin repo
'maven.google.com', # Google Maven Repo, https://developer.android.com/studio/build/dependencies.html#google-maven
]
] + [re.compile(r'^file://' + re.escape(repo) + r'/*') for repo in [
'/usr/share/maven-repo', # local repo on Debian installs
]
]
scanignore = common.getpaths_map(build_dir, build.scanignore)
scandelete = common.getpaths_map(build_dir, build.scandelete)
scanignore_worked = set()
scandelete_worked = set()
def toignore(path_in_build_dir):
for k, paths in scanignore.items():
for p in paths:
if path_in_build_dir.startswith(p):
scanignore_worked.add(k)
return True
return False
def todelete(path_in_build_dir):
for k, paths in scandelete.items():
for p in paths:
if path_in_build_dir.startswith(p):
scandelete_worked.add(k)
return True
return False
def ignoreproblem(what, path_in_build_dir):
"""
:param what: string describing the problem, will be printed in log messages
:param path_in_build_dir: path to the file relative to `build`-dir
"returns: 0 as we explicitly ignore the file, so don't count an error
"""
msg = ('Ignoring %s at %s' % (what, path_in_build_dir))
logging.info(msg)
if json_per_build is not None:
json_per_build['infos'].append([msg, path_in_build_dir])
return 0
def removeproblem(what, path_in_build_dir, filepath):
"""
:param what: string describing the problem, will be printed in log messages
:param path_in_build_dir: path to the file relative to `build`-dir
:param filepath: Path (relative to our current path) to the file
"returns: 0 as we deleted the offending file
"""
msg = ('Removing %s at %s' % (what, path_in_build_dir))
logging.info(msg)
if json_per_build is not None:
json_per_build['infos'].append([msg, path_in_build_dir])
try:
os.remove(filepath)
except FileNotFoundError:
# File is already gone, nothing to do.
            # This can happen if we find multiple problems in one file that is
            # set up for scandelete, e.g. build.gradle files containing
            # multiple unknown maven repos.
pass
return 0
def warnproblem(what, path_in_build_dir):
"""
:param what: string describing the problem, will be printed in log messages
:param path_in_build_dir: path to the file relative to `build`-dir
:returns: 0, as warnings don't count as errors
"""
if toignore(path_in_build_dir):
return 0
logging.warning('Found %s at %s' % (what, path_in_build_dir))
if json_per_build is not None:
json_per_build['warnings'].append([what, path_in_build_dir])
return 0
def handleproblem(what, path_in_build_dir, filepath):
"""Dispatches to problem handlers (ignore, delete, warn) or returns 1
for increasing the error count
:param what: string describing the problem, will be printed in log messages
:param path_in_build_dir: path to the file relative to `build`-dir
:param filepath: Path (relative to our current path) to the file
:returns: 0 if the problem was ignored/deleted/is only a warning, 1 otherwise
"""
if toignore(path_in_build_dir):
return ignoreproblem(what, path_in_build_dir)
if todelete(path_in_build_dir):
return removeproblem(what, path_in_build_dir, filepath)
if 'src/test' in path_in_build_dir or '/test/' in path_in_build_dir:
return warnproblem(what, path_in_build_dir)
if options and 'json' in vars(options) and options.json:
json_per_build['errors'].append([what, path_in_build_dir])
if options and (options.verbose or not ('json' in vars(options) and options.json)):
logging.error('Found %s at %s' % (what, path_in_build_dir))
return 1
def is_executable(path):
return os.path.exists(path) and os.access(path, os.X_OK)
textchars = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7f})
def is_binary(path):
d = None
with open(path, 'rb') as f:
d = f.read(1024)
return bool(d.translate(None, textchars))
# False positives patterns for files that are binary and executable.
safe_paths = [re.compile(r) for r in [
r".*/drawable[^/]*/.*\.png$", # png drawables
r".*/mipmap[^/]*/.*\.png$", # png mipmaps
]
]
    def is_image_file(path):
        if imghdr.what(path) is not None:
            return True
        return False
def safe_path(path_in_build_dir):
for sp in safe_paths:
if sp.match(path_in_build_dir):
return True
return False
gradle_compile_commands = get_gradle_compile_commands(build)
def is_used_by_gradle(line):
return any(command.match(line) for command in gradle_compile_commands)
# Iterate through all files in the source code
for root, dirs, files in os.walk(build_dir, topdown=True):
# It's topdown, so checking the basename is enough
for ignoredir in ('.hg', '.git', '.svn', '.bzr'):
if ignoredir in dirs:
dirs.remove(ignoredir)
for curfile in files:
if curfile in ['.DS_Store']:
continue
# Path (relative) to the file
filepath = os.path.join(root, curfile)
if os.path.islink(filepath):
continue
path_in_build_dir = os.path.relpath(filepath, build_dir)
extension = os.path.splitext(path_in_build_dir)[1]
if curfile in ('gradle-wrapper.jar', 'gradlew', 'gradlew.bat'):
removeproblem(curfile, path_in_build_dir, filepath)
elif extension == '.apk':
removeproblem(_('Android APK file'), path_in_build_dir, filepath)
elif extension == '.a':
count += handleproblem(_('static library'), path_in_build_dir, filepath)
elif extension == '.aar':
count += handleproblem(_('Android AAR library'), path_in_build_dir, filepath)
elif extension == '.class':
count += handleproblem(_('Java compiled class'), path_in_build_dir, filepath)
elif extension == '.dex':
count += handleproblem(_('Android DEX code'), path_in_build_dir, filepath)
elif extension == '.gz':
count += handleproblem(_('gzip file archive'), path_in_build_dir, filepath)
elif extension == '.so':
count += handleproblem(_('shared library'), path_in_build_dir, filepath)
elif extension == '.zip':
count += handleproblem(_('ZIP file archive'), path_in_build_dir, filepath)
elif extension == '.jar':
for name in suspects_found(curfile):
count += handleproblem('usual suspect \'%s\'' % name, path_in_build_dir, filepath)
count += handleproblem(_('Java JAR file'), path_in_build_dir, filepath)
elif extension == '.java':
if not os.path.isfile(filepath):
continue
with open(filepath, 'r', errors='replace') as f:
for line in f:
if 'DexClassLoader' in line:
count += handleproblem('DexClassLoader', path_in_build_dir, filepath)
break
elif extension == '.gradle':
if not os.path.isfile(filepath):
continue
with open(filepath, 'r', errors='replace') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if is_used_by_gradle(line):
for name in suspects_found(line):
count += handleproblem("usual suspect \'%s\'" % (name),
path_in_build_dir, filepath)
noncomment_lines = [line for line in lines if not common.gradle_comment.match(line)]
no_comments = re.sub(r'/\*.*?\*/', '', ''.join(noncomment_lines), flags=re.DOTALL)
for url in MAVEN_URL_REGEX.findall(no_comments):
if not any(r.match(url) for r in allowed_repos):
count += handleproblem('unknown maven repo \'%s\'' % url, path_in_build_dir, filepath)
elif extension in ['', '.bin', '.out', '.exe']:
if is_binary(filepath):
count += handleproblem('binary', path_in_build_dir, filepath)
elif is_executable(filepath):
if is_binary(filepath) and not (safe_path(path_in_build_dir) or is_image_file(filepath)):
warnproblem(_('executable binary, possibly code'), path_in_build_dir)
for p in scanignore:
if p not in scanignore_worked:
logging.error(_('Unused scanignore path: %s') % p)
count += 1
for p in scandelete:
if p not in scandelete_worked:
logging.error(_('Unused scandelete path: %s') % p)
count += 1
return count
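# Illustrative sketch (not part of the original scanner module): the nested
# handleproblem() above resolves each finding in a fixed order -- scanignore
# prefix match, then scandelete prefix match, then a warning for test sources,
# and only otherwise an error. The prefixes and paths below are made up for
# demonstration and mirror that order without touching the real metadata.
def _dispatch_order_demo():
    scanignore_prefixes = ['libs/ignored']   # hypothetical scanignore entries
    scandelete_prefixes = ['binaries/']      # hypothetical scandelete entries
    def resolve(path):
        if any(path.startswith(p) for p in scanignore_prefixes):
            return 'ignored'
        if any(path.startswith(p) for p in scandelete_prefixes):
            return 'deleted'
        if 'src/test' in path or '/test/' in path:
            return 'warned'
        return 'error'
    assert resolve('libs/ignored/foo.jar') == 'ignored'
    assert resolve('binaries/blob.so') == 'deleted'
    assert resolve('app/src/test/data.zip') == 'warned'
    assert resolve('app/libs/blob.jar') == 'error'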
def main():
global config, options, json_per_build
# Parse command line...
parser = ArgumentParser(usage="%(prog)s [options] [APPID[:VERCODE] [APPID[:VERCODE] ...]]")
common.setup_global_opts(parser)
parser.add_argument("appid", nargs='*', help=_("application ID with optional versionCode in the form APPID[:VERCODE]"))
parser.add_argument("-f", "--force", action="store_true", default=False,
help=_("Force scan of disabled apps and builds."))
parser.add_argument("--json", action="store_true", default=False,
help=_("Output JSON to stdout."))
metadata.add_metadata_arguments(parser)
options = parser.parse_args()
metadata.warnings_action = options.W
json_output = dict()
if options.json:
if options.verbose:
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
else:
logging.getLogger().setLevel(logging.ERROR)
config = common.read_config(options)
# Read all app and srclib metadata
allapps = metadata.read_metadata()
apps = common.read_app_args(options.appid, allapps, True)
probcount = 0
build_dir = 'build'
if not os.path.isdir(build_dir):
logging.info("Creating build directory")
os.makedirs(build_dir)
srclib_dir = os.path.join(build_dir, 'srclib')
extlib_dir = os.path.join(build_dir, 'extlib')
for appid, app in apps.items():
json_per_appid = dict()
if app.Disabled and not options.force:
logging.info(_("Skipping {appid}: disabled").format(appid=appid))
json_per_appid['disabled'] = json_per_build['infos'].append('Skipping: disabled')
continue
try:
if app.RepoType == 'srclib':
build_dir = os.path.join('build', 'srclib', app.Repo)
else:
build_dir = os.path.join('build', appid)
if app.get('Builds'):
logging.info(_("Processing {appid}").format(appid=appid))
# Set up vcs interface and make sure we have the latest code...
vcs = common.getvcs(app.RepoType, app.Repo, build_dir)
else:
logging.info(_("{appid}: no builds specified, running on current source state")
.format(appid=appid))
json_per_build = DEFAULT_JSON_PER_BUILD
json_per_appid['current-source-state'] = json_per_build
count = scan_source(build_dir)
if count > 0:
logging.warning(_('Scanner found {count} problems in {appid}:')
.format(count=count, appid=appid))
probcount += count
app['Builds'] = []
for build in app.get('Builds', []):
json_per_build = DEFAULT_JSON_PER_BUILD
json_per_appid[build.versionCode] = json_per_build
if build.disable and not options.force:
logging.info("...skipping version %s - %s" % (
build.versionName, build.get('disable', build.commit[1:])))
continue
logging.info("...scanning version " + build.versionName)
# Prepare the source code...
common.prepare_source(vcs, app, build,
build_dir, srclib_dir,
extlib_dir, False)
count = scan_source(build_dir, build)
if count > 0:
logging.warning(_('Scanner found {count} problems in {appid}:{versionCode}:')
.format(count=count, appid=appid, versionCode=build.versionCode))
probcount += count
except BuildException as be:
logging.warning('Could not scan app %s due to BuildException: %s' % (
appid, be))
probcount += 1
except VCSException as vcse:
logging.warning('VCS error while scanning app %s: %s' % (appid, vcse))
probcount += 1
except Exception:
logging.warning('Could not scan app %s due to unknown error: %s' % (
appid, traceback.format_exc()))
probcount += 1
for k, v in json_per_appid.items():
if len(v['errors']) or len(v['warnings']) or len(v['infos']):
json_output[appid] = json_per_appid
break
logging.info(_("Finished"))
if options.json:
print(json.dumps(json_output))
else:
print(_("%d problems found") % probcount)
if __name__ == "__main__":
main()
|
agpl-3.0
| 577,908,906,297,143,800
| 39.276553
| 123
| 0.574535
| false
| 3.889685
| false
| false
| false
|
redtoad/python-amazon-product-api
|
amazonproduct/contrib/retry.py
|
1
|
1626
|
import socket
import time
import sys
# support Python 2 and Python 3 without conversion
try:
from urllib.request import URLError
except ImportError:
from urllib2 import URLError
from amazonproduct.api import API
class RetryAPI (API):
"""
API which will try up to ``TRIES`` times to fetch a result from Amazon
should it run into a timeout. For the time being this will remain in
:mod:`amazonproduct.contrib` but its functionality may be merged into the
main API at a later date.
Based on work by Jerry Ji
"""
#: Max number of tries before giving up
TRIES = 5
#: Delay between tries in seconds
DELAY = 3
#: Between each try the delay will be lengthened by this backoff multiplier
BACKOFF = 1
def _fetch(self, url):
"""
Retrieves XML response from Amazon. In case of a timeout, it will try
        :const:`~RetryAPI.TRIES` times before raising an error.
"""
attempts = 0
delay = self.DELAY
while True:
try:
attempts += 1
return API._fetch(self, url)
except URLError:
e = sys.exc_info()[1] # Python 2/3 compatible
# if a timeout occurred
# wait for some time before trying again
reason = getattr(e, 'reason', None)
if isinstance(reason, socket.timeout) and attempts < self.TRIES:
time.sleep(delay)
delay *= self.BACKOFF
continue
# otherwise reraise the original error
raise
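# Illustrative usage sketch (not part of the original module). The constructor
# arguments are placeholders -- how API is configured (access key, secret,
# locale or a config file) depends on the python-amazon-product-api version in
# use; only the retry/backoff attributes shown are defined above.
def _example_usage():
    api = RetryAPI(locale='us')           # placeholder configuration
    api.TRIES = 3                         # give up after 3 attempts
    api.DELAY = 1                         # first wait: 1 second
    api.BACKOFF = 2                       # then 2s, 4s, ...
    # any regular API call now retries on socket timeouts
    return api.item_lookup('0747532699')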
|
bsd-3-clause
| -2,704,879,142,169,400,000
| 26.559322
| 80
| 0.589176
| false
| 4.659026
| false
| false
| false
|
mapr/sahara
|
sahara/plugins/mapr/plugin.py
|
1
|
2831
|
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sahara.plugins.mapr.versions.version_handler_factory as vhf
import sahara.plugins.provisioning as p
class MapRPlugin(p.ProvisioningPluginBase):
title = 'MapR Hadoop Distribution'
description = ('The MapR Distribution provides a full Hadoop stack that'
' includes the MapR File System (MapR-FS), MapReduce,'
' a complete Hadoop ecosystem, and the MapR Control System'
' user interface')
user = 'mapr'
def _get_handler(self, hadoop_version):
return vhf.VersionHandlerFactory.get().get_handler(hadoop_version)
def get_title(self):
return MapRPlugin.title
def get_description(self):
return MapRPlugin.description
def get_versions(self):
return vhf.VersionHandlerFactory.get().get_versions()
def get_node_processes(self, hadoop_version):
return self._get_handler(hadoop_version).get_np_dict()
def get_configs(self, hadoop_version):
return self._get_handler(hadoop_version).get_configs()
def configure_cluster(self, cluster):
self._get_handler(cluster.hadoop_version).configure_cluster(cluster)
def start_cluster(self, cluster):
self._get_handler(cluster.hadoop_version).start_cluster(cluster)
def validate(self, cluster):
self._get_handler(cluster.hadoop_version).validate(cluster)
def validate_scaling(self, cluster, existing, additional):
v_handler = self._get_handler(cluster.hadoop_version)
v_handler.validate_scaling(cluster, existing, additional)
def scale_cluster(self, cluster, instances):
v_handler = self._get_handler(cluster.hadoop_version)
v_handler.scale_cluster(cluster, instances)
def decommission_nodes(self, cluster, instances):
v_handler = self._get_handler(cluster.hadoop_version)
v_handler.decommission_nodes(cluster, instances)
def get_edp_engine(self, cluster, job_type):
v_handler = self._get_handler(cluster.hadoop_version)
return v_handler.get_edp_engine(cluster, job_type)
def get_open_ports(self, node_group):
v_handler = self._get_handler(node_group.cluster.hadoop_version)
return v_handler.get_open_ports(node_group)
|
apache-2.0
| -8,880,976,792,963,895,000
| 37.780822
| 78
| 0.702932
| false
| 3.904828
| false
| false
| false
|
jkettleb/iris
|
lib/iris/tests/unit/experimental/um/test_Field.py
|
1
|
6019
|
# (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for :class:`iris.experimental.um.Field`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import mock
import numpy as np
from iris.experimental.um import Field
class Test_int_headers(tests.IrisTest):
def test(self):
field = Field(np.arange(45), list(range(19)), None)
self.assertArrayEqual(field.int_headers, np.arange(45))
class Test_real_headers(tests.IrisTest):
def test(self):
field = Field(list(range(45)), np.arange(19), None)
self.assertArrayEqual(field.real_headers, np.arange(19))
class Test___eq__(tests.IrisTest):
def test_equal(self):
field1 = Field(list(range(45)), list(range(19)), None)
field2 = Field(np.arange(45), np.arange(19), None)
self.assertTrue(field1.__eq__(field2))
def test_not_equal_ints(self):
field1 = Field(list(range(45)), list(range(19)), None)
field2 = Field(np.arange(45, 90), np.arange(19), None)
self.assertFalse(field1.__eq__(field2))
def test_not_equal_reals(self):
field1 = Field(list(range(45)), list(range(19)), None)
field2 = Field(np.arange(45), np.arange(19, 38), None)
self.assertFalse(field1.__eq__(field2))
def test_not_equal_data(self):
field1 = Field(list(range(45)), list(range(19)), None)
field2 = Field(np.arange(45), np.arange(19), np.zeros(3))
self.assertFalse(field1.__eq__(field2))
def test_invalid(self):
field1 = Field(list(range(45)), list(range(19)), None)
self.assertIs(field1.__eq__('foo'), NotImplemented)
class Test___ne__(tests.IrisTest):
def test_equal(self):
field1 = Field(list(range(45)), list(range(19)), None)
field2 = Field(np.arange(45), np.arange(19), None)
self.assertFalse(field1.__ne__(field2))
def test_not_equal_ints(self):
field1 = Field(list(range(45)), list(range(19)), None)
field2 = Field(np.arange(45, 90), np.arange(19), None)
self.assertTrue(field1.__ne__(field2))
def test_not_equal_reals(self):
field1 = Field(list(range(45)), list(range(19)), None)
field2 = Field(np.arange(45), np.arange(19, 38), None)
self.assertTrue(field1.__ne__(field2))
def test_not_equal_data(self):
field1 = Field(list(range(45)), list(range(19)), None)
field2 = Field(np.arange(45), np.arange(19), np.zeros(3))
self.assertTrue(field1.__ne__(field2))
def test_invalid(self):
field1 = Field(list(range(45)), list(range(19)), None)
self.assertIs(field1.__ne__('foo'), NotImplemented)
class Test_num_values(tests.IrisTest):
def test_64(self):
field = Field(list(range(45)), list(range(19)), None)
self.assertEqual(field.num_values(), 64)
def test_128(self):
field = Field(list(range(45)), list(range(83)), None)
self.assertEqual(field.num_values(), 128)
class Test_get_data(tests.IrisTest):
def test_None(self):
field = Field([], [], None)
self.assertIsNone(field.get_data())
def test_ndarray(self):
data = np.arange(12).reshape(3, 4)
field = Field([], [], data)
self.assertIs(field.get_data(), data)
def test_provider(self):
provider = mock.Mock(read_data=lambda: mock.sentinel.DATA)
field = Field([], [], provider)
self.assertIs(field.get_data(), mock.sentinel.DATA)
class Test_set_data(tests.IrisTest):
def test_None(self):
data = np.arange(12).reshape(3, 4)
field = Field([], [], data)
field.set_data(None)
self.assertIsNone(field.get_data())
def test_ndarray(self):
field = Field([], [], None)
data = np.arange(12).reshape(3, 4)
field.set_data(data)
self.assertArrayEqual(field.get_data(), data)
def test_provider(self):
provider = mock.Mock(read_data=lambda: mock.sentinel.DATA)
field = Field([], [], None)
field.set_data(provider)
self.assertIs(field.get_data(), mock.sentinel.DATA)
class Test__can_copy_deferred_data(tests.IrisTest):
def _check_formats(self,
old_lbpack, new_lbpack,
old_bacc=-6, new_bacc=-6,
absent_provider=False):
lookup_entry = mock.Mock(lbpack=old_lbpack, bacc=old_bacc)
provider = mock.Mock(lookup_entry=lookup_entry)
if absent_provider:
# Replace the provider with a simple array.
provider = np.zeros(2)
field = Field(list(range(45)), list(range(19)), provider)
return field._can_copy_deferred_data(new_lbpack, new_bacc)
def test_okay_simple(self):
self.assertTrue(self._check_formats(1234, 1234))
def test_fail_different_lbpack(self):
self.assertFalse(self._check_formats(1234, 1238))
def test_fail_nodata(self):
self.assertFalse(self._check_formats(1234, 1234, absent_provider=True))
def test_fail_different_bacc(self):
self.assertFalse(self._check_formats(1234, 1234, new_bacc=-8))
if __name__ == '__main__':
tests.main()
|
lgpl-3.0
| 4,416,094,728,972,679,700
| 33.994186
| 79
| 0.633328
| false
| 3.406338
| true
| false
| false
|
pfwangthu/Convolutional-Neural-Networks
|
display.py
|
1
|
7128
|
import matplotlib
#Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy.io as sio
import os
import sys
import numpy
import theano
import theano.tensor as T
import gzip
import cPickle
from convolutional_mlp import LeNetConvPoolLayer
from logistic_sgd import LogisticRegression
from mlp import HiddenLayer
def display(params, digit, epoch, mode = 'mat', size = (56, 56)):
#epoch contains a list of numbers to show
#for example, epoch = [0, 2, 4] can show epoch 0 (original stage) and epoch 2 4
#after running the CNN, params can be used directly, and can also use numpy.load('params.npy') to get
#digit is a single digit of image set, for example, digit = train_set_x.get_value()[number]
nkerns=[20, 50]
rng = numpy.random.RandomState(23455)
#show original digit
if os.path.exists('digit') == 0:
os.mkdir('digit')
if mode == 'png':
plt.figure(1)
plt.gray()
plt.axis('off')
plt.imshow(digit.reshape(size))
plt.savefig('digit/activity of layer0 (original digit).png')
digit = digit.reshape(1, 1, size[0], size[1])
inputdigit = T.tensor4()
#building CNN with exactly the same parameters
print '...building layer1'
layer0_input = inputdigit
layer0 = LeNetConvPoolLayer(rng, input=layer0_input,
image_shape=(1, 1, size[0], size[1]),
filter_shape=(nkerns[0], 1, 5, 5), poolsize=(2, 2))
print '...building layer2'
layer1 = LeNetConvPoolLayer(rng, input=layer0.output,
image_shape=(1, nkerns[0], (size[0] - 4) / 2, (size[1] - 4) / 2),
filter_shape=(nkerns[1], nkerns[0], 5, 5), poolsize=(2, 2))
print '...building layer3'
layer2_input = layer1.output.flatten(2)
layer2 = HiddenLayer(rng, input=layer2_input, n_in=nkerns[1] * (size[0] / 4 - 3) * (size[1] / 4 - 3),
n_out=500, activation=T.tanh)
print '...building layer4'
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
f = theano.function(inputs = [inputdigit], outputs = [layer0.conv_out, layer0.output, layer1.conv_out, layer1.output, layer2.output, layer3.p_y_given_x, layer3.y_pred])
#export filters and activity in different epochs
for num in epoch:
print '...epoch ' + str(num)
layer3.W.set_value(params[num][0])
layer3.b.set_value(params[num][1])
layer2.W.set_value(params[num][2])
layer2.b.set_value(params[num][3])
layer1.W.set_value(params[num][4])
layer1.b.set_value(params[num][5])
layer0.W.set_value(params[num][6])
layer0.b.set_value(params[num][7])
[conv0, output0, conv1, output1, output2, output3, y] = f(digit)
if mode == 'png':
plt.figure(2)
plt.gray()
for i in range(nkerns[0]):
plt.subplot(4, 5, i + 1)
plt.axis('off')
plt.imshow(layer0.W.get_value()[i, 0])
plt.savefig('digit/filter of layer1 in epoch ' + str(num) + '.png')
plt.figure(3)
plt.gray()
for i in range(nkerns[1]):
plt.subplot(5, 10, i + 1)
plt.axis('off')
plt.imshow(layer1.W.get_value()[i, 0])
plt.savefig('digit/filter of layer2 in epoch ' + str(num) + '.png')
plt.figure(4)
plt.gray()
plt.axis('off')
plt.imshow(layer2.W.get_value())
plt.savefig('digit/filter of layer3 in epoch ' + str(num) + '.png')
plt.figure(5)
plt.gray()
plt.axis('off')
plt.imshow(layer3.W.get_value())
plt.savefig('digit/filter of layer4 in epoch ' + str(num) + '.png')
plt.figure(6)
plt.gray()
for i in range(nkerns[0]):
plt.subplot(4, 5, i + 1)
plt.axis('off')
plt.imshow(output0[0, i])
plt.savefig('digit/activity of layer1 after downsampling in epoch ' + str(num) + '.png')
plt.figure(7)
plt.gray()
plt.axis('off')
for i in range(nkerns[1]):
plt.subplot(5, 10, i + 1)
plt.axis('off')
plt.imshow(conv1[0, i])
plt.savefig('digit/activity of layer2 before downsampling in epoch ' + str(num) + '.png')
plt.figure(8)
plt.gray()
plt.axis('off')
for i in range(nkerns[0]):
plt.subplot(4, 5, i + 1)
plt.axis('off')
plt.imshow(conv0[0, i])
plt.savefig('digit/activity of layer1 before downsampling in epoch ' + str(num) + '.png')
plt.figure(9)
plt.gray()
for i in range(nkerns[1]):
plt.subplot(5, 10, i + 1)
plt.axis('off')
plt.imshow(output1[0, i])
plt.savefig('digit/activity of layer2 after downsampling in epoch ' + str(num) + '.png')
plt.figure(10)
plt.gray()
plt.axis('off')
plt.imshow(numpy.tile(output2, (10, 1)))
plt.savefig('digit/activity of layer3 in epoch ' + str(num) + '.png')
plt.figure(11)
plt.gray()
plt.axis('off')
plt.imshow(numpy.tile(output3, (10, 1)))
plt.savefig('digit/activity of layer4 in epoch ' + str(num) + '.png')
if mode == 'mat':
sio.savemat('digit in epoch ' + str(num) + '.mat', {'ActivityOfLayer0' : digit.reshape(size),
'ActivityOfLayer1before' : conv0[0],
'ActivityOfLayer1after' : output0[0],
'ActivityOfLayer2before' : conv1[0],
'ActivityOfLayer2after' : output1[0],
'ActivityOfLayer3' : output2,
'ActivityOfLayer4' : output3,
'FilterOfLayer1' : layer0.W.get_value()[:, 0, :, :],
'FilterOfLayer2' : layer1.W.get_value()[:, 0, :, :],
'FilterOfLayer3' : layer2.W.get_value(),
'FilterOfLayer4' : layer3.W.get_value(),
'y_predict' : y})
return y
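# Illustrative sketch (not part of the original script): calling display()
# directly from Python instead of going through the shell entry point below.
# The file names are placeholders; params.npy is the parameter dump produced
# during training and the digit array must match the given size.
def _example_call():
    params = numpy.load('params.npy')          # placeholder path
    digit = numpy.load('digit_sample.npy')     # placeholder path, flattened 56x56 digit
    return display(params, digit, epoch=[0, 2, 4], mode='png', size=(56, 56))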
if __name__ == '__main__':
#when using shell, the first parameter is name of digit as .npy format
#the second and other parameters are the epochs to export
params = numpy.load('params.npy')
if sys.argv[1].find('.npy') != -1:
digit = numpy.load(sys.argv[1])
elif sys.argv[1].find('.txt') != -1:
digit = numpy.loadtxt(sys.argv[1])
size = [int(sys.argv[3]), int(sys.argv[4])]
epoch = []
for i in sys.argv[5:]:
epoch.append(int(i))
y = display(params, digit, epoch, sys.argv[2])
print 'classification result of ' + sys.argv[1] + ' is ' + str(y)
|
mpl-2.0
| -2,081,077,153,515,145,500
| 36.128342
| 172
| 0.525814
| false
| 3.490695
| false
| false
| false
|
umich-brcf-bioinf/Jacquard
|
jacquard/utils/command_validator.py
|
1
|
7653
|
"""Validates command preconditions.
Specifically checks that the command, arguments, and environment
(e.g. input/output directories or files) are consistent and plausible.
Each validation function evaluates a specific precondition.
Each function is allowed to:
* change the environment (e.g. create a dir)
* change the args (replace the original output dir with a new temp output dir)
* add arguments which may be required for sub-commands
* delegate to/interact with a sub-command
* raise a UsageException if things look problematic
"""
from __future__ import print_function, absolute_import, division
import errno
import glob
import os
import time
import jacquard.utils.utils as utils
_TEMP_WORKING_DIR_FORMAT = "jacquard.{}.{}.tmp"
def _actual_type(path):
if os.path.isdir(path):
return "directory"
else:
return "file"
def _build_collision_message(command, collisions):
total_collisions = len(collisions)
if total_collisions == 1:
return ("The {} command would "
"overwrite the existing file [{}]; review "
"command/output dir to avoid overwriting or "
"use the flag '--force'.").format(command,
collisions[0])
cutoff = 5
collision_list = ", ".join(collisions[0:min(cutoff, total_collisions)])
if total_collisions > cutoff:
omitted = total_collisions - cutoff
collision_list += ", ...({} file(s) omitted)".format(omitted)
return ("The {} command would "
"overwrite {} existing files [{}]; review "
"command/output dir to avoid overwriting or "
"use the flag '--force'.").format(command,
total_collisions,
collision_list)
def _check_input_correct_type(dummy, args):
module_name = args.subparser_name
input_path = args.input
required_type = args.required_input_type
actual_type = _actual_type(input_path)
if required_type != actual_type:
raise utils.UsageError(("The {} command requires a {} as "
"input, but the specified input [{}] is a {}. "
"Review inputs and try again.") \
.format(module_name,
required_type,
input_path,
actual_type))
def _check_input_exists(dummy, args):
if not os.path.exists(args.input):
raise utils.UsageError(("Specified input [{}] does not exist. Review "\
"inputs and try again.").format(args.input))
def _check_input_readable(dummy, args):
try:
if os.path.isdir(args.input):
os.listdir(args.input)
else:
open(args.input, "r").close()
except (OSError, IOError):
raise utils.UsageError(("Specified input [{}] cannot be read. Review "
"inputs and try again.").format(args.input))
def _check_output_correct_type(module_name, output_path, required_type):
actual_type = _actual_type(output_path)
if required_type != actual_type:
raise utils.UsageError(("The {} command outputs a {}, but the "
"specified output [{}] is a {}. "
"Review inputs and try again.")\
.format(module_name,
required_type,
output_path,
actual_type))
def _check_output_exists(dummy, args):
if os.path.exists(args.output_path):
_check_output_correct_type(args.subparser_name,
args.output_path,
args.required_output_type)
def _check_overwrite_existing_files(module, args):
output = args.output_path
if not os.path.isdir(output):
output = os.path.dirname(output)
existing_output_paths = sorted(glob.glob(os.path.join(output, "*")))
existing_output = set([os.path.basename(i) for i in existing_output_paths])
predicted_output = module.report_prediction(args)
collisions = sorted(list(existing_output.intersection(predicted_output)))
if collisions and not args.force:
message = _build_collision_message(args.subparser_name, collisions)
raise utils.UsageError(message)
def _check_there_will_be_output(module, args):
predicted_output = module.report_prediction(args)
if not predicted_output:
message = ("Executing the {} command with the input [{}] would not "
"create any output files. Review inputs and try again.")\
.format(args.subparser_name, args.input)
raise utils.UsageError(message)
def _check_valid_args(module, args):
module.validate_args(args)
def _create_temp_working_dir(dummy, args):
try:
_makepath(args.temp_working_dir)
if args.required_output_type == "directory":
_makepath(args.output)
except OSError:
parent_dir = os.path.dirname(args.temp_working_dir)
raise utils.UsageError(("Jacquard cannot write to output directory "
"[{}]. Review inputs and try again.")\
.format(parent_dir))
def _set_temp_working_dir(dummy, args):
original_output = args.original_output
required_output = args.required_output_type
abs_original_output = os.path.abspath(original_output)
pid = os.getpid()
microseconds_since_epoch = int(time.time() * 1000 * 1000)
dir_name = _TEMP_WORKING_DIR_FORMAT.format(str(pid),
str(microseconds_since_epoch))
place_temp_inside_output = True
if required_output == "file":
place_temp_inside_output = False
elif required_output == "directory" and not os.path.isdir(original_output):
place_temp_inside_output = False
if place_temp_inside_output:
base_dir = abs_original_output
temp_working_dir = os.path.join(base_dir, dir_name)
new_output = temp_working_dir
else:
base_dir = os.path.dirname(abs_original_output)
temp_working_dir = os.path.join(base_dir, dir_name)
new_output = os.path.join(temp_working_dir,
os.path.basename(abs_original_output))
args.temp_working_dir = temp_working_dir
args.output = new_output
def _makepath(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def _set_required_types(module, args):
(args.required_input_type,
args.required_output_type) = module.get_required_input_output_types()
def _set_output_paths(dummy, args):
args.original_output = args.output
args.output_path = os.path.abspath(args.original_output)
_VALIDATION_TASKS = [_set_output_paths,
_set_required_types,
_set_temp_working_dir,
_check_input_exists,
_check_input_readable,
_check_input_correct_type,
_check_output_exists,
_create_temp_working_dir,
_check_there_will_be_output,
_check_overwrite_existing_files,
_check_valid_args]
def preflight(command, args):
for validate in _VALIDATION_TASKS:
validate(command, args)
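# Illustrative sketch (not part of the original module): preflight() runs each
# validation task in order, letting tasks rewrite args (e.g. swapping in a temp
# working dir) or abort with a UsageError. The command object and argument
# values below are made up for demonstration only.
def _example_preflight_call():
    class _FakeCommand(object):
        @staticmethod
        def report_prediction(args):
            return set(['output.vcf'])        # files the command would write
        @staticmethod
        def get_required_input_output_types():
            return ('file', 'file')
        @staticmethod
        def validate_args(args):
            pass
    class _FakeArgs(object):
        subparser_name = 'translate'          # hypothetical sub-command name
        input = 'input.vcf'                   # hypothetical input path
        output = 'out_dir/output.vcf'         # hypothetical output path
        force = False
    try:
        preflight(_FakeCommand, _FakeArgs())
    except utils.UsageError as err:
        print(err)                            # e.g. the input file does not exist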
|
apache-2.0
| 766,562,585,724,645,900
| 38.448454
| 79
| 0.580818
| false
| 4.116729
| false
| false
| false
|
lptorres/noah-inasafe
|
web_api/third_party/simplejson/__init__.py
|
1
|
22985
|
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print(json.dumps("\"foo\bar"))
"\"foo\bar"
>>> print(json.dumps(u'\u1234'))
"\u1234"
>>> print(json.dumps('\\'))
"\\"
>>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
{"a": 0, "b": 0, "c": 0}
>>> from simplejson.compat import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> obj = [1,2,3,{'4': 5, '6': 7}]
>>> json.dumps(obj, separators=(',',':'), sort_keys=True)
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' '))
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from simplejson.compat import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
    ...     raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 3 (char 2)
"""
from __future__ import absolute_import
__version__ = '3.3.0'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict', 'simple_first',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from .scanner import JSONDecodeError
from .decoder import JSONDecoder
from .encoder import JSONEncoder, JSONEncoderForHTML
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
from . import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from ._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
bigint_as_string=False,
item_sort_key=None,
for_json=False,
ignore_nan=False,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
for_json=False, ignore_nan=False, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If *skipkeys* is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If *ensure_ascii* is false, then the some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If *check_circular* is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If *allow_nan* is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the original JSON specification, instead of using
the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). See
*ignore_nan* for ECMA-262 compliant behavior.
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, *separators* should be an
``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
compact JSON representation, you should specify ``(',', ':')`` to eliminate
whitespace.
*encoding* is the character encoding for str instances, default is UTF-8.
*default(obj)* is a function that should return a serializable version
of obj or raise ``TypeError``. The default simply raises ``TypeError``.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
If *bigint_as_string* is true (default: ``False``), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise. Note that this is still a
lossy operation that will not round-trip correctly and should be used
sparingly.
If specified, *item_sort_key* is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
in alphabetical order by key. This option takes precedence over
*sort_keys*.
If *sort_keys* is true (default: ``False``), the output of dictionaries
will be sorted by item.
If *for_json* is true (default: ``False``), objects with a ``for_json()``
method will use the return value of that method for encoding as JSON
instead of the object.
If *ignore_nan* is true (default: ``False``), then out of range
:class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
``null`` in compliance with the ECMA-262 specification. If true, this will
override *allow_nan*.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg. NOTE: You should use *default* or *for_json* instead
of subclassing whenever possible.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array
and not bigint_as_string and not item_sort_key
and not for_json and not ignore_nan and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
for_json=for_json,
ignore_nan=ignore_nan,
**kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
for_json=False, ignore_nan=False, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is false then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, ``separators`` should be an
``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
compact JSON representation, you should specify ``(',', ':')`` to eliminate
whitespace.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
If *bigint_as_string* is true (not the default), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise.
If specified, *item_sort_key* is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
    in alphabetical order by key. This option takes precedence over
*sort_keys*.
If *sort_keys* is true (default: ``False``), the output of dictionaries
will be sorted by item.
If *for_json* is true (default: ``False``), objects with a ``for_json()``
method will use the return value of that method for encoding as JSON
instead of the object.
If *ignore_nan* is true (default: ``False``), then out of range
:class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
``null`` in compliance with the ECMA-262 specification. If true, this will
override *allow_nan*.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg. NOTE: You should use *default* instead of subclassing
whenever possible.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array
and not bigint_as_string and not sort_keys
and not item_sort_key and not for_json
and not ignore_nan and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
for_json=for_json,
ignore_nan=ignore_nan,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead
of subclassing whenever possible.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead
of subclassing whenever possible.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and not use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
if use_decimal:
if parse_float is not None:
raise TypeError("use_decimal=True implies parse_float=Decimal")
kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
from . import decoder as dec
from . import encoder as enc
from . import scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
enc.py_encode_basestring_ascii)
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
else:
dec.scanstring = dec.py_scanstring
enc.c_make_encoder = None
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
scan.make_scanner = scan.py_make_scanner
dec.make_scanner = scan.make_scanner
global _default_decoder
_default_decoder = JSONDecoder(
encoding=None,
object_hook=None,
object_pairs_hook=None,
)
global _default_encoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def simple_first(kv):
"""Helper function to pass to item_sort_key to sort simple
elements to the top, then container elements.
"""
return (isinstance(kv[1], (list, dict, tuple)), kv[0])
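# Illustrative usage (not part of the original module): passing simple_first as
# item_sort_key puts scalar members ahead of nested containers in the output.
def _simple_first_example():
    return dumps({'nested': {'b': 2}, 'plain': 1, 'items': [3]},
                 item_sort_key=simple_first)
    # -> '{"plain": 1, "items": [3], "nested": {"b": 2}}'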
|
gpl-3.0
| 5,512,813,960,546,909,000
| 40.02011
| 79
| 0.641157
| false
| 3.982155
| false
| false
| false
|
cmacro/flaskblog
|
app/main/views.py
|
1
|
9124
|
from flask import render_template, redirect, url_for, abort, flash, request, current_app, make_response
from flask_login import login_required, current_user
from . import main
from ..models import User, Role, Permission, Post, Comment
from ..decorators import admin_required, permission_required
from .. import db
from .forms import EditProfileForm, EditProfileAdminForm, PostForm, CommentForm
@main.route('/', methods = ['GET', 'POST'])
def index():
form = PostForm()
if current_user.can(Permission.WRITE_ARTICLES) and \
form.validate_on_submit():
post = Post(body=form.body.data, author=current_user._get_current_object())
db.session.add(post)
return redirect(url_for('.index'))
show_followed = False
User.is_authenticated
if current_user.is_authenticated:
show_followed = bool(request.cookies.get('show_followed', ''))
if show_followed:
query = current_user.followed_posts
else:
query = Post.query
    # pagination handling
page = request.args.get('page', 1, type=int)
pagination = query.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
    posts = pagination.items
    cfgtag = current_app.config['SQLALCHEMY_DATABASE_URI']
return render_template('index.html', form=form, posts=posts,
show_followed=show_followed, pagination=pagination, cfgtag=cfgtag)
def show_index_resp(followed):
resp = make_response(redirect(url_for('.index')))
resp.set_cookie('show_followed', followed, max_age=30*24*60*60)
return resp
@main.route('/all')
@login_required
def show_all():
return show_index_resp('')
@main.route('/followed')
@login_required
def show_followed():
return show_index_resp('1')
@main.route('/user/<username>')
def user(username):
user = User.query.filter_by(username=username).first_or_404()
page = request.args.get('page', 1, type=int)
pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
return render_template('user.html', user=user, posts=posts,
pagination=pagination)
@main.route('/edit-profile', methods = ['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm()
if form.validate_on_submit():
current_user.name = form.name.data
current_user.location = form.location.data
current_user.about_me = form.about_me.data
db.session.add(current_user)
flash('Your profile has been updated.')
return redirect(url_for('.user', username=current_user.username))
form.name.data = current_user.name
form.location.data = current_user.location
form.about_me.data = current_user.about_me
return render_template('edit_profile.html', form = form)
@main.route('/edit-profile/<int:id>', methods = ['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
user = User.query.get_or_404(id)
form = EditProfileAdminForm(user=user)
if form.validate_on_submit():
user.email = form.email.data
user.username = form.username.data
user.confirmed = form.confirmed.data
user.role = Role.query.get(form.role.data)
user.name = form.name.data
user.location = form.location.data
user.about_me = form.about_me.data
db.session.add(user)
flash('The profile has been updated.')
return redirect(url_for('.user', username=user.username))
form.email.data = user.email
form.username.data = user.username
form.confirmed.data = user.confirmed
form.role.data = user.role_id
form.name.data = user.name
form.location.data = user.location
form.about_me.data = user.about_me
return render_template('edit_profile.html', form=form, user=user)
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
post = Post.query.get_or_404(id)
form = CommentForm()
if form.validate_on_submit():
comment = Comment(body=form.body.data,
post=post,
author=current_user._get_current_object())
db.session.add(comment)
flash('Your comment has been published.')
return redirect(url_for('.post', id=post.id, page=-1))
page = request.args.get('page', 1, type=int)
if page == -1:
page = (post.comments.count() - 1) / current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
return render_template('post.html', posts=[post], form=form,
comments=comments, pagination=pagination)
@main.route('/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit(id):
post = Post.query.get_or_404(id)
if current_user != post.author and \
not current_user.can(Permission.ADMINISTER):
abort(403)
form = PostForm()
if form.validate_on_submit():
post.body = form.body.data
db.session.add(post)
flash('The post has been updated.')
return redirect(url_for('.post', id=post.id))
form.body.data = post.body
return render_template('edit_post.html', form=form)
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
if current_user.is_following(user):
flash('You are already following this user.')
return redirect(url_for('.user', username=username))
current_user.follow(user)
flash('You are now following %s.' % username)
return redirect(url_for('.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
if not current_user.is_following(user):
flash('You are not following this user.')
return redirect(url_for('.user', username=username))
current_user.unfollow(user)
flash('You are not following %s anymore.' % username)
return redirect(url_for('.user', username=username))
@main.route('/followers/<username>')
def followers(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
    pagination = user.followers.paginate(
page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{'user': item.follower, 'timestamp': item.timestamp}
for item in pagination.items]
return render_template('followers.html', user=user, title='Followers of',
endpoint='.followers', pagination=pagination, follows=follows)
@main.route('/followed-by/<username>')
def followed_by(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
pagination = user.followed.paginate(
page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{'user': item.followed, 'timestamp': item.timestamp}
for item in pagination.items]
return render_template('followers.html', user=user, title="Followed by",
endpoint='.followed_by', pagination=pagination,
follows=follows)
@main.route('/moderate')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate():
page = request.args.get('page', 1, type=int)
pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
return render_template('moderate.html', comments=comments, pagination=pagination, page=page)
@main.route('/moderate/enable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate_enable(id):
comment = Comment.query.get_or_404(id)
comment.disabled = False
db.session.add(comment)
return redirect(url_for('.moderate', page=request.args.get('page', 1, type=int)))
@main.route('/moderate/disable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate_disable(id):
comment = Comment.query.get_or_404(id)
comment.disabled = True
db.session.add(comment)
return redirect(url_for('.moderate', page=request.args.get('page', 1, type=int)))
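# Illustrative sketch (not part of the original blueprint): the views above
# repeat the same paginate() boilerplate; a small helper like this could factor
# it out. The helper name and the config-key argument are hypothetical.
def _paginate(query, per_page_key):
    page = request.args.get('page', 1, type=int)
    pagination = query.paginate(
        page, per_page=current_app.config[per_page_key],
        error_out=False)
    return pagination, pagination.items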
|
mit
| -2,767,377,335,529,196,500
| 36.208163
| 103
| 0.658183
| false
| 3.591805
| true
| false
| false
|
nagyistoce/geokey
|
geokey/contributions/serializers.py
|
1
|
21039
|
import requests
import tempfile
from django.core import files
from django.core.exceptions import PermissionDenied, ValidationError
from easy_thumbnails.files import get_thumbnailer
from rest_framework import serializers
from rest_framework_gis import serializers as geoserializers
from rest_framework.serializers import BaseSerializer
from geokey.categories.serializer import CategorySerializer
from geokey.categories.models import Category
from geokey.users.serializers import UserSerializer
from .models import Observation, Comment
from .models import Location
from .models import MediaFile, ImageFile, VideoFile
class LocationSerializer(geoserializers.GeoFeatureModelSerializer):
"""
    Serialiser for geokey.contributions.models.Location
"""
class Meta:
model = Location
geo_field = 'geometry'
fields = ('id', 'name', 'description', 'status', 'created_at')
write_only_fields = ('status',)
class LocationContributionSerializer(serializers.ModelSerializer):
"""
Serialiser for `Location`; to be used within `ContributionSerializer`.
"""
class Meta:
model = Location
fields = ('id', 'name', 'description', 'status', 'created_at',
'geometry', 'private', 'private_for_project')
write_only_fields = ('status', 'private', 'private_for_project')
def create(self, validated_data):
"""
Creates a new contribution from `validated_data`
Parameter
---------
validated_data : dict
Input data after validation
Returns
-------
Location
"""
validated_data['creator'] = self.context.get('user')
return super(
LocationContributionSerializer,
self
).create(validated_data)
class ContributionSerializer(BaseSerializer):
"""
    Serialiser for geokey.contributions.models.Observations. This is a custom
    serialiser, not a standard ModelSerializer.
"""
@classmethod
def many_init(cls, *args, **kwargs):
"""
Is called when many=True property is set when instantiating the
serialiser.
"""
kwargs['context']['many'] = True
return super(ContributionSerializer, cls).many_init(*args, **kwargs)
def validate_category(self, project, category_id):
"""
Validates if the category can be used with the project
Parameters
----------
project : geokey.projects.models.Project
Project that the category is used for
category_id : int
identifies the category in the database
Returns
-------
geokey.categories.models.Category
The valid category
"""
errors = []
category = None
try:
category = project.categories.get(pk=category_id)
if category.status == 'inactive':
errors.append('The category can not be used because it is '
'inactive.')
else:
self._validated_data['meta']['category'] = category
except Category.DoesNotExist:
errors.append('The category can not be used with the project '
'or does not exist.')
if errors:
self._errors['category'] = errors
return category
def replace_null(self, properties):
"""
Replaces all empty str or unicode values with None and returns the
        properties dict
Parameter
---------
properties : dict
Contribution properties
Returns
-------
dict
Contribution properties with replaced null values
"""
for key, value in properties.iteritems():
if isinstance(value, (str, unicode)) and len(value) == 0:
properties[key] = None
return properties
def validate_properties(self, properties, category=None, status=None):
"""
Validates the properties and adds error messages to self._errors
Parameter
---------
properties : dict
Contribution properties
category : geokey.categories.models.Category
Category the properties are validated against
status : str
Status for the contribution
"""
errors = []
if self.instance:
status = status or self.instance.status
if self.instance.properties:
update = self.instance.properties.copy()
update.update(properties)
properties = update
else:
status = status or category.default_status
properties = self.replace_null(properties)
try:
if status == 'draft':
Observation.validate_partial(category, properties)
else:
Observation.validate_full(category, properties)
except ValidationError, e:
errors.append(e)
self._validated_data['properties'] = properties
self._validated_data['meta']['status'] = status
if errors:
self._errors['properties'] = errors
def validate_location(self, project, location_id):
"""
Validates if the location can be used with the project
Parameters
----------
project : geokey.projects.models.Project
Project that the category is used for
location_id : int
identifies the location in the database
"""
errors = []
self.location = None
try:
if location_id is not None:
self.location = Location.objects.get_single(
self.context.get('user'),
project.id,
location_id
)
except PermissionDenied, error:
errors.append(error)
except Location.DoesNotExist, error:
errors.append(error)
if errors:
self._errors['location'] = errors
def is_valid(self, raise_exception=False):
"""
Checks if the contribution that is deserialised is valid. Validates
location, category and properties.
Parameter
---------
raise_exception : Boolean
            indicates if an exception should be raised if the data is invalid.
If set to false, this method will return False if the data is
invalid.
Returns
-------
Boolean
indicating if data is valid
Raises
------
ValidationError
If data is invalid. Exception is raised when raise_exception is set
            to True.
"""
self._errors = {}
self._validated_data = self.initial_data
project = self.context.get('project')
meta = self.initial_data.get('meta')
if meta is None:
self._validated_data['meta'] = dict()
# Validate location
location_id = None
if self.initial_data.get('location') is not None:
location_id = self.initial_data.get('location').get('id')
self.validate_location(project, location_id)
# Validate category
category = None
if self.instance is None and meta is not None:
category = self.validate_category(project, meta.get('category'))
else:
category = self.instance.category
self._validated_data['meta']['category'] = category
        # Validate properties
properties = self.initial_data.get('properties') or {}
status = None
if meta is not None:
status = meta.get('status', None)
if properties is not None and category is not None:
self.validate_properties(
properties,
category=category,
status=status
)
# raise the exception
if self._errors and raise_exception:
raise ValidationError(self._errors)
return not bool(self._errors)
def create(self, validated_data):
"""
Creates a new observation and returns the instance.
Parameter
---------
validated_data : dict
the data dict after validation
Returns
-------
geokey.contributions.models.Observation
The instance created
"""
project = self.context.get('project')
meta = validated_data.pop('meta')
location_serializer = LocationContributionSerializer(
self.location,
data=validated_data.pop('location', None),
context=self.context
)
if location_serializer.is_valid():
location_serializer.save()
self.instance = Observation.create(
properties=validated_data.get('properties'),
creator=self.context.get('user'),
location=location_serializer.instance,
project=project,
category=meta.get('category'),
status=meta.pop('status', None)
)
return self.instance
def update(self, instance, validated_data):
"""
Updates an existing observation and returns the instance.
Parameter
---------
instance : geokey.contributions.models.Observation
the instance to be updated
validated_data : dict
the data dict after validation
Returns
-------
geokey.contributions.models.Observation
            The updated instance
"""
meta = validated_data.get('meta')
status = None
if meta is not None:
status = meta.get('status', None)
location_serializer = LocationContributionSerializer(
instance.location,
data=validated_data.pop('location', {}),
context=self.context,
partial=True
)
if location_serializer.is_valid():
location_serializer.save()
return instance.update(
properties=validated_data.get('properties'),
updator=self.context.get('user'),
status=status
)
def get_display_field(self, obj):
"""
Returns a native representation of the display_field property.
Parameter
---------
obj : geokey.contributions.models.Observation
The instance that is serialised
Returns
-------
dict
serialised display_field; e.g.
{
'key': 'field_key',
'value': 'The value of the field'
}
"""
if obj.display_field is not None:
display_field = obj.display_field.split(':', 1)
value = display_field[1] if display_field[1] != 'None' else None
return {
'key': display_field[0],
'value': value
}
else:
return None
def get_search_result(self, obj, q):
"""
Returns all fields which values have matched a search query
Parameter
---------
obj : geokey.contributions.models.Observation
The instance that is serialised
q : str
The query string of the search
Return
------
dict
the field that matched the query, e.g.
{
'field_key_1': 'value 1',
'field_key_2': 'value 2',
}
"""
search_matches = {}
matcher = obj.search_matches.split('#####')
for field in matcher:
match = field.split(':', 1)
if q.lower() in match[1].lower():
search_matches[match[0]] = match[1]
return search_matches
def to_representation(self, obj):
"""
Returns the native representation of a contribution
Parameter
---------
obj : geokey.contributions.models.Observation
The instance that is serialised
Returns
-------
dict
            Native representation of the Contribution
"""
location = obj.location
isowner = False
if not self.context.get('user').is_anonymous():
isowner = obj.creator == self.context.get('user')
updator = None
if obj.updator is not None:
updator = {
'id': obj.updator.id,
'display_name': obj.updator.display_name
}
feature = {
'id': obj.id,
'properties': obj.properties,
'display_field': self.get_display_field(obj),
'meta': {
'status': obj.status,
'creator': {
'id': obj.creator.id,
'display_name': obj.creator.display_name
},
'updator': updator,
'created_at': str(obj.created_at),
'updated_at': str(obj.updated_at),
'version': obj.version,
'isowner': isowner,
'num_media': obj.num_media,
'num_comments': obj.num_comments
},
'location': {
'id': location.id,
'name': location.name,
'description': location.description,
'geometry': location.geometry.geojson
}
}
if self.context.get('many'):
cat = obj.category
feature['meta']['category'] = {
'id': cat.id,
'name': cat.name,
'description': cat.description,
'symbol': cat.symbol.url if cat.symbol else None,
'colour': cat.colour
}
q = self.context.get('search')
if q is not None:
feature['search_matches'] = self.get_search_result(obj, q)
else:
category_serializer = CategorySerializer(
obj.category, context=self.context)
feature['meta']['category'] = category_serializer.data
comment_serializer = CommentSerializer(
obj.comments.filter(respondsto=None),
many=True,
context=self.context
)
feature['comments'] = comment_serializer.data
review_serializer = CommentSerializer(
obj.comments.filter(review_status='open'),
many=True,
context=self.context
)
feature['review_comments'] = review_serializer.data
file_serializer = FileSerializer(
obj.files_attached.all(),
many=True,
context=self.context
)
feature['media'] = file_serializer.data
return feature
class CommentSerializer(serializers.ModelSerializer):
"""
Serialiser for geokey.contributions.models.Comment
"""
creator = UserSerializer(fields=('id', 'display_name'), read_only=True)
isowner = serializers.SerializerMethodField()
class Meta:
model = Comment
fields = ('id', 'respondsto', 'created_at', 'text', 'isowner',
'creator', 'review_status')
read_only = ('id', 'respondsto', 'created_at')
def to_representation(self, obj):
"""
        Returns the native representation of the Comment. Adds responses to the comment.
Parameter
---------
obj : geokey.contributions.models.Comment
The instance that is serialised
Returns
-------
dict
            Native representation of the Comment
"""
native = super(CommentSerializer, self).to_representation(obj)
native['responses'] = CommentSerializer(
obj.responses.all(),
many=True,
context=self.context
).data
return native
def get_isowner(self, comment):
"""
Returns True if the user serialising the Comment has created the
comment
Parameter
---------
comment : geokey.contributions.models.Comment
The instance that is serialised
Returns
-------
Boolean
indicating of user is creator of comment
"""
if not self.context.get('user').is_anonymous():
return comment.creator == self.context.get('user')
else:
return False
class FileSerializer(serializers.ModelSerializer):
"""
Serialiser for geokey.contributions.models.MediaFile instances
"""
creator = UserSerializer(fields=('id', 'display_name'))
isowner = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
file_type = serializers.SerializerMethodField()
thumbnail_url = serializers.SerializerMethodField()
class Meta:
model = MediaFile
fields = (
'id', 'name', 'description', 'created_at', 'creator', 'isowner',
'url', 'thumbnail_url', 'file_type'
)
def get_file_type(self, obj):
"""
Returns the type of the MediaFile
Parameter
---------
obj : geokey.contributions.models.MediaFile
The instance that is serialised
Returns
-------
str
            The type of the file, e.g. 'ImageFile'
"""
return obj.type_name
def get_isowner(self, obj):
"""
Returns `True` if the user provided in the serializer context is the
creator of this file
Parameter
---------
obj : geokey.contributions.models.MediaFile
The instance that is serialised
Returns
-------
Boolean
indicating if user created the file
"""
if not self.context.get('user').is_anonymous():
return obj.creator == self.context.get('user')
else:
return False
def get_url(self, obj):
"""
Return the url to access this file based on its file type
Parameter
---------
obj : geokey.contributions.models.MediaFile
The instance that is serialised
Returns
-------
str
The URL to embed the file on client side
"""
if isinstance(obj, ImageFile):
return obj.image.url
elif isinstance(obj, VideoFile):
return obj.youtube_link
def _get_thumb(self, image, size=(300, 300)):
"""
        Returns the thumbnail of the media file based on the size provided
Parameter
---------
image : Image
The image to be thumbnailed
size : tuple
width and height of the thumbnail, defaults to 300 by 300
Returns
-------
Image
The thumbnail
"""
thumbnailer = get_thumbnailer(image)
thumb = thumbnailer.get_thumbnail({
'crop': True,
'size': size
})
return thumb
def get_thumbnail_url(self, obj):
"""
Creates and returns a thumbnail for the MediaFile object
Parameter
---------
obj : geokey.contributions.models.MediaFile
The instance that is serialised
Returns
-------
str
The url to embed thumbnails on client side
"""
if isinstance(obj, ImageFile):
# Some of the imported image files in the original community maps
# seem to be broken. The error thrown when the image can not be
# read is caught here.
try:
return self._get_thumb(obj.image).url
except IOError:
return ''
elif isinstance(obj, VideoFile):
if obj.thumbnail:
# thumbnail has been downloaded, return the link
return self._get_thumb(obj.thumbnail).url
request = requests.get(
'http://img.youtube.com/vi/%s/0.jpg' % obj.youtube_id,
stream=True
)
if request.status_code != requests.codes.ok:
# Image not found, return placeholder thumbnail
return '/static/img/play.png'
lf = tempfile.NamedTemporaryFile()
# Read the streamed image in sections
for block in request.iter_content(1024 * 8):
# If no more file then stop
if not block:
break
# Write image block to temporary file
lf.write(block)
file_name = obj.youtube_id + '.jpg'
obj.thumbnail.save(file_name, files.File(lf))
from PIL import Image
w, h = Image.open(obj.thumbnail).size
thumb = self._get_thumb(obj.thumbnail, size=(h, h))
obj.thumbnail.save(file_name, thumb)
return self._get_thumb(obj.thumbnail).url
|
apache-2.0
| -2,704,248,957,482,316,000
| 28.884943
| 79
| 0.547032
| false
| 4.916803
| false
| false
| false
|
davehunt/selenium
|
py/selenium/webdriver/ie/webdriver.py
|
1
|
3912
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from selenium.webdriver.common import utils
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service
from .options import Options
DEFAULT_TIMEOUT = 30
DEFAULT_PORT = 0
DEFAULT_HOST = None
DEFAULT_LOG_LEVEL = None
DEFAULT_SERVICE_LOG_PATH = None
class WebDriver(RemoteWebDriver):
""" Controls the IEServerDriver and allows you to drive Internet Explorer """
def __init__(self, executable_path='IEDriverServer.exe', capabilities=None,
port=DEFAULT_PORT, timeout=DEFAULT_TIMEOUT, host=DEFAULT_HOST,
log_level=DEFAULT_LOG_LEVEL, service_log_path=DEFAULT_SERVICE_LOG_PATH, options=None,
ie_options=None, desired_capabilities=None, log_file=None):
"""
        Creates a new instance of the Internet Explorer driver.
        Starts the service and then creates a new instance of the IE driver.
:Args:
- executable_path - path to the executable. If the default is used it assumes the executable is in the $PATH
- capabilities: capabilities Dictionary object
- port - port you would like the service to run, if left as 0, a free port will be found.
- log_level - log level you would like the service to run.
- service_log_path - target of logging of service, may be "stdout", "stderr" or file path.
- options: IE Options instance, providing additional IE options
- desired_capabilities: alias of capabilities; this will make the signature consistent with RemoteWebDriver.
"""
if log_file:
warnings.warn('use service_log_path instead of log_file', DeprecationWarning)
service_log_path = log_file
if ie_options:
warnings.warn('use options instead of ie_options', DeprecationWarning)
options = ie_options
self.port = port
if self.port == 0:
self.port = utils.free_port()
self.host = host
# If both capabilities and desired capabilities are set, ignore desired capabilities.
if capabilities is None and desired_capabilities:
capabilities = desired_capabilities
if options is None:
if capabilities is None:
capabilities = self.create_options().to_capabilities()
else:
if capabilities is None:
capabilities = options.to_capabilities()
else:
# desired_capabilities stays as passed in
capabilities.update(options.to_capabilities())
self.iedriver = Service(
executable_path,
port=self.port,
host=self.host,
log_level=log_level,
log_file=service_log_path)
self.iedriver.start()
RemoteWebDriver.__init__(
self,
command_executor='http://localhost:%d' % self.port,
desired_capabilities=capabilities)
self._is_remote = False
def quit(self):
RemoteWebDriver.quit(self)
self.iedriver.stop()
def create_options(self):
return Options()
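# Illustrative usage sketch (not part of the original module); assumes the
# IEDriverServer executable is available on the PATH as described above:
#
#   from selenium.webdriver import Ie
#   driver = Ie()
#   driver.get('https://example.com')
#   driver.quit()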
|
apache-2.0
| -8,441,446,402,448,930,000
| 39.329897
| 117
| 0.665644
| false
| 4.554133
| false
| false
| false
|
ioam/holoviews
|
holoviews/core/data/grid.py
|
1
|
28568
|
from __future__ import absolute_import
import sys
import datetime as dt
from collections import OrderedDict, defaultdict, Iterable
try:
import itertools.izip as zip
except ImportError:
pass
import numpy as np
from .dictionary import DictInterface
from .interface import Interface, DataError
from ..dimension import dimension_name
from ..element import Element
from ..dimension import OrderedDict as cyODict
from ..ndmapping import NdMapping, item_check, sorted_context
from .. import util
from .interface import is_dask, dask_array_module, get_array_types
class GridInterface(DictInterface):
"""
Interface for simple dictionary-based dataset format using a
compressed representation that uses the cartesian product between
key dimensions. As with DictInterface, the dictionary keys correspond
    to the column (i.e. dimension) names and the values are NumPy arrays
representing the values in that column.
To use this compressed format, the key dimensions must be orthogonal
to one another with each key dimension specifying an axis of the
multidimensional space occupied by the value dimension data. For
    instance, given temperature recordings sampled regularly across
    the earth's surface, a list of N unique latitudes and M unique
longitudes can specify the position of NxM temperature samples.
"""
types = (dict, OrderedDict, cyODict)
datatype = 'grid'
gridded = True
@classmethod
def init(cls, eltype, data, kdims, vdims):
if kdims is None:
kdims = eltype.kdims
if vdims is None:
vdims = eltype.vdims
if not vdims:
raise ValueError('GridInterface interface requires at least '
'one value dimension.')
ndims = len(kdims)
dimensions = [dimension_name(d) for d in kdims+vdims]
if isinstance(data, tuple):
data = {d: v for d, v in zip(dimensions, data)}
elif isinstance(data, list) and data == []:
data = OrderedDict([(d, []) for d in dimensions])
elif not any(isinstance(data, tuple(t for t in interface.types if t is not None))
for interface in cls.interfaces.values()):
data = {k: v for k, v in zip(dimensions, zip(*data))}
elif isinstance(data, np.ndarray):
if data.ndim == 1:
if eltype._auto_indexable_1d and len(kdims)+len(vdims)>1:
data = np.column_stack([np.arange(len(data)), data])
else:
data = np.atleast_2d(data).T
data = {k: data[:,i] for i,k in enumerate(dimensions)}
elif isinstance(data, list) and data == []:
data = {d: np.array([]) for d in dimensions[:ndims]}
data.update({d: np.empty((0,) * ndims) for d in dimensions[ndims:]})
elif not isinstance(data, dict):
raise TypeError('GridInterface must be instantiated as a '
'dictionary or tuple')
for dim in kdims+vdims:
name = dimension_name(dim)
if name not in data:
raise ValueError("Values for dimension %s not found" % dim)
if not isinstance(data[name], get_array_types()):
data[name] = np.array(data[name])
kdim_names = [dimension_name(d) for d in kdims]
vdim_names = [dimension_name(d) for d in vdims]
expected = tuple([len(data[kd]) for kd in kdim_names])
irregular_shape = data[kdim_names[0]].shape if kdim_names else ()
valid_shape = irregular_shape if len(irregular_shape) > 1 else expected[::-1]
shapes = tuple([data[kd].shape for kd in kdim_names])
for vdim in vdim_names:
shape = data[vdim].shape
error = DataError if len(shape) > 1 else ValueError
if (not expected and shape == (1,)) or (len(set((shape,)+shapes)) == 1 and len(shape) > 1):
# If empty or an irregular mesh
pass
elif len(shape) != len(expected):
raise error('The shape of the %s value array does not '
'match the expected dimensionality indicated '
'by the key dimensions. Expected %d-D array, '
'found %d-D array.' % (vdim, len(expected), len(shape)))
elif any((s!=e and (s+1)!=e) for s, e in zip(shape, valid_shape)):
raise error('Key dimension values and value array %s '
'shapes do not match. Expected shape %s, '
'actual shape: %s' % (vdim, valid_shape, shape), cls)
return data, {'kdims':kdims, 'vdims':vdims}, {}
@classmethod
def concat(cls, datasets, dimensions, vdims):
from . import Dataset
with sorted_context(False):
datasets = NdMapping(datasets, kdims=dimensions)
datasets = datasets.clone([(k, v.data if isinstance(v, Dataset) else v)
for k, v in datasets.data.items()])
if len(datasets.kdims) > 1:
items = datasets.groupby(datasets.kdims[:-1]).data.items()
return cls.concat([(k, cls.concat(v, v.kdims, vdims=vdims)) for k, v in items],
datasets.kdims[:-1], vdims)
return cls.concat_dim(datasets, datasets.kdims[0], vdims)
@classmethod
def concat_dim(cls, datasets, dim, vdims):
values, grids = zip(*datasets.items())
new_data = {k: v for k, v in grids[0].items() if k not in vdims}
new_data[dim.name] = np.array(values)
for vdim in vdims:
arrays = [grid[vdim.name] for grid in grids]
shapes = set(arr.shape for arr in arrays)
if len(shapes) > 1:
raise DataError('When concatenating gridded data the shape '
'of arrays must match. %s found that arrays '
'along the %s dimension do not match.' %
(cls.__name__, vdim.name))
stack = dask_array_module().stack if any(is_dask(arr) for arr in arrays) else np.stack
new_data[vdim.name] = stack(arrays, -1)
return new_data
@classmethod
def irregular(cls, dataset, dim):
return dataset.data[dimension_name(dim)].ndim > 1
@classmethod
def isscalar(cls, dataset, dim):
values = cls.values(dataset, dim, expanded=False)
return values.shape in ((), (1,)) or len(np.unique(values)) == 1
@classmethod
def validate(cls, dataset, vdims=True):
Interface.validate(dataset, vdims)
@classmethod
def dimension_type(cls, dataset, dim):
if dim in dataset.dimensions():
arr = cls.values(dataset, dim, False, False)
else:
return None
return arr.dtype.type
@classmethod
def shape(cls, dataset, gridded=False):
shape = dataset.data[dataset.vdims[0].name].shape
if gridded:
return shape
else:
return (np.product(shape, dtype=np.intp), len(dataset.dimensions()))
@classmethod
def length(cls, dataset):
return cls.shape(dataset)[0]
@classmethod
def _infer_interval_breaks(cls, coord, axis=0):
"""
>>> GridInterface._infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
>>> GridInterface._infer_interval_breaks([[0, 1], [3, 4]], axis=1)
array([[-0.5, 0.5, 1.5],
[ 2.5, 3.5, 4.5]])
"""
coord = np.asarray(coord)
if sys.version_info.major == 2 and len(coord) and isinstance(coord[0], (dt.datetime, dt.date)):
# np.diff does not work on datetimes in python 2
coord = coord.astype('datetime64')
deltas = 0.5 * np.diff(coord, axis=axis)
first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis)
last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis)
trim_last = tuple(slice(None, -1) if n == axis else slice(None)
for n in range(coord.ndim))
return np.concatenate([first, coord[trim_last] + deltas, last], axis=axis)
@classmethod
def coords(cls, dataset, dim, ordered=False, expanded=False, edges=False):
"""
        Returns the coordinates along a dimension. Setting ordered=True
        ensures coordinates are in ascending order and expanded=True creates
        an ND-array matching the dimensionality of the dataset.
"""
dim = dataset.get_dimension(dim, strict=True)
irregular = cls.irregular(dataset, dim)
if irregular or expanded:
if irregular:
data = dataset.data[dim.name]
else:
data = util.expand_grid_coords(dataset, dim)
if edges and data.shape == dataset.data[dataset.vdims[0].name].shape:
data = cls._infer_interval_breaks(data, axis=1)
data = cls._infer_interval_breaks(data, axis=0)
return data
data = dataset.data[dim.name]
if ordered and np.all(data[1:] < data[:-1]):
data = data[::-1]
shape = cls.shape(dataset, True)
if dim in dataset.kdims:
idx = dataset.get_dimension_index(dim)
isedges = (dim in dataset.kdims and len(shape) == dataset.ndims
and len(data) == (shape[dataset.ndims-idx-1]+1))
else:
isedges = False
if edges and not isedges:
data = cls._infer_interval_breaks(data)
elif not edges and isedges:
data = data[:-1] + np.diff(data)/2.
return data
@classmethod
def canonicalize(cls, dataset, data, data_coords=None, virtual_coords=[]):
"""
Canonicalize takes an array of values as input and reorients
and transposes it to match the canonical format expected by
plotting functions. In certain cases the dimensions defined
via the kdims of an Element may not match the dimensions of
the underlying data. A set of data_coords may be passed in to
define the dimensionality of the data, which can then be used
to np.squeeze the data to remove any constant dimensions. If
the data is also irregular, i.e. contains multi-dimensional
coordinates, a set of virtual_coords can be supplied, required
by some interfaces (e.g. xarray) to index irregular datasets
with a virtual integer index. This ensures these coordinates
are not simply dropped.
"""
if data_coords is None:
data_coords = dataset.dimensions('key', label='name')[::-1]
# Transpose data
dims = [name for name in data_coords
if isinstance(cls.coords(dataset, name), get_array_types())]
dropped = [dims.index(d) for d in dims
if d not in dataset.kdims+virtual_coords]
if dropped:
data = np.squeeze(data, axis=tuple(dropped))
if not any(cls.irregular(dataset, d) for d in dataset.kdims):
inds = [dims.index(kd.name) for kd in dataset.kdims]
inds = [i - sum([1 for d in dropped if i>=d]) for i in inds]
if inds:
data = data.transpose(inds[::-1])
# Reorient data
invert = False
slices = []
for d in dataset.kdims[::-1]:
coords = cls.coords(dataset, d)
if np.all(coords[1:] < coords[:-1]) and not coords.ndim > 1:
slices.append(slice(None, None, -1))
invert = True
else:
slices.append(slice(None))
data = data[tuple(slices)] if invert else data
# Allow lower dimensional views into data
if len(dataset.kdims) < 2:
data = data.flatten()
return data
@classmethod
def invert_index(cls, index, length):
if np.isscalar(index):
return length - index
elif isinstance(index, slice):
start, stop = index.start, index.stop
new_start, new_stop = None, None
if start is not None:
new_stop = length - start
if stop is not None:
new_start = length - stop
return slice(new_start-1, new_stop-1)
elif isinstance(index, Iterable):
new_index = []
for ind in index:
new_index.append(length-ind)
return new_index
@classmethod
def ndloc(cls, dataset, indices):
selected = {}
adjusted_inds = []
all_scalar = True
for i, (kd, ind) in enumerate(zip(dataset.kdims[::-1], indices)):
coords = cls.coords(dataset, kd.name, True)
if np.isscalar(ind):
ind = [ind]
else:
all_scalar = False
selected[kd.name] = coords[ind]
adjusted_inds.append(ind)
for kd in dataset.kdims:
if kd.name not in selected:
coords = cls.coords(dataset, kd.name)
selected[kd.name] = coords
all_scalar = False
for d in dataset.dimensions():
if d in dataset.kdims and not cls.irregular(dataset, d):
continue
arr = cls.values(dataset, d, flat=False, compute=False)
if all_scalar and len(dataset.vdims) == 1:
return arr[tuple(ind[0] for ind in adjusted_inds)]
selected[d.name] = arr[tuple(adjusted_inds)]
return tuple(selected[d.name] for d in dataset.dimensions())
@classmethod
def values(cls, dataset, dim, expanded=True, flat=True, compute=True):
dim = dataset.get_dimension(dim, strict=True)
if dim in dataset.vdims or dataset.data[dim.name].ndim > 1:
data = dataset.data[dim.name]
data = cls.canonicalize(dataset, data)
da = dask_array_module()
if compute and da and isinstance(data, da.Array):
data = data.compute()
return data.T.flatten() if flat else data
elif expanded:
data = cls.coords(dataset, dim.name, expanded=True)
return data.T.flatten() if flat else data
else:
return cls.coords(dataset, dim.name, ordered=True)
@classmethod
def groupby(cls, dataset, dim_names, container_type, group_type, **kwargs):
# Get dimensions information
dimensions = [dataset.get_dimension(d, strict=True) for d in dim_names]
if 'kdims' in kwargs:
kdims = kwargs['kdims']
else:
kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]
kwargs['kdims'] = kdims
invalid = [d for d in dimensions if dataset.data[d.name].ndim > 1]
if invalid:
if len(invalid) == 1: invalid = "'%s'" % invalid[0]
raise ValueError("Cannot groupby irregularly sampled dimension(s) %s."
% invalid)
# Update the kwargs appropriately for Element group types
group_kwargs = {}
group_type = dict if group_type == 'raw' else group_type
if issubclass(group_type, Element):
group_kwargs.update(util.get_param_values(dataset))
else:
kwargs.pop('kdims')
group_kwargs.update(kwargs)
drop_dim = any(d not in group_kwargs['kdims'] for d in kdims)
# Find all the keys along supplied dimensions
keys = [cls.coords(dataset, d.name) for d in dimensions]
transpose = [dataset.ndims-dataset.kdims.index(kd)-1 for kd in kdims]
transpose += [i for i in range(dataset.ndims) if i not in transpose]
# Iterate over the unique entries applying selection masks
grouped_data = []
for unique_key in zip(*util.cartesian_product(keys)):
select = dict(zip(dim_names, unique_key))
if drop_dim:
group_data = dataset.select(**select)
group_data = group_data if np.isscalar(group_data) else group_data.columns()
else:
group_data = cls.select(dataset, **select)
if np.isscalar(group_data) or (isinstance(group_data, get_array_types()) and group_data.shape == ()):
group_data = {dataset.vdims[0].name: np.atleast_1d(group_data)}
for dim, v in zip(dim_names, unique_key):
group_data[dim] = np.atleast_1d(v)
elif not drop_dim:
if isinstance(group_data, get_array_types()):
group_data = {dataset.vdims[0].name: group_data}
for vdim in dataset.vdims:
data = group_data[vdim.name]
data = data.transpose(transpose[::-1])
group_data[vdim.name] = np.squeeze(data)
group_data = group_type(group_data, **group_kwargs)
grouped_data.append((tuple(unique_key), group_data))
if issubclass(container_type, NdMapping):
with item_check(False):
return container_type(grouped_data, kdims=dimensions)
else:
return container_type(grouped_data)
@classmethod
def key_select_mask(cls, dataset, values, ind):
if isinstance(ind, tuple):
ind = slice(*ind)
if isinstance(ind, get_array_types()):
mask = ind
elif isinstance(ind, slice):
mask = True
if ind.start is not None:
mask &= ind.start <= values
if ind.stop is not None:
mask &= values < ind.stop
# Expand empty mask
if mask is True:
mask = np.ones(values.shape, dtype=np.bool)
elif isinstance(ind, (set, list)):
iter_slcs = []
for ik in ind:
iter_slcs.append(values == ik)
mask = np.logical_or.reduce(iter_slcs)
elif callable(ind):
mask = ind(values)
elif ind is None:
mask = None
else:
index_mask = values == ind
if (dataset.ndims == 1 or dataset._binned) and np.sum(index_mask) == 0:
data_index = np.argmin(np.abs(values - ind))
mask = np.zeros(len(values), dtype=np.bool)
mask[data_index] = True
else:
mask = index_mask
if mask is None:
mask = np.ones(values.shape, dtype=bool)
return mask
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
dimensions = dataset.kdims
val_dims = [vdim for vdim in dataset.vdims if vdim in selection]
if val_dims:
raise IndexError('Cannot slice value dimensions in compressed format, '
'convert to expanded format before slicing.')
indexed = cls.indexed(dataset, selection)
full_selection = [(d, selection.get(d.name, selection.get(d.label)))
for d in dimensions]
data = {}
value_select = []
for i, (dim, ind) in enumerate(full_selection):
irregular = cls.irregular(dataset, dim)
values = cls.coords(dataset, dim, irregular)
mask = cls.key_select_mask(dataset, values, ind)
if irregular:
if np.isscalar(ind) or isinstance(ind, (set, list)):
raise IndexError("Indexing not supported for irregularly "
"sampled data. %s value along %s dimension."
"must be a slice or 2D boolean mask."
% (ind, dim))
mask = mask.max(axis=i)
elif dataset._binned:
edges = cls.coords(dataset, dim, False, edges=True)
inds = np.argwhere(mask)
if np.isscalar(ind):
emin, emax = edges.min(), edges.max()
if ind < emin:
raise IndexError("Index %s less than lower bound "
"of %s for %s dimension." % (ind, emin, dim))
elif ind >= emax:
raise IndexError("Index %s more than or equal to upper bound "
"of %s for %s dimension." % (ind, emax, dim))
idx = max([np.digitize([ind], edges)[0]-1, 0])
mask = np.zeros(len(values), dtype=np.bool)
mask[idx] = True
values = edges[idx:idx+2]
elif len(inds):
values = edges[inds.min(): inds.max()+2]
else:
values = edges[0:0]
else:
values = values[mask]
values, mask = np.asarray(values), np.asarray(mask)
value_select.append(mask)
data[dim.name] = np.array([values]) if np.isscalar(values) else values
int_inds = [np.argwhere(v) for v in value_select][::-1]
index = np.ix_(*[np.atleast_1d(np.squeeze(ind)) if ind.ndim > 1 else np.atleast_1d(ind)
for ind in int_inds])
for kdim in dataset.kdims:
            if cls.irregular(dataset, kdim):
da = dask_array_module()
if da and isinstance(dataset.data[kdim.name], da.Array):
data[kdim.name] = dataset.data[kdim.name].vindex[index]
else:
data[kdim.name] = np.asarray(data[kdim.name])[index]
for vdim in dataset.vdims:
da = dask_array_module()
if da and isinstance(dataset.data[vdim.name], da.Array):
data[vdim.name] = dataset.data[vdim.name].vindex[index]
else:
data[vdim.name] = np.asarray(dataset.data[vdim.name])[index]
if indexed:
if len(dataset.vdims) == 1:
da = dask_array_module()
arr = np.squeeze(data[dataset.vdims[0].name])
if da and isinstance(arr, da.Array):
arr = arr.compute()
return arr if np.isscalar(arr) else arr[()]
else:
return np.array([np.squeeze(data[vd.name])
for vd in dataset.vdims])
return data
@classmethod
def sample(cls, dataset, samples=[]):
"""
        Samples the gridded data into a dataset of samples.
"""
ndims = dataset.ndims
dimensions = dataset.dimensions(label='name')
arrays = [dataset.data[vdim.name] for vdim in dataset.vdims]
data = defaultdict(list)
for sample in samples:
if np.isscalar(sample): sample = [sample]
if len(sample) != ndims:
sample = [sample[i] if i < len(sample) else None
for i in range(ndims)]
sampled, int_inds = [], []
for d, ind in zip(dimensions, sample):
cdata = dataset.data[d]
mask = cls.key_select_mask(dataset, cdata, ind)
inds = np.arange(len(cdata)) if mask is None else np.argwhere(mask)
int_inds.append(inds)
sampled.append(cdata[mask])
for d, arr in zip(dimensions, np.meshgrid(*sampled)):
data[d].append(arr)
for vdim, array in zip(dataset.vdims, arrays):
da = dask_array_module()
flat_index = np.ravel_multi_index(tuple(int_inds)[::-1], array.shape)
if da and isinstance(array, da.Array):
data[vdim.name].append(array.flatten().vindex[tuple(flat_index)])
else:
data[vdim.name].append(array.flat[flat_index])
concatenated = {d: np.concatenate(arrays).flatten() for d, arrays in data.items()}
return concatenated
@classmethod
def aggregate(cls, dataset, kdims, function, **kwargs):
kdims = [dimension_name(kd) for kd in kdims]
data = {kdim: dataset.data[kdim] for kdim in kdims}
axes = tuple(dataset.ndims-dataset.get_dimension_index(kdim)-1
for kdim in dataset.kdims if kdim not in kdims)
da = dask_array_module()
dropped = []
for vdim in dataset.vdims:
values = dataset.data[vdim.name]
atleast_1d = da.atleast_1d if is_dask(values) else np.atleast_1d
try:
data[vdim.name] = atleast_1d(function(values, axis=axes, **kwargs))
except TypeError:
dropped.append(vdim)
return data, dropped
@classmethod
def reindex(cls, dataset, kdims, vdims):
dropped_kdims = [kd for kd in dataset.kdims if kd not in kdims]
dropped_vdims = ([vdim for vdim in dataset.vdims
if vdim not in vdims] if vdims else [])
constant = {}
for kd in dropped_kdims:
vals = cls.values(dataset, kd.name, expanded=False)
if len(vals) == 1:
constant[kd.name] = vals[0]
data = {k: values for k, values in dataset.data.items()
if k not in dropped_kdims+dropped_vdims}
if len(constant) == len(dropped_kdims):
joined_dims = kdims+dropped_kdims
axes = tuple(dataset.ndims-dataset.kdims.index(d)-1
for d in joined_dims)
dropped_axes = tuple(dataset.ndims-joined_dims.index(d)-1
for d in dropped_kdims)
for vdim in vdims:
vdata = data[vdim.name]
if len(axes) > 1:
vdata = vdata.transpose(axes[::-1])
if dropped_axes:
vdata = np.squeeze(vdata, axis=dropped_axes)
data[vdim.name] = vdata
return data
elif dropped_kdims:
return tuple(dataset.columns(kdims+vdims).values())
return data
@classmethod
def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
if not vdim:
raise Exception("Cannot add key dimension to a dense representation.")
dim = dimension_name(dimension)
return dict(dataset.data, **{dim: values})
@classmethod
def sort(cls, dataset, by=[], reverse=False):
if not by or by in [dataset.kdims, dataset.dimensions()]:
return dataset.data
else:
raise Exception('Compressed format cannot be sorted, either instantiate '
'in the desired order or use the expanded format.')
@classmethod
def iloc(cls, dataset, index):
rows, cols = index
scalar = False
if np.isscalar(cols):
scalar = np.isscalar(rows)
cols = [dataset.get_dimension(cols, strict=True)]
elif isinstance(cols, slice):
cols = dataset.dimensions()[cols]
else:
cols = [dataset.get_dimension(d, strict=True) for d in cols]
if np.isscalar(rows):
rows = [rows]
new_data = []
for d in cols:
new_data.append(cls.values(dataset, d, compute=False)[rows])
if scalar:
da = dask_array_module()
if new_data and (da and isinstance(new_data[0], da.Array)):
return new_data[0].compute()[0]
return new_data[0][0]
return tuple(new_data)
@classmethod
def range(cls, dataset, dimension):
if dataset._binned and dimension in dataset.kdims:
expanded = cls.irregular(dataset, dimension)
column = cls.coords(dataset, dimension, expanded=expanded, edges=True)
else:
column = cls.values(dataset, dimension, expanded=False, flat=False)
da = dask_array_module()
if column.dtype.kind == 'M':
dmin, dmax = column.min(), column.max()
if da and isinstance(column, da.Array):
return da.compute(dmin, dmax)
return dmin, dmax
elif len(column) == 0:
return np.NaN, np.NaN
else:
try:
dmin, dmax = (np.nanmin(column), np.nanmax(column))
if da and isinstance(column, da.Array):
return da.compute(dmin, dmax)
return dmin, dmax
except TypeError:
column.sort()
return column[0], column[-1]
Interface.register(GridInterface)
|
bsd-3-clause
| 5,546,692,618,131,833,000
| 40.402899
| 113
| 0.555027
| false
| 4.048179
| false
| false
| false
|
whiteclover/dbpy
|
tests/pymysqlt.py
|
1
|
12224
|
#!/usr/bin/env python
# Copyright (C) 2014-2015 Thomas Huang
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import db
import logging
global config
config = {
'passwd': 'test',
'user': 'test',
'host': 'localhost',
'db': 'test'
}
def _create():
db.execute('DROP TABLE IF EXISTS `users`')
db.execute("""CREATE TABLE `users` (
`uid` int(10) unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(20),
PRIMARY KEY (`uid`))""")
class TestDBBase(unittest.TestCase):
def setUp(self):
setattr(db, '__db', {})
def test_dup_key(self):
db.setup(config,adapter='pymysql')
f = lambda: db.setup(config,adapter='pymysql')
self.assertRaises(db.DBError, f)
def test_invalid_key(self):
f = lambda: db.setup(config, key='dd.xx')
self.assertRaises(TypeError, f)
def test_database(self):
db.setup(config,adapter='pymysql')
self.assertEqual(db.database(), db.database('default', slave=True))
conns = getattr(db, '__db', [])
self.assertEqual(len(conns['default.slave']), 1)
db.setup(config, slave=True)
self.assertNotEqual(db.database(), db.database('default', slave=True))
conns = getattr(db, '__db', [])
self.assertEqual(len(conns['default.slave']), 1)
db.setup(config, slave=True)
conns = getattr(db, '__db', [])
self.assertEqual(len(conns['default.slave']), 2)
class TestBase(unittest.TestCase):
def setUp(self):
setattr(db, '__db', {})
db.setup(config,adapter='pymysql')
db.execute('DROP TABLE IF EXISTS `users`')
db.execute("""CREATE TABLE `users` (
`uid` int(10) unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(20) NOT NULL,
PRIMARY KEY (`uid`))""")
def test_query(self):
self.assertEqual(1, db.query('SELECT 1')[0][0])
self.assertEqual(0, len(db.query('SELECT * FROM users')))
def test_execute(self):
res = db.execute('INSERT INTO users VALUES(%s, %s)', [(10, 'execute_test'), (9, 'execute_test')])
self.assertTrue(res)
res = db.execute('DELETE FROM users WHERE name=%s', ('execute_test',))
self.assertEqual(res, 2)
def test_pool(self):
import threading
def q(n):
for i in range(10):
res = db.query('select count(*) from users')
self.assertEqual(0, res[0][0])
n = 50
ts = []
for i in range(n):
t = threading.Thread(target=q, args=(i,))
ts.append(t)
for t in ts:
t.start()
for t in ts:
t.join()
class TestMultiDB(unittest.TestCase):
def setUp(self):
setattr(db, '__db', {})
db.setup(config, key='test')
db.setup(config, key='test', slave=True)
db.execute('DROP TABLE IF EXISTS `users`', key='test')
db.execute("""CREATE TABLE `users` (
`uid` int(10) unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(20) NOT NULL,
PRIMARY KEY (`uid`))""", key='test')
rows = []
for _ in range(1, 10):
rows.append('(%d , "name_%d")' % (_, _))
db.execute('INSERT INTO users VALUES ' + ', '.join(rows), key='test')
def tearDown(self):
db.execute('DELETE FROM users', key='test')
    def test_execute(self):
res = db.execute('insert into users values(%s, %s)', [(10L, 'thomas'), (11L, 'animer')], key='test')
res = db.query('SELECT count(*) FROM users WHERE uid>=10', key='test')
self.assertEqual(2, res[0][0])
def test_query(self):
res = db.query('select name from users limit 5', key='test')
self.assertEqual(len(res), 5)
res = db.query('select name from users limit %s', (100,), many=20, key='test')
rows = []
for r in res:
rows.append(r)
self.assertTrue(10, len(rows))
class TestSelectQuery(unittest.TestCase):
def setUp(self):
setattr(db, '__db', {})
db.setup(config,adapter='pymysql')
_create()
users = []
for i in range(1, 5):
users.append((i, 'user_' + str(i)))
users.append((5, None))
db.execute('INSERT INTO users VALUES(%s, %s)', users)
self.select = db.select('users')
def tearDown(self):
db.execute('delete from users')
def test_select_all(self):
self.assertEquals(len(self.select
.execute()), 5)
def test_select_as_dict(self):
res = self.select.condition('uid', 1).execute(as_dict=True)
self.assertEqual(len(res), 1)
self.assertEqual(type(res[0]), dict)
self.assertEqual(res[0]['uid'], 1)
def test_select_many(self):
res = (self.select.fields('*')
.execute(many=2))
rows = []
for row in res:
rows.append(row)
self.assertEquals(len(rows), 5)
def test_select_condition(self):
res = (self.select
.condition('name', 'user_1')
.condition('uid', 1)
.execute())
self.assertEquals(res[0][1], 'user_1')
def test_select_or_condition(self):
from db import or_
or_con = or_()
or_con.condition('name', 'user_1')
or_con.condition('name', 'user_2')
res = (self.select
.condition(or_con)
.execute())
self.assertEquals(res[0][1], 'user_1')
def test_select_like(self):
res = (self.select
.condition('name', 'user_%', 'like')
.execute())
self.assertEquals(len(res), 4)
def test_select_in(self):
res = (self.select.fields('*')
.condition('name', ['user_1', 'user_2'])
.execute())
self.assertEquals(res[0][1], 'user_1')
self.assertEquals(res[1][1], 'user_2')
def test_select_group_by(self):
self.assertEquals(len(self.select
.group_by('name', 'uid')
.execute()), 5)
def test_select_order_by_ASC(self):
self.assertEquals(len(self.select
.order_by('name')
.execute()), 5)
def test_select_order_by_DESC(self):
self.assertEquals(len(self.select
.order_by('name', 'DESC')
.execute()), 5)
def test_select_limit(self):
self.assertEquals(len(self.select.limit(2).execute()), 2)
def test_table_dot_condition(self):
res = self.select.condition('users.uid', 5).execute()
self.assertEqual(res[0][0], 5)
def test_is_null(self):
res = self.select.is_null('name').condition('uid', 5).execute()
self.assertEqual(res[0][0], 5)
def test_is_not_null(self):
self.assertEqual(len(self.select.is_not_null('uid').execute()), 5)
def test_expr(self):
from db import expr
res = self.select.fields(expr('count(*)')).execute()
self.assertEqual(res[0][0], 5)
res = db.select('users').fields(expr('count(uid)', 'total')).execute()
self.assertEqual(res[0][0], 5)
class TestUpdateQuery(unittest.TestCase):
def setUp(self):
setattr(db, '__db', {})
db.setup(config,adapter='pymysql')
_create()
users = []
for i in range(1, 6):
users.append((i, 'user_' + str(i)))
db.execute('delete from users')
db.execute('INSERT INTO users VALUES(%s, %s)', users)
self.update = db.update('users')
def tearDown(self):
db.execute('delete from users')
def test_update_on_name(self):
res = (self.update.
mset({'name':'update_test'})
.condition('name','user_1')
.execute())
self.assertEquals(res, 1)
def test_update_on_name_and_uid(self):
res = (self.update.
set('name', 'update_test')
.condition('name', 'user_2')
.condition('uid', 2)
.execute())
self.assertEquals(res, 1)
def test_update_not_exists(self):
res = (self.update.
mset({'name':'update', 'uid': 10})
.condition('name', 'not_exists')
.execute())
self.assertEquals(res, 0)
class TestInsertQuery(unittest.TestCase):
def setUp(self):
setattr(db, '__db', {})
db.setup(config,adapter='pymysql')
_create()
users = []
for i in range(1, 6):
users.append((i, 'user_' + str(i)))
db.execute('delete from users')
db.execute('INSERT INTO users VALUES(%s, %s)', users)
self.insert = db.insert('users')
self.select = db.select('users')
def tearDown(self):
db.execute('delete from users')
def test_insert(self):
res = self.insert.values((10, 'test_insert')).execute()
res = self.select.condition('name', 'test_insert').execute()
self.assertEqual(res[0][1], 'test_insert')
def test_insert_dict_values(self):
self.insert.fields('name').values({'name': 'insert_1'}).values(('insert_2',)).execute()
res = self.select.condition('name', ['insert_1', 'insert_2']).execute()
self.assertEqual(len(res), 2)
class TestDeleteQuery(unittest.TestCase):
def setUp(self):
setattr(db, '__db', {})
db.setup(config,adapter='pymysql')
_create()
users = []
for i in range(1, 6):
users.append((i, 'user_' + str(i)))
db.execute('INSERT INTO users VALUES(%s, %s)', users)
self.delete = db.delete('users')
def tearDown(self):
db.execute('delete from users')
def test_delete_by_uid(self):
res = self.delete.condition('uid', 1).execute()
self.assertEqual(res, 1)
def test_delete_by_condtions(self):
res = self.delete.condition('uid', 2).condition('name', 'user_2').execute()
self.assertEqual(res, 1)
def test_delete_or_condtions(self):
from db import or_
or_con = or_().condition('name', 'user_1').condition('name', 'user_2')
res = self.delete.condition(or_con).execute()
self.assertEqual(res, 2)
class TestTransaction(unittest.TestCase):
def setUp(self):
setattr(db, '__db', {})
db.setup(config,adapter='pymysql')
_create()
users = []
for i in range(1, 6):
users.append((i, 'user_' + str(i)))
db.execute('INSERT INTO users VALUES(%s, %s)', users)
def tearDown(self):
db.execute('delete from users')
def test_with(self):
with db.transaction() as t:
t.delete('users').condition('uid', 1).execute()
res = db.select('users').condition('uid', 1).execute()
self.assertEqual(len(res), 1)
res = db.select('users').condition('uid', 1).execute()
self.assertEqual(len(res), 0)
def test_begin_commit(self):
t = db.transaction()
t.begin()
t.delete('users').condition('uid', 1).execute()
res = db.select('users').condition('uid', 1).execute()
self.assertEqual(len(res), 1)
t.commit()
res = db.select('users').condition('uid', 1).execute()
self.assertEqual(len(res), 0)
if __name__ == '__main__':
debug = True
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(level=level,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
unittest.main(verbosity=2 if debug else 0)
|
gpl-2.0
| 4,985,435,720,029,420,000
| 30.589147
| 108
| 0.551865
| false
| 3.678604
| true
| false
| false
|
akx/shoop
|
_misc/ensure_license_headers.py
|
1
|
4075
|
#!/usr/bin/env python3
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
"""
License header updater.
"""
from __future__ import unicode_literals
import argparse
import os
import sys
import sanity_utils
HEADER = """
This file is part of Shoop.
Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
This source code is licensed under the AGPLv3 license found in the
LICENSE file in the root directory of this source tree.
""".strip()
PY_HEADER = '\n'.join(('# ' + line).strip() for line in HEADER.splitlines())
JS_HEADER = (
'/**\n' +
'\n'.join((' * ' + line).rstrip() for line in HEADER.splitlines()) +
'\n */')
PY_HEADER_LINES = PY_HEADER.encode('utf-8').splitlines()
JS_HEADER_LINES = JS_HEADER.encode('utf-8').splitlines()
def get_adders():
return {
'.py': add_header_to_python_file,
'.js': add_header_to_javascript_file
}
def main():
ap = argparse.ArgumentParser()
ap.add_argument("root", nargs="+", help="Directory roots to recurse through")
ap.add_argument("-w", "--write", help="Actually write changes", action="store_true")
ap.add_argument("-s", "--exit-status", help="Exit with error status when missing headers", action="store_true")
ap.add_argument("-v", "--verbose", help="Log OK files too", action="store_true")
args = ap.parse_args()
adders = get_adders()
paths = find_files(roots=args.root, extensions=set(adders.keys()))
missing = process_files(paths, adders, verbose=args.verbose, write=args.write)
if args.exit_status and missing:
return 1
return 0
def process_files(paths, adders, verbose, write):
width = max(len(s) for s in paths)
missing = set()
for path in sorted(paths):
if os.stat(path).st_size == 0:
if verbose:
print('[+]:%-*s: File is empty' % (width, path))
elif not has_header(path):
missing.add(path)
if write:
adder = adders[os.path.splitext(path)[1]]
adder(path)
print('[!]:%-*s: Modified' % (width, path))
else:
print('[!]:%-*s: Requires license header' % (width, path))
else:
if verbose:
print('[+]:%-*s: File has license header' % (width, path))
return missing
def find_files(roots, extensions):
paths = set()
generated_resources = set()
for root in roots:
for file in sanity_utils.find_files(
root,
generated_resources=generated_resources,
allowed_extensions=extensions,
ignored_dirs=sanity_utils.IGNORED_DIRS + ["migrations"]
):
if not is_file_ignored(file):
paths.add(file)
paths -= generated_resources
return paths
def is_file_ignored(filepath):
filepath = filepath.replace(os.sep, "/")
return (
('vendor' in filepath) or
('doc/_ext/djangodocs.py' in filepath)
)
def has_header(path):
with open(path, 'rb') as fp:
return b"This file is part of Shoop." in fp.read(256)
def add_header_to_python_file(path):
lines = get_lines(path)
if lines:
i = 0
if lines[i].startswith(b'#!'):
i += 1
if i < len(lines) and b'coding' in lines[i]:
i += 1
new_lines = lines[:i] + PY_HEADER_LINES + lines[i:]
write_lines(path, new_lines)
def add_header_to_javascript_file(path):
lines = get_lines(path)
if lines:
new_lines = JS_HEADER_LINES + lines
write_lines(path, new_lines)
def get_lines(path):
with open(path, 'rb') as fp:
contents = fp.read()
if not contents.strip():
return []
return contents.splitlines()
def write_lines(path, new_lines):
with open(path, 'wb') as fp:
for line in new_lines:
fp.write(line + b'\n')
if __name__ == '__main__':
sys.exit(main())
|
agpl-3.0
| 5,779,910,031,184,858,000
| 26.910959
| 115
| 0.593374
| false
| 3.509905
| false
| false
| false
|
aglitke/vdsm
|
vdsm/storage/image.py
|
1
|
51140
|
#
# Copyright 2009-2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import os
import logging
import threading
import uuid
from contextlib import contextmanager
import volume
from vdsm import qemuImg
from sdc import sdCache
import sd
import misc
import fileUtils
import imageSharing
from vdsm.config import config
from vdsm.utils import ActionStopped
import storage_exception as se
import task
from threadLocal import vars
import resourceFactories
import resourceManager as rm
log = logging.getLogger('Storage.Image')
rmanager = rm.ResourceManager.getInstance()
# Disk type
UNKNOWN_DISK_TYPE = 0
SYSTEM_DISK_TYPE = 1
DATA_DISK_TYPE = 2
SHARED_DISK_TYPE = 3
SWAP_DISK_TYPE = 4
TEMP_DISK_TYPE = 5
DISK_TYPES = {UNKNOWN_DISK_TYPE: 'UNKNOWN', SYSTEM_DISK_TYPE: 'SYSTEM',
DATA_DISK_TYPE: 'DATA', SHARED_DISK_TYPE: 'SHARED',
SWAP_DISK_TYPE: 'SWAP', TEMP_DISK_TYPE: 'TEMP'}
# What volumes to synchronize
SYNC_VOLUMES_ALL = 'ALL'
SYNC_VOLUMES_INTERNAL = 'INTERNAL'
SYNC_VOLUMES_LEAF = 'LEAF'
# Image Operations
UNKNOWN_OP = 0
COPY_OP = 1
MOVE_OP = 2
OP_TYPES = {UNKNOWN_OP: 'UNKNOWN', COPY_OP: 'COPY', MOVE_OP: 'MOVE'}
RENAME_RANDOM_STRING_LEN = 8
# Temporary size of a volume when we optimize out the prezeroing
TEMPORARY_VOLUME_SIZE = 20480 # in sectors (10M)
def _deleteImage(dom, imgUUID, postZero):
"""This ancillary function will be removed.
Replaces Image.delete() in Image.[copyCollapsed(), move(), multimove()].
"""
allVols = dom.getAllVolumes()
imgVols = sd.getVolsOfImage(allVols, imgUUID)
if not imgVols:
log.warning("No volumes found for image %s. %s", imgUUID, allVols)
return
elif postZero:
dom.zeroImage(dom.sdUUID, imgUUID, imgVols)
else:
dom.deleteImage(dom.sdUUID, imgUUID, imgVols)
class Image:
""" Actually represents a whole virtual disk.
    Consists of a chain of volumes.
"""
log = logging.getLogger('Storage.Image')
_fakeTemplateLock = threading.Lock()
@classmethod
def createImageRollback(cls, taskObj, imageDir):
"""
Remove empty image folder
"""
cls.log.info("createImageRollback: imageDir=%s" % (imageDir))
if os.path.exists(imageDir):
if not len(os.listdir(imageDir)):
fileUtils.cleanupdir(imageDir)
else:
cls.log.error("createImageRollback: Cannot remove dirty image "
"folder %s" % (imageDir))
def __init__(self, repoPath):
self.repoPath = repoPath
self.storage_repository = config.get('irs', 'repository')
def create(self, sdUUID, imgUUID):
"""Create placeholder for image's volumes
'sdUUID' - storage domain UUID
'imgUUID' - image UUID
"""
imageDir = os.path.join(self.repoPath, sdUUID, sd.DOMAIN_IMAGES,
imgUUID)
if not os.path.isdir(imageDir):
self.log.info("Create placeholder %s for image's volumes",
imageDir)
taskName = "create image rollback: " + imgUUID
vars.task.pushRecovery(task.Recovery(taskName, "image", "Image",
"createImageRollback",
[imageDir]))
os.mkdir(imageDir)
return imageDir
def getImageDir(self, sdUUID, imgUUID):
"""
Return image directory
"""
return os.path.join(self.repoPath, sdUUID, sd.DOMAIN_IMAGES, imgUUID)
def deletedVolumeName(self, uuid):
"""
Create REMOVED_IMAGE_PREFIX + <random> + uuid string.
"""
randomStr = misc.randomStr(RENAME_RANDOM_STRING_LEN)
return "%s%s_%s" % (sd.REMOVED_IMAGE_PREFIX, randomStr, uuid)
def __chainSizeCalc(self, sdUUID, imgUUID, volUUID, size):
"""
Compute an estimate of the whole chain size
using the sum of the actual size of the chain's volumes
"""
chain = self.getChain(sdUUID, imgUUID, volUUID)
newsize = 0
template = chain[0].getParentVolume()
if template:
newsize = template.getVolumeSize()
for vol in chain:
newsize += vol.getVolumeSize()
if newsize > size:
newsize = size
        newsize = int(newsize * 1.1)  # allocate 10% more for cow metadata
return newsize
def getChain(self, sdUUID, imgUUID, volUUID=None):
"""
Return the chain of volumes of image as a sorted list
(not including a shared base (template) if any)
"""
chain = []
volclass = sdCache.produce(sdUUID).getVolumeClass()
# Use volUUID when provided
if volUUID:
srcVol = volclass(self.repoPath, sdUUID, imgUUID, volUUID)
# For template images include only one volume (the template itself)
# NOTE: this relies on the fact that in a template there is only
# one volume
if srcVol.isShared():
return [srcVol]
# Find all the volumes when volUUID is not provided
else:
# Find all volumes of image
uuidlist = volclass.getImageVolumes(self.repoPath, sdUUID, imgUUID)
if not uuidlist:
raise se.ImageDoesNotExistInSD(imgUUID, sdUUID)
srcVol = volclass(self.repoPath, sdUUID, imgUUID, uuidlist[0])
# For template images include only one volume (the template itself)
if len(uuidlist) == 1 and srcVol.isShared():
return [srcVol]
# Searching for the leaf
for vol in uuidlist:
srcVol = volclass(self.repoPath, sdUUID, imgUUID, vol)
if srcVol.isLeaf():
break
srcVol = None
if not srcVol:
self.log.error("There is no leaf in the image %s", imgUUID)
raise se.ImageIsNotLegalChain(imgUUID)
# Build up the sorted parent -> child chain
while not srcVol.isShared():
chain.insert(0, srcVol)
if srcVol.getParent() == volume.BLANK_UUID:
break
srcVol = srcVol.getParentVolume()
self.log.info("sdUUID=%s imgUUID=%s chain=%s ", sdUUID, imgUUID, chain)
return chain
def getTemplate(self, sdUUID, imgUUID):
"""
Return template of the image
"""
tmpl = None
# Find all volumes of image (excluding template)
chain = self.getChain(sdUUID, imgUUID)
        # check if the chain is built above a template, or it is standalone
pvol = chain[0].getParentVolume()
if pvol:
tmpl = pvol
elif chain[0].isShared():
tmpl = chain[0]
return tmpl
def createFakeTemplate(self, sdUUID, volParams):
"""
Create fake template (relevant for Backup domain only)
"""
with self._fakeTemplateLock:
try:
destDom = sdCache.produce(sdUUID)
volclass = destDom.getVolumeClass()
# Validate that the destination template exists and accessible
volclass(self.repoPath, sdUUID, volParams['imgUUID'],
volParams['volUUID'])
except (se.VolumeDoesNotExist, se.ImagePathError):
try:
# Create fake parent volume
destDom.createVolume(
imgUUID=volParams['imgUUID'], size=volParams['size'],
volFormat=volume.COW_FORMAT,
preallocate=volume.SPARSE_VOL,
diskType=volParams['disktype'],
volUUID=volParams['volUUID'], desc="Fake volume",
srcImgUUID=volume.BLANK_UUID,
srcVolUUID=volume.BLANK_UUID)
vol = destDom.produceVolume(imgUUID=volParams['imgUUID'],
volUUID=volParams['volUUID'])
# Mark fake volume as "FAKE"
vol.setLegality(volume.FAKE_VOL)
# Mark fake volume as shared
vol.setShared()
# Now we should re-link all hardlinks of this template in
# all VMs based on it
destDom.templateRelink(volParams['imgUUID'],
volParams['volUUID'])
self.log.debug("Succeeded to create fake image %s in "
"domain %s", volParams['imgUUID'],
destDom.sdUUID)
except Exception:
self.log.error("Failure to create fake image %s in domain "
"%s", volParams['imgUUID'], destDom.sdUUID,
exc_info=True)
def isLegal(self, sdUUID, imgUUID):
"""
Check correctness of the whole chain (excluding template)
"""
try:
legal = True
volclass = sdCache.produce(sdUUID).getVolumeClass()
vollist = volclass.getImageVolumes(self.repoPath, sdUUID, imgUUID)
self.log.info("image %s in domain %s has vollist %s", imgUUID,
sdUUID, str(vollist))
for v in vollist:
vol = volclass(self.repoPath, sdUUID, imgUUID, v)
if not vol.isLegal() or vol.isFake():
legal = False
break
except:
legal = False
return legal
def __cleanupMove(self, srcVol, dstVol):
"""
Cleanup environments after move operation
"""
try:
if srcVol:
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID)
if dstVol:
dstVol.teardown(sdUUID=dstVol.sdUUID, volUUID=dstVol.volUUID)
except Exception:
self.log.error("Unexpected error", exc_info=True)
def _createTargetImage(self, destDom, srcSdUUID, imgUUID):
        # Before the actual data copying we need to perform several operations
        # such as: create all volumes, create a fake template if needed, ...
try:
# Find all volumes of source image
srcChain = self.getChain(srcSdUUID, imgUUID)
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error("Unexpected error", exc_info=True)
raise se.SourceImageActionError(imgUUID, srcSdUUID, str(e))
fakeTemplate = False
pimg = volume.BLANK_UUID # standalone chain
        # check if the chain is built above a template, or it is standalone
pvol = srcChain[0].getParentVolume()
if pvol:
# find out parent volume parameters
volParams = pvol.getVolumeParams()
pimg = volParams['imgUUID'] # pimg == template image
if destDom.isBackup():
                # FIXME: This workaround helps us copy a VM to the backup
                #        domain without its template. We will create a fake
                #        template for future VM creation and mark it as a
                #        FAKE volume. This situation is relevant for the
                #        backup domain only.
fakeTemplate = True
@contextmanager
def justLogIt(img):
self.log.debug("You don't really need lock parent of image %s",
img)
yield
dstImageResourcesNamespace = sd.getNamespace(
destDom.sdUUID, resourceFactories.IMAGE_NAMESPACE)
# In destination domain we need to lock image's template if exists
with rmanager.acquireResource(dstImageResourcesNamespace, pimg,
rm.LockType.shared) \
if pimg != volume.BLANK_UUID else justLogIt(imgUUID):
if fakeTemplate:
self.createFakeTemplate(destDom.sdUUID, volParams)
dstChain = []
for srcVol in srcChain:
# Create the dst volume
try:
# find out src volume parameters
volParams = srcVol.getVolumeParams(bs=1)
# To avoid prezeroing preallocated volumes on NFS domains
# we create the target as a sparse volume (since it will be
# soon filled with the data coming from the copy) and then
# we change its metadata back to the original value.
if (volParams['prealloc'] == volume.PREALLOCATED_VOL
and destDom.supportsSparseness):
tmpVolPreallocation = volume.SPARSE_VOL
else:
tmpVolPreallocation = volParams['prealloc']
destDom.createVolume(imgUUID=imgUUID,
size=volParams['size'],
volFormat=volParams['volFormat'],
preallocate=tmpVolPreallocation,
diskType=volParams['disktype'],
volUUID=srcVol.volUUID,
desc=volParams['descr'],
srcImgUUID=pimg,
srcVolUUID=volParams['parent'])
dstVol = destDom.produceVolume(imgUUID=imgUUID,
volUUID=srcVol.volUUID)
# Extend volume (for LV only) size to the actual size
dstVol.extend((volParams['apparentsize'] + 511) / 512)
# Change destination volume metadata back to the original
# type.
if tmpVolPreallocation != volParams['prealloc']:
dstVol.setType(volParams['prealloc'])
dstChain.append(dstVol)
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error("Unexpected error", exc_info=True)
raise se.DestImageActionError(imgUUID, destDom.sdUUID,
str(e))
# only base may have a different parent image
pimg = imgUUID
return {'srcChain': srcChain, 'dstChain': dstChain}
def _interImagesCopy(self, destDom, srcSdUUID, imgUUID, chains):
srcLeafVol = chains['srcChain'][-1]
dstLeafVol = chains['dstChain'][-1]
try:
# Prepare the whole chains before the copy
srcLeafVol.prepare(rw=False)
dstLeafVol.prepare(rw=True, chainrw=True, setrw=True)
except Exception:
self.log.error("Unexpected error", exc_info=True)
# teardown volumes
self.__cleanupMove(srcLeafVol, dstLeafVol)
raise
try:
for srcVol in chains['srcChain']:
# Do the actual copy
try:
dstVol = destDom.produceVolume(imgUUID=imgUUID,
volUUID=srcVol.volUUID)
srcFmt = srcVol.getFormat()
if srcFmt == volume.RAW_FORMAT:
srcFmtStr = volume.fmt2str(srcFmt)
dstFmtStr = volume.fmt2str(dstVol.getFormat())
self.log.debug("start qemu convert")
qemuImg.convert(srcVol.getVolumePath(),
dstVol.getVolumePath(),
vars.task.aborting,
srcFmtStr, dstFmtStr)
else:
srcSize = srcVol.getVolumeSize(bs=1)
misc.ddWatchCopy(srcVol.getVolumePath(),
dstVol.getVolumePath(),
vars.task.aborting,
size=srcSize)
except ActionStopped:
raise
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception:
self.log.error("Copy image error: image=%s, src domain=%s,"
" dst domain=%s", imgUUID, srcSdUUID,
destDom.sdUUID, exc_info=True)
raise se.CopyImageError()
finally:
# teardown volumes
self.__cleanupMove(srcLeafVol, dstLeafVol)
def _finalizeDestinationImage(self, destDom, imgUUID, chains, force):
for srcVol in chains['srcChain']:
try:
dstVol = destDom.produceVolume(imgUUID=imgUUID,
volUUID=srcVol.volUUID)
# In case of copying template, we should set the destination
# volume as SHARED (after copy because otherwise prepare as RW
# would fail)
if srcVol.isShared():
dstVol.setShared()
elif srcVol.isInternal():
dstVol.setInternal()
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error("Unexpected error", exc_info=True)
raise se.DestImageActionError(imgUUID, destDom.sdUUID, str(e))
def move(self, srcSdUUID, dstSdUUID, imgUUID, vmUUID, op, postZero, force):
"""
Move/Copy image between storage domains within same storage pool
"""
self.log.info("srcSdUUID=%s dstSdUUID=%s imgUUID=%s vmUUID=%s op=%s "
"force=%s postZero=%s", srcSdUUID, dstSdUUID, imgUUID,
vmUUID, OP_TYPES[op], str(force), str(postZero))
destDom = sdCache.produce(dstSdUUID)
        # If the image already exists, check whether it is illegal/fake;
        # if so, overwrite it
if not self.isLegal(destDom.sdUUID, imgUUID):
force = True
# We must first remove the previous instance of image (if exists)
# in destination domain, if we got the overwrite command
if force:
self.log.info("delete image %s on domain %s before overwriting",
imgUUID, destDom.sdUUID)
_deleteImage(destDom, imgUUID, postZero)
chains = self._createTargetImage(destDom, srcSdUUID, imgUUID)
self._interImagesCopy(destDom, srcSdUUID, imgUUID, chains)
self._finalizeDestinationImage(destDom, imgUUID, chains, force)
if force:
leafVol = chains['dstChain'][-1]
            # Now we should re-link all deleted hardlinks, if any
destDom.templateRelink(imgUUID, leafVol.volUUID)
# At this point we successfully finished the 'copy' part of the
# operation and we can clear all recoveries.
vars.task.clearRecoveries()
# If it's 'move' operation, we should delete src image after copying
if op == MOVE_OP:
# TODO: Should raise here.
try:
dom = sdCache.produce(srcSdUUID)
_deleteImage(dom, imgUUID, postZero)
except se.StorageException:
self.log.warning("Failed to remove img: %s from srcDom %s: "
"after it was copied to: %s", imgUUID,
srcSdUUID, dstSdUUID)
self.log.info("%s task on image %s was successfully finished",
OP_TYPES[op], imgUUID)
return True
def cloneStructure(self, sdUUID, imgUUID, dstSdUUID):
self._createTargetImage(sdCache.produce(dstSdUUID), sdUUID, imgUUID)
def syncData(self, sdUUID, imgUUID, dstSdUUID, syncType):
srcChain = self.getChain(sdUUID, imgUUID)
dstChain = self.getChain(dstSdUUID, imgUUID)
if syncType == SYNC_VOLUMES_INTERNAL:
try:
# Removing the leaf volumes
del srcChain[-1], dstChain[-1]
except IndexError:
raise se.ImageIsNotLegalChain()
elif syncType == SYNC_VOLUMES_LEAF:
try:
# Removing all the internal volumes
del srcChain[:-1], dstChain[:-1]
except IndexError:
raise se.ImageIsNotLegalChain()
elif syncType != SYNC_VOLUMES_ALL:
raise se.NotImplementedException()
if len(srcChain) != len(dstChain):
raise se.DestImageActionError(imgUUID, dstSdUUID)
# Checking the volume uuids (after removing the leaves to allow
# different uuids for the current top layer, see previous check).
for i, v in enumerate(srcChain):
if v.volUUID != dstChain[i].volUUID:
raise se.DestImageActionError(imgUUID, dstSdUUID)
dstDom = sdCache.produce(dstSdUUID)
self._interImagesCopy(dstDom, sdUUID, imgUUID,
{'srcChain': srcChain, 'dstChain': dstChain})
self._finalizeDestinationImage(dstDom, imgUUID,
{'srcChain': srcChain,
'dstChain': dstChain}, False)
def __cleanupMultimove(self, sdUUID, imgList, postZero=False):
"""
Cleanup environments after multiple-move operation
"""
for imgUUID in imgList:
try:
dom = sdCache.produce(sdUUID)
_deleteImage(dom, imgUUID, postZero)
except se.StorageException:
self.log.warning("Delete image failed for image: %s in SD: %s",
imgUUID, sdUUID, exc_info=True)
def multiMove(self, srcSdUUID, dstSdUUID, imgDict, vmUUID, force):
"""
Move multiple images between storage domains within same storage pool
"""
self.log.info("srcSdUUID=%s dstSdUUID=%s imgDict=%s vmUUID=%s "
"force=%s", srcSdUUID, dstSdUUID, str(imgDict), vmUUID,
str(force))
cleanup_candidates = []
# First, copy all images to the destination domain
for (imgUUID, postZero) in imgDict.iteritems():
self.log.info("srcSdUUID=%s dstSdUUID=%s imgUUID=%s postZero=%s",
srcSdUUID, dstSdUUID, imgUUID, postZero)
try:
self.move(srcSdUUID, dstSdUUID, imgUUID, vmUUID, COPY_OP,
postZero, force)
except se.StorageException:
self.__cleanupMultimove(sdUUID=dstSdUUID,
imgList=cleanup_candidates,
postZero=postZero)
raise
except Exception as e:
self.__cleanupMultimove(sdUUID=dstSdUUID,
imgList=cleanup_candidates,
postZero=postZero)
self.log.error(e, exc_info=True)
raise se.CopyImageError("image=%s, src domain=%s, dst "
"domain=%s: msg %s" %
(imgUUID, srcSdUUID, dstSdUUID,
str(e)))
cleanup_candidates.append(imgUUID)
        # Remove images from source domain only after successful copying of
# all images to the destination domain
for (imgUUID, postZero) in imgDict.iteritems():
try:
dom = sdCache.produce(srcSdUUID)
_deleteImage(dom, imgUUID, postZero)
except se.StorageException:
self.log.warning("Delete image failed for image %s in SD: %s",
imgUUID, dom.sdUUID, exc_info=True)
def __cleanupCopy(self, srcVol, dstVol):
"""
Cleanup environments after copy operation
"""
try:
if srcVol:
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID)
if dstVol:
dstVol.teardown(sdUUID=dstVol.sdUUID, volUUID=dstVol.volUUID)
except Exception:
self.log.error("Unexpected error", exc_info=True)
def validateVolumeChain(self, sdUUID, imgUUID):
"""
Check correctness of the whole chain (including template if exists)
"""
if not self.isLegal(sdUUID, imgUUID):
raise se.ImageIsNotLegalChain(imgUUID)
chain = self.getChain(sdUUID, imgUUID)
# check if the chain is build above a template, or it is a standalone
pvol = chain[0].getParentVolume()
if pvol:
if not pvol.isLegal() or pvol.isFake():
raise se.ImageIsNotLegalChain(imgUUID)
def copyCollapsed(self, sdUUID, vmUUID, srcImgUUID, srcVolUUID, dstImgUUID,
dstVolUUID, descr, dstSdUUID, volType, volFormat,
preallocate, postZero, force):
"""
Create new template/volume from VM.
Do it by collapse and copy the whole chain (baseVolUUID->srcVolUUID)
"""
self.log.info("sdUUID=%s vmUUID=%s srcImgUUID=%s srcVolUUID=%s "
"dstImgUUID=%s dstVolUUID=%s dstSdUUID=%s volType=%s "
"volFormat=%s preallocate=%s force=%s postZero=%s",
sdUUID, vmUUID, srcImgUUID, srcVolUUID, dstImgUUID,
dstVolUUID, dstSdUUID, volType,
volume.type2name(volFormat),
volume.type2name(preallocate), str(force), str(postZero))
try:
srcVol = dstVol = None
# Find out dest sdUUID
if dstSdUUID == sd.BLANK_UUID:
dstSdUUID = sdUUID
volclass = sdCache.produce(sdUUID).getVolumeClass()
destDom = sdCache.produce(dstSdUUID)
# find src volume
try:
srcVol = volclass(self.repoPath, sdUUID, srcImgUUID,
srcVolUUID)
except se.StorageException:
raise
except Exception as e:
self.log.error(e, exc_info=True)
raise se.SourceImageActionError(srcImgUUID, sdUUID, str(e))
# Create dst volume
try:
# find out src volume parameters
volParams = srcVol.getVolumeParams()
if volParams['parent'] and \
volParams['parent'] != volume.BLANK_UUID:
                    # The volume has a parent and is therefore part of a
                    # chain; in that case we cannot know the exact size of
                    # the sparse target file (chain ==> cow ==> sparse).
# Therefore compute an estimate of the target volume size
# using the sum of the actual size of the chain's volumes
if volParams['volFormat'] != volume.COW_FORMAT or \
volParams['prealloc'] != volume.SPARSE_VOL:
raise se.IncorrectFormat(self)
volParams['apparentsize'] = self.__chainSizeCalc(
sdUUID, srcImgUUID, srcVolUUID, volParams['size'])
# Find out dest volume parameters
if preallocate in [volume.PREALLOCATED_VOL, volume.SPARSE_VOL]:
volParams['prealloc'] = preallocate
if volFormat in [volume.COW_FORMAT, volume.RAW_FORMAT]:
dstVolFormat = volFormat
else:
dstVolFormat = volParams['volFormat']
self.log.info("copy source %s:%s:%s vol size %s destination "
"%s:%s:%s apparentsize %s" %
(sdUUID, srcImgUUID, srcVolUUID,
volParams['size'], dstSdUUID, dstImgUUID,
dstVolUUID, volParams['apparentsize']))
                # If the image already exists, check whether it is
                # illegal/fake; if so, overwrite it
if not self.isLegal(dstSdUUID, dstImgUUID):
force = True
# We must first remove the previous instance of image (if
# exists) in destination domain, if we got the overwrite
# command
if force:
self.log.info("delete image %s on domain %s before "
"overwriting", dstImgUUID, dstSdUUID)
_deleteImage(destDom, dstImgUUID, postZero)
# To avoid 'prezeroing' preallocated volume on NFS domain,
# we create the target volume with minimal size and after that
# we'll change its metadata back to the original size.
tmpSize = TEMPORARY_VOLUME_SIZE # in sectors (10M)
destDom.createVolume(
imgUUID=dstImgUUID, size=tmpSize, volFormat=dstVolFormat,
preallocate=volParams['prealloc'],
diskType=volParams['disktype'], volUUID=dstVolUUID,
desc=descr, srcImgUUID=volume.BLANK_UUID,
srcVolUUID=volume.BLANK_UUID)
dstVol = sdCache.produce(dstSdUUID).produceVolume(
imgUUID=dstImgUUID, volUUID=dstVolUUID)
                # For conversion to 'raw' we need to use the virtual disk
                # size instead of the apparent size
if dstVolFormat == volume.RAW_FORMAT:
newsize = volParams['size']
else:
newsize = volParams['apparentsize']
dstVol.extend(newsize)
dstPath = dstVol.getVolumePath()
# Change destination volume metadata back to the original size.
dstVol.setSize(volParams['size'])
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error("Unexpected error", exc_info=True)
raise se.CopyImageError("Destination volume %s error: %s" %
(dstVolUUID, str(e)))
try:
# Start the actual copy image procedure
srcVol.prepare(rw=False)
dstVol.prepare(rw=True, setrw=True)
try:
(rc, out, err) = volume.qemuConvert(
volParams['path'], dstPath, volParams['volFormat'],
dstVolFormat, vars.task.aborting,
size=srcVol.getVolumeSize(bs=1),
dstvolType=dstVol.getType())
if rc:
raise se.StorageException("rc: %s, err: %s" %
(rc, err))
except ActionStopped:
raise
except se.StorageException as e:
raise se.CopyImageError(str(e))
# Mark volume as SHARED
if volType == volume.SHARED_VOL:
dstVol.setShared()
dstVol.setLegality(volume.LEGAL_VOL)
if force:
                    # Now we should re-link all deleted hardlinks, if any
destDom.templateRelink(dstImgUUID, dstVolUUID)
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error("Unexpected error", exc_info=True)
raise se.CopyImageError("src image=%s, dst image=%s: msg=%s" %
(srcImgUUID, dstImgUUID, str(e)))
self.log.info("Finished copying %s:%s -> %s:%s", sdUUID,
srcVolUUID, dstSdUUID, dstVolUUID)
#TODO: handle return status
return dstVolUUID
finally:
self.__cleanupCopy(srcVol=srcVol, dstVol=dstVol)
def markIllegalSubChain(self, sdDom, imgUUID, chain):
"""
Mark all volumes in the sub-chain as illegal
"""
if not chain:
raise se.InvalidParameterException("chain", str(chain))
volclass = sdDom.getVolumeClass()
ancestor = chain[0]
successor = chain[-1]
tmpVol = volclass(self.repoPath, sdDom.sdUUID, imgUUID, successor)
dstParent = volclass(self.repoPath, sdDom.sdUUID, imgUUID,
ancestor).getParent()
# Mark all volumes as illegal
while tmpVol and dstParent != tmpVol.volUUID:
vol = tmpVol.getParentVolume()
tmpVol.setLegality(volume.ILLEGAL_VOL)
tmpVol = vol
def __teardownSubChain(self, sdUUID, imgUUID, chain):
"""
Teardown all volumes in the sub-chain
"""
if not chain:
raise se.InvalidParameterException("chain", str(chain))
        # Teardown subchain ('ancestor' ->...-> 'successor') volumes
        # before they are deleted.
        # This subchain includes volumes that were merged (rebased)
        # into 'successor' and should now be deleted.
        # We prepared all these volumes as part of preparing the whole
        # chain before the rebase, but during the rebase we detached all of
        # them from the chain and couldn't tear them down properly.
        # So now we must tear them down to release their resources.
volclass = sdCache.produce(sdUUID).getVolumeClass()
ancestor = chain[0]
successor = chain[-1]
srcVol = volclass(self.repoPath, sdUUID, imgUUID, successor)
dstParent = volclass(self.repoPath, sdUUID, imgUUID,
ancestor).getParent()
while srcVol and dstParent != srcVol.volUUID:
try:
self.log.info("Teardown volume %s from image %s",
srcVol.volUUID, imgUUID)
vol = srcVol.getParentVolume()
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID,
justme=True)
srcVol = vol
except Exception:
self.log.info("Failure to teardown volume %s in subchain %s "
"-> %s", srcVol.volUUID, ancestor, successor,
exc_info=True)
def removeSubChain(self, sdDom, imgUUID, chain, postZero):
"""
Remove all volumes in the sub-chain
"""
if not chain:
raise se.InvalidParameterException("chain", str(chain))
volclass = sdDom.getVolumeClass()
ancestor = chain[0]
successor = chain[-1]
srcVol = volclass(self.repoPath, sdDom.sdUUID, imgUUID, successor)
dstParent = volclass(self.repoPath, sdDom.sdUUID, imgUUID,
ancestor).getParent()
while srcVol and dstParent != srcVol.volUUID:
self.log.info("Remove volume %s from image %s", srcVol.volUUID,
imgUUID)
vol = srcVol.getParentVolume()
srcVol.delete(postZero=postZero, force=True)
chain.remove(srcVol.volUUID)
srcVol = vol
def _internalVolumeMerge(self, sdDom, srcVolParams, volParams, newSize,
chain):
"""
Merge internal volume
"""
srcVol = sdDom.produceVolume(imgUUID=srcVolParams['imgUUID'],
volUUID=srcVolParams['volUUID'])
# Extend successor volume to new accumulated subchain size
srcVol.extend(newSize)
srcVol.prepare(rw=True, chainrw=True, setrw=True)
try:
backingVolPath = os.path.join('..', srcVolParams['imgUUID'],
volParams['volUUID'])
srcVol.rebase(volParams['volUUID'], backingVolPath,
volParams['volFormat'], unsafe=False, rollback=True)
finally:
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID)
# Prepare chain for future erase
chain.remove(srcVolParams['volUUID'])
self.__teardownSubChain(sdDom.sdUUID, srcVolParams['imgUUID'], chain)
return chain
def _baseCowVolumeMerge(self, sdDom, srcVolParams, volParams, newSize,
chain):
"""
Merge snapshot with base COW volume
"""
        # FIXME!!! In this case we need a workaround to rebase the successor
        # and transform it into a base volume (without pointing to any
        # backing volume). Actually this case should be handled by
        # 'qemu-img rebase' (RFE to kvm). At this point we can achieve this
        # result by a 4-step procedure:
# Step 1: create temporary empty volume similar to ancestor volume
# Step 2: Rebase (safely) successor volume on top of this temporary
# volume
# Step 3: Rebase (unsafely) successor volume on top of "" (empty
# string)
# Step 4: Delete temporary volume
srcVol = sdDom.produceVolume(imgUUID=srcVolParams['imgUUID'],
volUUID=srcVolParams['volUUID'])
# Extend successor volume to new accumulated subchain size
srcVol.extend(newSize)
# Step 1: Create temporary volume with destination volume's parent
# parameters
newUUID = str(uuid.uuid4())
sdDom.createVolume(
imgUUID=srcVolParams['imgUUID'], size=volParams['size'],
volFormat=volParams['volFormat'], preallocate=volume.SPARSE_VOL,
diskType=volParams['disktype'], volUUID=newUUID,
desc="New base volume", srcImgUUID=volume.BLANK_UUID,
srcVolUUID=volume.BLANK_UUID)
tmpVol = sdDom.produceVolume(imgUUID=srcVolParams['imgUUID'],
volUUID=newUUID)
tmpVol.prepare(rw=True, justme=True, setrw=True)
        # We should prepare/teardown the volume for every single rebase.
        # The reason is recheckIfLeaf at the end of the rebase, which changes
        # volume permissions to RO for internal volumes.
srcVol.prepare(rw=True, chainrw=True, setrw=True)
try:
# Step 2: Rebase successor on top of tmpVol
# qemu-img rebase -b tmpBackingFile -F backingFormat -f srcFormat
# src
backingVolPath = os.path.join('..', srcVolParams['imgUUID'],
newUUID)
srcVol.rebase(newUUID, backingVolPath, volParams['volFormat'],
unsafe=False, rollback=True)
finally:
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID)
srcVol.prepare(rw=True, chainrw=True, setrw=True)
try:
# Step 3: Remove pointer to backing file from the successor by
# 'unsafed' rebase qemu-img rebase -u -b "" -F
# backingFormat -f srcFormat src
srcVol.rebase(volume.BLANK_UUID, "", volParams['volFormat'],
unsafe=True, rollback=False)
finally:
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID)
# Step 4: Delete temporary volume
tmpVol.teardown(sdUUID=tmpVol.sdUUID, volUUID=tmpVol.volUUID,
justme=True)
tmpVol.delete(postZero=False, force=True)
# Prepare chain for future erase
chain.remove(srcVolParams['volUUID'])
self.__teardownSubChain(sdDom.sdUUID, srcVolParams['imgUUID'], chain)
return chain
def _baseRawVolumeMerge(self, sdDom, srcVolParams, volParams, chain):
"""
Merge snapshot with base RAW volume
"""
        # In this case we need to convert the ancestor->successor subchain
        # to a new volume and rebase successor's children (if any) on top
        # of it.
        # Step 1: Create an empty volume named successor_MERGE similar to
        #         the ancestor volume.
        # Step 2: qemuConvert successor -> successor_MERGE
# Step 3: Rename successor to _remove_me__successor
# Step 4: Rename successor_MERGE to successor
# Step 5: Unsafely rebase successor's children on top of temporary
# volume
srcVol = chain[-1]
with srcVol.scopedPrepare(rw=True, chainrw=True, setrw=True):
# Find out successor's children list
chList = srcVolParams['children']
            # Step 1: Create an empty volume named successor_MERGE with
# destination volume's parent parameters
newUUID = srcVol.volUUID + "_MERGE"
sdDom.createVolume(
imgUUID=srcVolParams['imgUUID'], size=srcVolParams['size'],
volFormat=volParams['volFormat'],
preallocate=volParams['prealloc'],
diskType=volParams['disktype'], volUUID=newUUID,
desc=srcVolParams['descr'], srcImgUUID=volume.BLANK_UUID,
srcVolUUID=volume.BLANK_UUID)
newVol = sdDom.produceVolume(imgUUID=srcVolParams['imgUUID'],
volUUID=newUUID)
with newVol.scopedPrepare(rw=True, justme=True, setrw=True):
# Step 2: Convert successor to new volume
# qemu-img convert -f qcow2 successor -O raw newUUID
(rc, out, err) = volume.qemuConvert(
srcVolParams['path'], newVol.getVolumePath(),
srcVolParams['volFormat'], volParams['volFormat'],
vars.task.aborting, size=volParams['apparentsize'],
dstvolType=newVol.getType())
if rc:
self.log.error("qemu-img convert failed: rc=%s, out=%s, "
"err=%s", rc, out, err)
raise se.MergeSnapshotsError(newUUID)
if chList:
newVol.setInternal()
            # Step 3: Rename the successor to _remove_me__successor
tmpUUID = self.deletedVolumeName(srcVol.volUUID)
srcVol.rename(tmpUUID)
# Step 4: Rename successor_MERGE to successor
newVol.rename(srcVolParams['volUUID'])
# Step 5: Rebase children 'unsafely' on top of new volume
# qemu-img rebase -u -b tmpBackingFile -F backingFormat -f srcFormat
# src
for ch in chList:
ch.prepare(rw=True, chainrw=True, setrw=True, force=True)
backingVolPath = os.path.join('..', srcVolParams['imgUUID'],
srcVolParams['volUUID'])
try:
ch.rebase(srcVolParams['volUUID'], backingVolPath,
volParams['volFormat'], unsafe=True, rollback=True)
finally:
ch.teardown(sdUUID=ch.sdUUID, volUUID=ch.volUUID)
ch.recheckIfLeaf()
# Prepare chain for future erase
rmChain = [vol.volUUID for
vol in chain if vol.volUUID != srcVolParams['volUUID']]
rmChain.append(tmpUUID)
return rmChain
def subChainSizeCalc(self, ancestor, successor, vols):
"""
Do not add additional calls to this function.
TODO:
Should be unified with chainSizeCalc,
but merge should be refactored,
        but this file should probably be removed.
"""
chain = []
accumulatedChainSize = 0
endVolName = vols[ancestor].getParent() # TemplateVolName or None
currVolName = successor
while (currVolName != endVolName):
chain.insert(0, currVolName)
accumulatedChainSize += vols[currVolName].getVolumeSize()
currVolName = vols[currVolName].getParent()
return accumulatedChainSize, chain
def merge(self, sdUUID, vmUUID, imgUUID, ancestor, successor, postZero):
"""Merge source volume to the destination volume.
'successor' - source volume UUID
'ancestor' - destination volume UUID
"""
self.log.info("sdUUID=%s vmUUID=%s"
" imgUUID=%s ancestor=%s successor=%s postZero=%s",
sdUUID, vmUUID, imgUUID,
ancestor, successor, str(postZero))
sdDom = sdCache.produce(sdUUID)
allVols = sdDom.getAllVolumes()
volsImgs = sd.getVolsOfImage(allVols, imgUUID)
        # Since the image namespace should be locked, producing all the
        # volumes is safe. Producing the (eventual) template is safe also.
        # TODO: Split for block and file based volumes for efficiency's sake.
vols = {}
for vName in volsImgs.iterkeys():
vols[vName] = sdDom.produceVolume(imgUUID, vName)
srcVol = vols[successor]
srcVolParams = srcVol.getVolumeParams()
srcVolParams['children'] = []
for vName, vol in vols.iteritems():
if vol.getParent() == successor:
srcVolParams['children'].append(vol)
dstVol = vols[ancestor]
dstParentUUID = dstVol.getParent()
if dstParentUUID != sd.BLANK_UUID:
volParams = vols[dstParentUUID].getVolumeParams()
else:
volParams = dstVol.getVolumeParams()
accSize, chain = self.subChainSizeCalc(ancestor, successor, vols)
imageApparentSize = volParams['size']
        # allocate 10% more for cow metadata
reqSize = min(accSize, imageApparentSize) * 1.1
try:
# Start the actual merge image procedure
# IMPORTANT NOTE: volumes in the same image chain might have
# different capacity since the introduction of the disk resize
# feature. This means that when we merge volumes the ancestor
# should get the new size from the successor (in order to be
# able to contain the additional data that we are collapsing).
if dstParentUUID != sd.BLANK_UUID:
# The ancestor isn't a base volume of the chain.
self.log.info("Internal volume merge: src = %s dst = %s",
srcVol.getVolumePath(), dstVol.getVolumePath())
chainToRemove = self._internalVolumeMerge(
sdDom, srcVolParams, volParams, reqSize, chain)
# The ancestor is actually a base volume of the chain.
# We have 2 cases here:
# Case 1: ancestor is a COW volume (use 'rebase' workaround)
# Case 2: ancestor is a RAW volume (use 'convert + rebase')
elif volParams['volFormat'] == volume.RAW_FORMAT:
self.log.info("merge with convert: src = %s dst = %s",
srcVol.getVolumePath(), dstVol.getVolumePath())
chainToRemove = self._baseRawVolumeMerge(
sdDom, srcVolParams, volParams,
[vols[vName] for vName in chain])
else:
self.log.info("4 steps merge: src = %s dst = %s",
srcVol.getVolumePath(), dstVol.getVolumePath())
chainToRemove = self._baseCowVolumeMerge(
sdDom, srcVolParams, volParams, reqSize, chain)
            # This is an unrecoverable point, clear all recoveries
vars.task.clearRecoveries()
# mark all snapshots from 'ancestor' to 'successor' as illegal
self.markIllegalSubChain(sdDom, imgUUID, chainToRemove)
except ActionStopped:
raise
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error(e, exc_info=True)
raise se.SourceImageActionError(imgUUID, sdUUID, str(e))
try:
# remove all snapshots from 'ancestor' to 'successor'
self.removeSubChain(sdDom, imgUUID, chainToRemove, postZero)
except Exception:
self.log.error("Failure to remove subchain %s -> %s in image %s",
ancestor, successor, imgUUID, exc_info=True)
newVol = sdDom.produceVolume(imgUUID=srcVolParams['imgUUID'],
volUUID=srcVolParams['volUUID'])
try:
newVol.shrinkToOptimalSize()
except qemuImg.QImgError:
self.log.warning("Auto shrink after merge failed", exc_info=True)
self.log.info("Merge src=%s with dst=%s was successfully finished.",
srcVol.getVolumePath(), dstVol.getVolumePath())
def _activateVolumeForImportExport(self, domain, imgUUID, volUUID=None):
chain = self.getChain(domain.sdUUID, imgUUID, volUUID)
template = chain[0].getParentVolume()
if template or len(chain) > 1:
self.log.error("Importing and exporting an image with more "
"than one volume is not supported")
raise se.CopyImageError()
domain.activateVolumes(imgUUID, volUUIDs=[chain[0].volUUID])
return chain[0]
def upload(self, methodArgs, sdUUID, imgUUID, volUUID=None):
domain = sdCache.produce(sdUUID)
vol = self._activateVolumeForImportExport(domain, imgUUID, volUUID)
try:
imageSharing.upload(vol.getVolumePath(), methodArgs)
finally:
domain.deactivateImage(imgUUID)
def download(self, methodArgs, sdUUID, imgUUID, volUUID=None):
domain = sdCache.produce(sdUUID)
vol = self._activateVolumeForImportExport(domain, imgUUID, volUUID)
try:
# Extend the volume (if relevant) to the image size
vol.extend(imageSharing.getSize(methodArgs) / volume.BLOCK_SIZE)
imageSharing.download(vol.getVolumePath(), methodArgs)
finally:
domain.deactivateImage(imgUUID)
|
gpl-2.0
| 51,019,312,970,338,880
| 42.523404
| 79
| 0.557841
| false
| 4.385559
| false
| false
| false
|
Jumpscale/jumpscale_core8
|
lib/JumpScale/clients/graphite/GraphiteClient.py
|
1
|
1317
|
from JumpScale import j
import socket
import time
# import urllib.request, urllib.parse, urllib.error
try:
import urllib.request
import urllib.parse
import urllib.error
except ImportError:
import urllib.parse as urllib
class GraphiteClient:
def __init__(self):
self.__jslocation__ = "j.clients.graphite"
self._SERVER = '127.0.0.1'
self._CARBON_PORT = 2003
self._GRAPHITE_PORT = 8081
self._url = "http://%s:%s/render" % (self._SERVER, self._GRAPHITE_PORT)
# self.sock.connect((self.CARBON_SERVER, self.CARBON_PORT))
def send(self, msg):
"""
e.g. foo.bar.baz 20
"""
out = ""
for line in msg.split("\n"):
out += '%s %d\n' % (line, int(time.time()))
sock = socket.socket()
sock.connect((self._SERVER, self._CARBON_PORT))
sock.sendall(out)
sock.close()
def close(self):
pass
def query(self, query_=None, **kwargs):
import requests
query = query_.copy() if query_ else dict()
query.update(kwargs)
query['format'] = 'json'
if 'from_' in query:
query['from'] = query.pop('from_')
qs = urllib.parse.urlencode(query)
url = "%s?%s" % (self._url, qs)
return requests.get(url).json()
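# A minimal usage sketch (an illustration, not part of the original module).
# It assumes a Carbon/Graphite instance reachable at the defaults above, and
# that the client is registered under j.clients.graphite as __jslocation__
# suggests; the metric name "foo.bar.baz" is purely illustrative:
#
#   client = j.clients.graphite
#   client.send("foo.bar.baz 20")   # one datapoint, timestamped with now()
#   data = client.query(target='foo.bar.baz', from_='-1h')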
|
apache-2.0
| 1,251,291,274,446,699,500
| 24.823529
| 79
| 0.557327
| false
| 3.512
| false
| false
| false
|
fidals/refarm-site
|
catalog/models_operations.py
|
1
|
4426
|
import abc
import typing
from django.db.migrations.operations.base import Operation
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
# @todo #283:30m Group models.py, models_operations.py, models_expressions.py into the module.
class IndexSQL(abc.ABC):
def __init__(self, name: str):
self.name = name
def _index_name(self, table: str):
return f'{table}_{self.name}_idx'
@abc.abstractmethod
def execute(self, table: str, schema_editor: BaseDatabaseSchemaEditor):
"""Execute SQL operation."""
class AddedIndex(IndexSQL):
def __init__(self, name: str, columns: typing.List[str]):
super().__init__(name)
self.columns = columns
def execute(self, table, schema_editor):
schema_editor.execute(
f'CREATE INDEX {self._index_name(table)} ON {table}'
f'({", ".join(self.columns)});'
)
class DroppedIndex(IndexSQL):
def execute(self, table, schema_editor):
schema_editor.execute(
f'DROP INDEX {self._index_name(table)};'
)
class IndexOperation(Operation):
"""
Operate an index by given IndexSQL objects.
Docs: https://docs.djangoproject.com/en/1.11/ref/migration-operations/#writing-your-own
"""
reduces_to_sql = True
reversible = True
def __init__(self, model_name, forward: IndexSQL, backward: IndexSQL):
self.model_name = model_name
self.forward = forward
self.backward = backward
def state_forwards(self, app_label, state):
"""We have to implement this method for Operation interface."""
def database_forwards(self, app_label, schema_editor, _, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
table_name = to_model._meta.db_table
self.forward.execute(table_name, schema_editor)
def database_backwards(self, app_label, schema_editor, from_state, _):
from_model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, from_model):
table_name = from_model._meta.db_table
self.backward.execute(table_name, schema_editor)
def describe(self):
return f'Operate the index {self.name} for {self.model_name}'
class RevertedOperation(Operation):
reduces_to_sql = True
reversible = True
def __init__(self, operation: IndexOperation):
self.operation = operation
def state_forwards(self, app_label, state):
"""We have to implement this method for Operation interface."""
def database_forwards(self, app_label, schema_editor, from_state, to_state):
self.operation.database_backwards(app_label, schema_editor, from_state, to_state)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.operation.database_forwards(app_label, schema_editor, from_state, to_state)
# Django doesn't provide ability to add hooks to makemigrations.
# So we have to create migration files and add operations for
# abstract classes (like Tag) manually.
class IndexTagAlphanumeric:
ALPHANUMERIC_NAME = 'alphanumeric_name'
MODEL_NAME = 'tag'
def v1(self) -> typing.List[IndexOperation]:
return [IndexOperation(
model_name=self.MODEL_NAME,
forward=AddedIndex(
name=self.ALPHANUMERIC_NAME,
columns=[
"substring(name, '[a-zA-Zа-яА-Я]+')",
"(substring(name, '[0-9]+\.?[0-9]*')::float)",
],
),
backward=DroppedIndex(name=self.ALPHANUMERIC_NAME),
)]
def v2(self) -> typing.List[IndexOperation]:
"""Preserve whitespaces for alphabetic values of the index."""
old = self.v1()[0]
return [
RevertedOperation(old),
IndexOperation(
model_name=self.MODEL_NAME,
forward=AddedIndex(
name=self.ALPHANUMERIC_NAME,
columns=[
"substring(name, '[a-zA-Zа-яА-Я\s\-_,:;]+')",
"(substring(name, '[0-9]+\.?[0-9]*')::float)",
],
),
backward=DroppedIndex(name=self.ALPHANUMERIC_NAME),
),
]
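# A minimal sketch (an assumption, not part of this module) of how these
# operations could be wired into a hand-written migration file, since the
# comment above notes that makemigrations cannot generate them for abstract
# models. The app label 'catalog' and dependency '0001_initial' are
# illustrative:
#
#   from django.db import migrations
#   from catalog.models_operations import IndexTagAlphanumeric
#
#   class Migration(migrations.Migration):
#       dependencies = [('catalog', '0001_initial')]
#       operations = IndexTagAlphanumeric().v2()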
|
mit
| -4,237,920,755,846,553,000
| 31.970149
| 94
| 0.613173
| false
| 3.792275
| false
| false
| false
|
LiberatorUSA/GUCEF
|
dependencies/curl/tests/python_dependencies/impacket/smb.py
|
1
|
151624
|
# Copyright (c) 2003-2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Copyright (C) 2001 Michael Teo <michaelteo@bigfoot.com>
# smb.py - SMB/CIFS library
#
# This software is provided 'as-is', without any express or implied warranty.
# In no event will the author be held liable for any damages arising from the
# use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice cannot be removed or altered from any source distribution.
#
# Altered source done by Alberto Solino (@agsolino)
# Todo:
# [ ] Try [SMB]transport fragmentation using Transact requests
# [ ] Try other methods of doing write (write_raw, transact2, write, write_and_unlock, write_and_close, write_mpx)
# [-] Try replacements for SMB_COM_NT_CREATE_ANDX (CREATE, T_TRANSACT_CREATE, OPEN_ANDX works)
# [x] Fix forceWriteAndx, which needs to send a RecvRequest, because recv() will not send it
# [x] Fix Recv() when using RecvAndx and the answer comes split in several packets
# [ ] Try [SMB]transport fragmentation with overlapping segments
# [ ] Try [SMB]transport fragmentation with out of order segments
# [x] Do chained AndX requests
# [ ] Transform the rest of the calls to structure
# [X] Implement TRANS/TRANS2 reassembly for list_path
import os
import socket
import string
from binascii import a2b_hex
import datetime
from struct import pack, unpack
from contextlib import contextmanager
from impacket import nmb, ntlm, nt_errors, LOG
from impacket.structure import Structure
from impacket.spnego import SPNEGO_NegTokenInit, TypesMech, SPNEGO_NegTokenResp
# For signing
import hashlib
unicode_support = 0
unicode_convert = 1
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Dialect for SMB1
SMB_DIALECT = 'NT LM 0.12'
# Shared Device Type
SHARED_DISK = 0x00
SHARED_DISK_HIDDEN = 0x80000000
SHARED_PRINT_QUEUE = 0x01
SHARED_DEVICE = 0x02
SHARED_IPC = 0x03
# Extended attributes mask
ATTR_ARCHIVE = 0x020
ATTR_COMPRESSED = 0x800
ATTR_NORMAL = 0x080
ATTR_HIDDEN = 0x002
ATTR_READONLY = 0x001
ATTR_TEMPORARY = 0x100
ATTR_DIRECTORY = 0x010
ATTR_SYSTEM = 0x004
# Service Type
SERVICE_DISK = 'A:'
SERVICE_PRINTER = 'LPT1:'
SERVICE_IPC = 'IPC'
SERVICE_COMM = 'COMM'
SERVICE_ANY = '?????'
# Server Type (Can be used to mask with SMBMachine.get_type() or SMBDomain.get_type())
SV_TYPE_WORKSTATION = 0x00000001
SV_TYPE_SERVER = 0x00000002
SV_TYPE_SQLSERVER = 0x00000004
SV_TYPE_DOMAIN_CTRL = 0x00000008
SV_TYPE_DOMAIN_BAKCTRL = 0x00000010
SV_TYPE_TIME_SOURCE = 0x00000020
SV_TYPE_AFP = 0x00000040
SV_TYPE_NOVELL = 0x00000080
SV_TYPE_DOMAIN_MEMBER = 0x00000100
SV_TYPE_PRINTQ_SERVER = 0x00000200
SV_TYPE_DIALIN_SERVER = 0x00000400
SV_TYPE_XENIX_SERVER = 0x00000800
SV_TYPE_NT = 0x00001000
SV_TYPE_WFW = 0x00002000
SV_TYPE_SERVER_NT = 0x00004000
SV_TYPE_POTENTIAL_BROWSER = 0x00010000
SV_TYPE_BACKUP_BROWSER = 0x00020000
SV_TYPE_MASTER_BROWSER = 0x00040000
SV_TYPE_DOMAIN_MASTER = 0x00080000
SV_TYPE_LOCAL_LIST_ONLY = 0x40000000
SV_TYPE_DOMAIN_ENUM = 0x80000000
# Options values for SMB.stor_file and SMB.retr_file
SMB_O_CREAT = 0x10 # Create the file if file does not exists. Otherwise, operation fails.
SMB_O_EXCL = 0x00 # When used with SMB_O_CREAT, operation fails if file exists. Cannot be used with SMB_O_OPEN.
SMB_O_OPEN = 0x01 # Open the file if the file exists
SMB_O_TRUNC = 0x02 # Truncate the file if the file exists
# Share Access Mode
SMB_SHARE_COMPAT = 0x00
SMB_SHARE_DENY_EXCL = 0x10
SMB_SHARE_DENY_WRITE = 0x20
SMB_SHARE_DENY_READEXEC = 0x30
SMB_SHARE_DENY_NONE = 0x40
SMB_ACCESS_READ = 0x00
SMB_ACCESS_WRITE = 0x01
SMB_ACCESS_READWRITE = 0x02
SMB_ACCESS_EXEC = 0x03
TRANS_DISCONNECT_TID = 1
TRANS_NO_RESPONSE = 2
STATUS_SUCCESS = 0x00000000
STATUS_LOGON_FAILURE = 0xC000006D
STATUS_LOGON_TYPE_NOT_GRANTED = 0xC000015B
MAX_TFRAG_SIZE = 5840
EVASION_NONE = 0
EVASION_LOW = 1
EVASION_HIGH = 2
EVASION_MAX = 3
RPC_X_BAD_STUB_DATA = 0x6F7
# SMB_FILE_ATTRIBUTES
SMB_FILE_ATTRIBUTE_NORMAL = 0x0000
SMB_FILE_ATTRIBUTE_READONLY = 0x0001
SMB_FILE_ATTRIBUTE_HIDDEN = 0x0002
SMB_FILE_ATTRIBUTE_SYSTEM = 0x0004
SMB_FILE_ATTRIBUTE_VOLUME = 0x0008
SMB_FILE_ATTRIBUTE_DIRECTORY = 0x0010
SMB_FILE_ATTRIBUTE_ARCHIVE = 0x0020
SMB_SEARCH_ATTRIBUTE_READONLY = 0x0100
SMB_SEARCH_ATTRIBUTE_HIDDEN = 0x0200
SMB_SEARCH_ATTRIBUTE_SYSTEM = 0x0400
SMB_SEARCH_ATTRIBUTE_DIRECTORY = 0x1000
SMB_SEARCH_ATTRIBUTE_ARCHIVE = 0x2000
# Session SetupAndX Action flags
SMB_SETUP_GUEST = 0x01
SMB_SETUP_USE_LANMAN_KEY = 0x02
# QUERY_INFORMATION levels
SMB_INFO_ALLOCATION = 0x0001
SMB_INFO_VOLUME = 0x0002
FILE_FS_SIZE_INFORMATION = 0x0003
SMB_QUERY_FS_VOLUME_INFO = 0x0102
SMB_QUERY_FS_SIZE_INFO = 0x0103
SMB_QUERY_FILE_EA_INFO = 0x0103
SMB_QUERY_FS_DEVICE_INFO = 0x0104
SMB_QUERY_FS_ATTRIBUTE_INFO = 0x0105
SMB_QUERY_FILE_BASIC_INFO = 0x0101
SMB_QUERY_FILE_STANDARD_INFO = 0x0102
SMB_QUERY_FILE_ALL_INFO = 0x0107
FILE_FS_FULL_SIZE_INFORMATION = 0x03EF
# SET_INFORMATION levels
SMB_SET_FILE_DISPOSITION_INFO = 0x0102
SMB_SET_FILE_BASIC_INFO = 0x0101
SMB_SET_FILE_END_OF_FILE_INFO = 0x0104
# File System Attributes
FILE_CASE_SENSITIVE_SEARCH = 0x00000001
FILE_CASE_PRESERVED_NAMES = 0x00000002
FILE_UNICODE_ON_DISK = 0x00000004
FILE_PERSISTENT_ACLS = 0x00000008
FILE_FILE_COMPRESSION = 0x00000010
FILE_VOLUME_IS_COMPRESSED = 0x00008000
# FIND_FIRST2 flags and levels
SMB_FIND_CLOSE_AFTER_REQUEST = 0x0001
SMB_FIND_CLOSE_AT_EOS = 0x0002
SMB_FIND_RETURN_RESUME_KEYS = 0x0004
SMB_FIND_CONTINUE_FROM_LAST = 0x0008
SMB_FIND_WITH_BACKUP_INTENT = 0x0010
FILE_DIRECTORY_FILE = 0x00000001
FILE_DELETE_ON_CLOSE = 0x00001000
FILE_NON_DIRECTORY_FILE = 0x00000040
SMB_FIND_INFO_STANDARD = 0x0001
SMB_FIND_FILE_DIRECTORY_INFO = 0x0101
SMB_FIND_FILE_FULL_DIRECTORY_INFO= 0x0102
SMB_FIND_FILE_NAMES_INFO = 0x0103
SMB_FIND_FILE_BOTH_DIRECTORY_INFO= 0x0104
SMB_FIND_FILE_ID_FULL_DIRECTORY_INFO = 0x105
SMB_FIND_FILE_ID_BOTH_DIRECTORY_INFO = 0x106
# DesiredAccess flags
FILE_READ_DATA = 0x00000001
FILE_WRITE_DATA = 0x00000002
FILE_APPEND_DATA = 0x00000004
FILE_EXECUTE = 0x00000020
MAXIMUM_ALLOWED = 0x02000000
GENERIC_ALL = 0x10000000
GENERIC_EXECUTE = 0x20000000
GENERIC_WRITE = 0x40000000
GENERIC_READ = 0x80000000
# ShareAccess flags
FILE_SHARE_NONE = 0x00000000
FILE_SHARE_READ = 0x00000001
FILE_SHARE_WRITE = 0x00000002
FILE_SHARE_DELETE = 0x00000004
# CreateDisposition flags
FILE_SUPERSEDE = 0x00000000
FILE_OPEN = 0x00000001
FILE_CREATE = 0x00000002
FILE_OPEN_IF = 0x00000003
FILE_OVERWRITE = 0x00000004
FILE_OVERWRITE_IF = 0x00000005
def strerror(errclass, errcode):
if errclass == 0x01:
return 'OS error', ERRDOS.get(errcode, 'Unknown error')
elif errclass == 0x02:
return 'Server error', ERRSRV.get(errcode, 'Unknown error')
elif errclass == 0x03:
return 'Hardware error', ERRHRD.get(errcode, 'Unknown error')
# This is not a standard error class for SMB
#elif errclass == 0x80:
# return 'Browse error', ERRBROWSE.get(errcode, 'Unknown error')
elif errclass == 0xff:
return 'Bad command', 'Bad command. Please file bug report'
else:
return 'Unknown error', 'Unknown error'
# Raised when an error has occurred during a session
class SessionError(Exception):
# SMB X/Open error codes for the ERRDOS error class
ERRsuccess = 0
ERRbadfunc = 1
ERRbadfile = 2
ERRbadpath = 3
ERRnofids = 4
ERRnoaccess = 5
ERRbadfid = 6
ERRbadmcb = 7
ERRnomem = 8
ERRbadmem = 9
ERRbadenv = 10
ERRbadaccess = 12
ERRbaddata = 13
ERRres = 14
ERRbaddrive = 15
ERRremcd = 16
ERRdiffdevice = 17
ERRnofiles = 18
ERRgeneral = 31
ERRbadshare = 32
ERRlock = 33
ERRunsup = 50
ERRnetnamedel = 64
ERRnosuchshare = 67
ERRfilexists = 80
ERRinvalidparam = 87
ERRcannotopen = 110
ERRinsufficientbuffer = 122
ERRinvalidname = 123
ERRunknownlevel = 124
ERRnotlocked = 158
ERRrename = 183
ERRbadpipe = 230
ERRpipebusy = 231
ERRpipeclosing = 232
ERRnotconnected = 233
ERRmoredata = 234
ERRnomoreitems = 259
ERRbaddirectory = 267
ERReasnotsupported = 282
ERRlogonfailure = 1326
ERRbuftoosmall = 2123
ERRunknownipc = 2142
ERRnosuchprintjob = 2151
ERRinvgroup = 2455
# here's a special one from observing NT
ERRnoipc = 66
# These errors seem to be only returned by the NT printer driver system
ERRdriveralreadyinstalled = 1795
ERRunknownprinterport = 1796
ERRunknownprinterdriver = 1797
ERRunknownprintprocessor = 1798
ERRinvalidseparatorfile = 1799
ERRinvalidjobpriority = 1800
ERRinvalidprintername = 1801
ERRprinteralreadyexists = 1802
ERRinvalidprintercommand = 1803
ERRinvaliddatatype = 1804
ERRinvalidenvironment = 1805
ERRunknownprintmonitor = 3000
ERRprinterdriverinuse = 3001
ERRspoolfilenotfound = 3002
ERRnostartdoc = 3003
ERRnoaddjob = 3004
ERRprintprocessoralreadyinstalled = 3005
ERRprintmonitoralreadyinstalled = 3006
ERRinvalidprintmonitor = 3007
ERRprintmonitorinuse = 3008
ERRprinterhasjobsqueued = 3009
# Error codes for the ERRSRV class
ERRerror = 1
ERRbadpw = 2
ERRbadtype = 3
ERRaccess = 4
ERRinvnid = 5
ERRinvnetname = 6
ERRinvdevice = 7
ERRqfull = 49
ERRqtoobig = 50
ERRinvpfid = 52
ERRsmbcmd = 64
ERRsrverror = 65
ERRfilespecs = 67
ERRbadlink = 68
ERRbadpermits = 69
ERRbadpid = 70
ERRsetattrmode = 71
ERRpaused = 81
ERRmsgoff = 82
ERRnoroom = 83
ERRrmuns = 87
ERRtimeout = 88
ERRnoresource = 89
ERRtoomanyuids = 90
ERRbaduid = 91
ERRuseMPX = 250
ERRuseSTD = 251
ERRcontMPX = 252
ERRbadPW = None
ERRnosupport = 0
ERRunknownsmb = 22
# Error codes for the ERRHRD class
ERRnowrite = 19
ERRbadunit = 20
ERRnotready = 21
ERRbadcmd = 22
ERRdata = 23
ERRbadreq = 24
ERRseek = 25
ERRbadmedia = 26
ERRbadsector = 27
ERRnopaper = 28
ERRwrite = 29
ERRread = 30
ERRwrongdisk = 34
ERRFCBunavail = 35
ERRsharebufexc = 36
ERRdiskfull = 39
hard_msgs = {
19: ("ERRnowrite", "Attempt to write on write-protected diskette."),
20: ("ERRbadunit", "Unknown unit."),
21: ("ERRnotready", "Drive not ready."),
22: ("ERRbadcmd", "Unknown command."),
23: ("ERRdata", "Data error (CRC)."),
24: ("ERRbadreq", "Bad request structure length."),
25: ("ERRseek", "Seek error."),
26: ("ERRbadmedia", "Unknown media type."),
27: ("ERRbadsector", "Sector not found."),
28: ("ERRnopaper", "Printer out of paper."),
29: ("ERRwrite", "Write fault."),
30: ("ERRread", "Read fault."),
31: ("ERRgeneral", "General failure."),
32: ("ERRbadshare", "An open conflicts with an existing open."),
33: ("ERRlock", "A Lock request conflicted with an existing lock or specified an invalid mode, or an Unlock requested attempted to remove a lock held by another process."),
34: ("ERRwrongdisk", "The wrong disk was found in a drive."),
35: ("ERRFCBUnavail", "No FCBs are available to process request."),
36: ("ERRsharebufexc", "A sharing buffer has been exceeded.")
}
dos_msgs = {
ERRbadfunc: ("ERRbadfunc", "Invalid function."),
ERRbadfile: ("ERRbadfile", "File not found."),
ERRbadpath: ("ERRbadpath", "Directory invalid."),
ERRnofids: ("ERRnofids", "No file descriptors available"),
ERRnoaccess: ("ERRnoaccess", "Access denied."),
ERRbadfid: ("ERRbadfid", "Invalid file handle."),
ERRbadmcb: ("ERRbadmcb", "Memory control blocks destroyed."),
ERRnomem: ("ERRnomem", "Insufficient server memory to perform the requested function."),
ERRbadmem: ("ERRbadmem", "Invalid memory block address."),
ERRbadenv: ("ERRbadenv", "Invalid environment."),
11: ("ERRbadformat", "Invalid format."),
ERRbadaccess: ("ERRbadaccess", "Invalid open mode."),
ERRbaddata: ("ERRbaddata", "Invalid data."),
ERRres: ("ERRres", "reserved."),
ERRbaddrive: ("ERRbaddrive", "Invalid drive specified."),
ERRremcd: ("ERRremcd", "A Delete Directory request attempted to remove the server's current directory."),
ERRdiffdevice: ("ERRdiffdevice", "Not same device."),
ERRnofiles: ("ERRnofiles", "A File Search command can find no more files matching the specified criteria."),
ERRbadshare: ("ERRbadshare", "The sharing mode specified for an Open conflicts with existing FIDs on the file."),
ERRlock: ("ERRlock", "A Lock request conflicted with an existing lock or specified an invalid mode, or an Unlock requested attempted to remove a lock held by another process."),
ERRunsup: ("ERRunsup", "The operation is unsupported"),
ERRnosuchshare: ("ERRnosuchshare", "You specified an invalid share name"),
ERRfilexists: ("ERRfilexists", "The file named in a Create Directory, Make New File or Link request already exists."),
ERRinvalidname: ("ERRinvalidname", "Invalid name"),
ERRbadpipe: ("ERRbadpipe", "Pipe invalid."),
ERRpipebusy: ("ERRpipebusy", "All instances of the requested pipe are busy."),
ERRpipeclosing: ("ERRpipeclosing", "Pipe close in progress."),
ERRnotconnected: ("ERRnotconnected", "No process on other end of pipe."),
ERRmoredata: ("ERRmoredata", "There is more data to be returned."),
ERRinvgroup: ("ERRinvgroup", "Invalid workgroup (try the -W option)"),
ERRlogonfailure: ("ERRlogonfailure", "Logon failure"),
ERRdiskfull: ("ERRdiskfull", "Disk full"),
ERRgeneral: ("ERRgeneral", "General failure"),
ERRunknownlevel: ("ERRunknownlevel", "Unknown info level")
}
server_msgs = {
1: ("ERRerror", "Non-specific error code."),
2: ("ERRbadpw", "Bad password - name/password pair in a Tree Connect or Session Setup are invalid."),
3: ("ERRbadtype", "reserved."),
4: ("ERRaccess", "The requester does not have the necessary access rights within the specified context for the requested function. The context is defined by the TID or the UID."),
5: ("ERRinvnid", "The tree ID (TID) specified in a command was invalid."),
6: ("ERRinvnetname", "Invalid network name in tree connect."),
7: ("ERRinvdevice", "Invalid device - printer request made to non-printer connection or non-printer request made to printer connection."),
49: ("ERRqfull", "Print queue full (files) -- returned by open print file."),
50: ("ERRqtoobig", "Print queue full -- no space."),
51: ("ERRqeof", "EOF on print queue dump."),
52: ("ERRinvpfid", "Invalid print file FID."),
64: ("ERRsmbcmd", "The server did not recognize the command received."),
65: ("ERRsrverror","The server encountered an internal error, e.g., system file unavailable."),
67: ("ERRfilespecs", "The file handle (FID) and pathname parameters contained an invalid combination of values."),
68: ("ERRreserved", "reserved."),
69: ("ERRbadpermits", "The access permissions specified for a file or directory are not a valid combination. The server cannot set the requested attribute."),
70: ("ERRreserved", "reserved."),
71: ("ERRsetattrmode", "The attribute mode in the Set File Attribute request is invalid."),
81: ("ERRpaused", "Server is paused."),
82: ("ERRmsgoff", "Not receiving messages."),
83: ("ERRnoroom", "No room to buffer message."),
87: ("ERRrmuns", "Too many remote user names."),
88: ("ERRtimeout", "Operation timed out."),
89: ("ERRnoresource", "No resources currently available for request."),
90: ("ERRtoomanyuids", "Too many UIDs active on this session."),
91: ("ERRbaduid", "The UID is not known as a valid ID on this session."),
250: ("ERRusempx","Temp unable to support Raw, use MPX mode."),
251: ("ERRusestd","Temp unable to support Raw, use standard read/write."),
252: ("ERRcontmpx", "Continue in MPX mode."),
253: ("ERRreserved", "reserved."),
254: ("ERRreserved", "reserved."),
0xFFFF: ("ERRnosupport", "Function not supported.")
}
    # Error classes
ERRDOS = 0x1
error_classes = { 0: ("SUCCESS", {}),
ERRDOS: ("ERRDOS", dos_msgs),
0x02: ("ERRSRV",server_msgs),
0x03: ("ERRHRD",hard_msgs),
0x04: ("ERRXOS", {} ),
0xE1: ("ERRRMX1", {} ),
0xE2: ("ERRRMX2", {} ),
0xE3: ("ERRRMX3", {} ),
0xFF: ("ERRCMD", {} ) }
def __init__( self, error_string, error_class, error_code, nt_status = 0):
Exception.__init__(self, error_string)
self.nt_status = nt_status
self._args = error_string
if nt_status:
self.error_class = 0
self.error_code = (error_code << 16) + error_class
else:
self.error_class = error_class
self.error_code = error_code
def get_error_class( self ):
return self.error_class
def get_error_code( self ):
return self.error_code
def __str__( self ):
error_class = SessionError.error_classes.get( self.error_class, None )
if not error_class:
error_code_str = self.error_code
error_class_str = self.error_class
else:
error_class_str = error_class[0]
error_code = error_class[1].get( self.error_code, None )
if not error_code:
error_code_str = self.error_code
else:
error_code_str = '%s(%s)' % error_code
if self.nt_status:
return 'SMB SessionError: %s(%s)' % nt_errors.ERROR_MESSAGES[self.error_code]
else:
# Fall back to the old format
return 'SMB SessionError: class: %s, code: %s' % (error_class_str, error_code_str)
# Raised when a feature present/required in the protocol is not
# currently supported by pysmb
class UnsupportedFeature(Exception): pass
# Contains information about a SMB shared device/service
class SharedDevice:
def __init__(self, name, share_type, comment):
self.__name = name
self.__type = share_type
self.__comment = comment
def get_name(self):
return self.__name
def get_type(self):
return self.__type
def get_comment(self):
return self.__comment
def __repr__(self):
return '<SharedDevice instance: name=' + self.__name + ', type=' + str(self.__type) + ', comment="' + self.__comment + '">'
# Contains information about the shared file/directory
class SharedFile:
def __init__(self, ctime, atime, mtime, filesize, allocsize, attribs, shortname, longname):
self.__ctime = ctime
self.__atime = atime
self.__mtime = mtime
self.__filesize = filesize
self.__allocsize = allocsize
self.__attribs = attribs
try:
self.__shortname = shortname[:string.index(shortname, '\0')]
except ValueError:
self.__shortname = shortname
try:
self.__longname = longname[:string.index(longname, '\0')]
except ValueError:
self.__longname = longname
def get_ctime(self):
return self.__ctime
def get_ctime_epoch(self):
return self.__convert_smbtime(self.__ctime)
def get_mtime(self):
return self.__mtime
def get_mtime_epoch(self):
return self.__convert_smbtime(self.__mtime)
def get_atime(self):
return self.__atime
def get_atime_epoch(self):
return self.__convert_smbtime(self.__atime)
def get_filesize(self):
return self.__filesize
def get_allocsize(self):
return self.__allocsize
def get_attributes(self):
return self.__attribs
def is_archive(self):
return self.__attribs & ATTR_ARCHIVE
def is_compressed(self):
return self.__attribs & ATTR_COMPRESSED
def is_normal(self):
return self.__attribs & ATTR_NORMAL
def is_hidden(self):
return self.__attribs & ATTR_HIDDEN
def is_readonly(self):
return self.__attribs & ATTR_READONLY
def is_temporary(self):
return self.__attribs & ATTR_TEMPORARY
def is_directory(self):
return self.__attribs & ATTR_DIRECTORY
def is_system(self):
return self.__attribs & ATTR_SYSTEM
def get_shortname(self):
return self.__shortname
def get_longname(self):
return self.__longname
def __repr__(self):
return '<SharedFile instance: shortname="' + self.__shortname + '", longname="' + self.__longname + '", filesize=' + str(self.__filesize) + '>'
@staticmethod
def __convert_smbtime(t):
x = t >> 32
y = t & 0xffffffff
geo_cal_offset = 11644473600.0 # = 369.0 * 365.25 * 24 * 60 * 60 - (3.0 * 24 * 60 * 60 + 6.0 * 60 * 60)
return (x * 4.0 * (1 << 30) + (y & 0xfff00000)) * 1.0e-7 - geo_cal_offset
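# Note on the conversion above: SMB timestamps are Windows FILETIME values, i.e.
# 64-bit counts of 100-nanosecond intervals since 1601-01-01 UTC. geo_cal_offset
# (11644473600 seconds) is the gap between that epoch and the POSIX epoch, so the
# conversion boils down to epoch_seconds = filetime / 1e7 - 11644473600; the
# high/low 32-bit split mirrors how the value arrives on the wire.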
# Contain information about a SMB machine
class SMBMachine:
def __init__(self, nbname, nbt_type, comment):
self.__nbname = nbname
self.__type = nbt_type
self.__comment = comment
def __repr__(self):
return '<SMBMachine instance: nbname="' + self.__nbname + '", type=' + hex(self.__type) + ', comment="' + self.__comment + '">'
class SMBDomain:
def __init__(self, nbgroup, domain_type, master_browser):
self.__nbgroup = nbgroup
self.__type = domain_type
self.__master_browser = master_browser
def __repr__(self):
return '<SMBDomain instance: nbgroup="' + self.__nbgroup + '", type=' + hex(self.__type) + ', master browser="' + self.__master_browser + '">'
# Represents a SMB Packet
class NewSMBPacket(Structure):
structure = (
('Signature', '"\xffSMB'),
('Command','B=0'),
('ErrorClass','B=0'),
('_reserved','B=0'),
('ErrorCode','<H=0'),
('Flags1','B=0'),
('Flags2','<H=0'),
('PIDHigh','<H=0'),
('SecurityFeatures','8s=""'),
('Reserved','<H=0'),
('Tid','<H=0xffff'),
('Pid','<H=0'),
('Uid','<H=0'),
('Mid','<H=0'),
('Data','*:'),
)
def __init__(self, **kargs):
Structure.__init__(self, **kargs)
if ('Flags2' in self.fields) is False:
self['Flags2'] = 0
if ('Flags1' in self.fields) is False:
self['Flags1'] = 0
if 'data' not in kargs:
self['Data'] = []
def addCommand(self, command):
if len(self['Data']) == 0:
self['Command'] = command.command
else:
self['Data'][-1]['Parameters']['AndXCommand'] = command.command
self['Data'][-1]['Parameters']['AndXOffset'] = len(self)
self['Data'].append(command)
def isMoreData(self):
return (self['Command'] in [SMB.SMB_COM_TRANSACTION, SMB.SMB_COM_READ_ANDX, SMB.SMB_COM_READ_RAW] and
self['ErrorClass'] == 1 and self['ErrorCode'] == SessionError.ERRmoredata)
def isMoreProcessingRequired(self):
return self['ErrorClass'] == 0x16 and self['ErrorCode'] == 0xc000
def isValidAnswer(self, cmd):
# this was inside a loop reading more from the net (with recv_packet(None))
if self['Command'] == cmd:
if (self['ErrorClass'] == 0x00 and
self['ErrorCode'] == 0x00):
return 1
elif self.isMoreData():
return 1
elif self.isMoreProcessingRequired():
return 1
raise SessionError("SMB Library Error", self['ErrorClass'] + (self['_reserved'] << 8), self['ErrorCode'], self['Flags2'] & SMB.FLAGS2_NT_STATUS)
else:
raise UnsupportedFeature("Unexpected answer from server: Got %d, Expected %d" % (self['Command'], cmd))
class SMBCommand(Structure):
structure = (
('WordCount', 'B=len(Parameters)/2'),
('_ParametersLength','_-Parameters','WordCount*2'),
('Parameters',':'), # default set by constructor
('ByteCount','<H-Data'),
('Data',':'), # default set by constructor
)
def __init__(self, commandOrData = None, data = None, **kargs):
if type(commandOrData) == type(0):
self.command = commandOrData
else:
data = data or commandOrData
Structure.__init__(self, data = data, **kargs)
if data is None:
self['Parameters'] = ''
self['Data'] = ''
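# Illustrative sketch (kept as a comment, not part of the library): NewSMBPacket is
# the SMB header and SMBCommand instances are the command blocks chained into it.
# The SMB_COM_ECHO structures used below are defined further down in this module;
# the payload value is an arbitrary example.
#
#   pkt = NewSMBPacket()
#   echo = SMBCommand(SMB.SMB_COM_ECHO)
#   echo['Parameters'] = SMBEcho_Parameters()
#   echo['Parameters']['EchoCount'] = 1
#   echo['Data'] = SMBEcho_Data()
#   echo['Data']['Data'] = 'ping'
#   pkt.addCommand(echo)    # sets pkt['Command'] and keeps AndX offsets for chained commands
#   wire_bytes = str(pkt)   # Structure instances render to their on-the-wire bytes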
class AsciiOrUnicodeStructure(Structure):
UnicodeStructure = ()
AsciiStructure = ()
def __init__(self, flags = 0, **kargs):
if flags & SMB.FLAGS2_UNICODE:
self.structure = self.UnicodeStructure
else:
self.structure = self.AsciiStructure
Structure.__init__(self, **kargs)
class SMBCommand_Parameters(Structure):
pass
class SMBAndXCommand_Parameters(Structure):
commonHdr = (
('AndXCommand','B=0xff'),
('_reserved','B=0'),
('AndXOffset','<H=0'),
)
structure = ( # default structure, overridden by subclasses
('Data',':=""'),
)
############# TRANSACTIONS RELATED
# TRANS2_QUERY_FS_INFORMATION
# QUERY_FS Information Levels
# SMB_QUERY_FS_ATTRIBUTE_INFO
class SMBQueryFsAttributeInfo(Structure):
structure = (
('FileSystemAttributes','<L'),
('MaxFilenNameLengthInBytes','<L'),
('LengthOfFileSystemName','<L-FileSystemName'),
('FileSystemName',':'),
)
class SMBQueryFsInfoVolume(AsciiOrUnicodeStructure):
commonHdr = (
('ulVolSerialNbr','<L=0xABCDEFAA'),
('cCharCount','<B-VolumeLabel'),
)
AsciiStructure = (
('VolumeLabel','z'),
)
UnicodeStructure = (
('VolumeLabel','u'),
)
# FILE_FS_SIZE_INFORMATION
class FileFsSizeInformation(Structure):
structure = (
('TotalAllocationUnits','<q=148529400'),
('AvailableAllocationUnits','<q=14851044'),
('SectorsPerAllocationUnit','<L=2'),
('BytesPerSector','<L=512'),
)
# SMB_QUERY_FS_SIZE_INFO
class SMBQueryFsSizeInfo(Structure):
structure = (
('TotalAllocationUnits','<q=148529400'),
('TotalFreeAllocationUnits','<q=14851044'),
('SectorsPerAllocationUnit','<L=2'),
('BytesPerSector','<L=512'),
)
# FILE_FS_FULL_SIZE_INFORMATION
class SMBFileFsFullSizeInformation(Structure):
structure = (
('TotalAllocationUnits','<q=148529400'),
('CallerAvailableAllocationUnits','<q=148529400'),
('ActualAvailableAllocationUnits','<q=148529400'),
('SectorsPerAllocationUnit','<L=15'),
('BytesPerSector','<L=512')
)
# SMB_QUERY_FS_VOLUME_INFO
class SMBQueryFsVolumeInfo(Structure):
structure = (
('VolumeCreationTime','<q'),
('SerialNumber','<L=0xABCDEFAA'),
('VolumeLabelSize','<L=len(VolumeLabel)'),
('Reserved','<H=0x10'),
('VolumeLabel',':')
)
# SMB_FIND_FILE_BOTH_DIRECTORY_INFO level
class SMBFindFileBothDirectoryInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('EndOfFile','<q=0'),
('AllocationSize','<q=0'),
('ExtFileAttributes','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('EaSize','<L=0'),
('ShortNameLength','<B=0'),
('Reserved','<B=0'),
('ShortName','24s'),
('FileName',':'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('EaSize','<L=0'),
('ShortNameLength','<B=0'),
('Reserved','<B=0'),
('ShortName','24s'),
('FileName',':'),
)
# SMB_FIND_FILE_ID_FULL_DIRECTORY_INFO level
class SMBFindFileIdFullDirectoryInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('EndOfFile','<q=0'),
('AllocationSize','<q=0'),
('ExtFileAttributes','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('EaSize','<L=0'),
('FileID','<q=0'),
('FileName',':'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('EaSize','<L=0'),
('FileID','<q=0'),
('FileName',':'),
)
# SMB_FIND_FILE_ID_BOTH_DIRECTORY_INFO level
class SMBFindFileIdBothDirectoryInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('EndOfFile','<q=0'),
('AllocationSize','<q=0'),
('ExtFileAttributes','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('EaSize','<L=0'),
('ShortNameLength','<B=0'),
('Reserved','<B=0'),
('ShortName','24s'),
('Reserved','<H=0'),
('FileID','<q=0'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('EaSize','<L=0'),
('ShortNameLength','<B=0'),
('Reserved','<B=0'),
('ShortName','24s'),
('Reserved','<H=0'),
('FileID','<q=0'),
('FileName',':'),
)
# SMB_FIND_FILE_DIRECTORY_INFO level
class SMBFindFileDirectoryInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('EndOfFile','<q=0'),
('AllocationSize','<q=1'),
('ExtFileAttributes','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('FileName',':'),
)
# SMB_FIND_FILE_NAMES_INFO level
class SMBFindFileNamesInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('FileName',':'),
)
# SMB_FIND_FILE_FULL_DIRECTORY_INFO level
class SMBFindFileFullDirectoryInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('EndOfFile','<q=0'),
('AllocationSize','<q=1'),
('ExtFileAttributes','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('EaSize','<L'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('EaSize','<L'),
('FileName',':'),
)
# SMB_FIND_INFO_STANDARD level
class SMBFindInfoStandard(AsciiOrUnicodeStructure):
commonHdr = (
('ResumeKey','<L=0xff'),
('CreationDate','<H=0'),
('CreationTime','<H=0'),
('LastAccessDate','<H=0'),
('LastAccessTime','<H=0'),
('LastWriteDate','<H=0'),
('LastWriteTime','<H=0'),
('EaSize','<L'),
('AllocationSize','<L=1'),
('ExtFileAttributes','<H=0'),
)
AsciiStructure = (
('FileNameLength','<B-FileName','len(FileName)'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameLength','<B-FileName','len(FileName)*2'),
('FileName',':'),
)
# SET_FILE_INFORMATION structures
# SMB_SET_FILE_DISPOSITION_INFO
class SMBSetFileDispositionInfo(Structure):
structure = (
('DeletePending','<B'),
)
# SMB_SET_FILE_BASIC_INFO
class SMBSetFileBasicInfo(Structure):
structure = (
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('ChangeTime','<q'),
('ExtFileAttributes','<H'),
('Reserved','<L'),
)
# FILE_STREAM_INFORMATION
class SMBFileStreamInformation(Structure):
commonHdr = (
('NextEntryOffset','<L=0'),
('StreamNameLength','<L=0'),
('StreamSize','<q=0'),
('StreamAllocationSize','<q=0'),
('StreamName',':=""'),
)
# FILE_NETWORK_OPEN_INFORMATION
class SMBFileNetworkOpenInfo(Structure):
structure = (
('CreationTime','<q=0'),
('LastAccessTime','<q=0'),
('LastWriteTime','<q=0'),
('ChangeTime','<q=0'),
('AllocationSize','<q=0'),
('EndOfFile','<q=0'),
('FileAttributes','<L=0'),
('Reserved','<L=0'),
)
# SMB_SET_FILE_END_OF_FILE_INFO
class SMBSetFileEndOfFileInfo(Structure):
structure = (
('EndOfFile','<q'),
)
# TRANS2_FIND_NEXT2
class SMBFindNext2_Parameters(AsciiOrUnicodeStructure):
commonHdr = (
('SID','<H'),
('SearchCount','<H'),
('InformationLevel','<H'),
('ResumeKey','<L'),
('Flags','<H'),
)
AsciiStructure = (
('FileName','z'),
)
UnicodeStructure = (
('FileName','u'),
)
class SMBFindNext2Response_Parameters(Structure):
structure = (
('SearchCount','<H'),
('EndOfSearch','<H=1'),
('EaErrorOffset','<H=0'),
('LastNameOffset','<H=0'),
)
class SMBFindNext2_Data(Structure):
structure = (
('GetExtendedAttributesListLength','_-GetExtendedAttributesList', 'self["GetExtendedAttributesListLength"]'),
('GetExtendedAttributesList',':'),
)
# TRANS2_FIND_FIRST2
class SMBFindFirst2Response_Parameters(Structure):
structure = (
('SID','<H'),
('SearchCount','<H'),
('EndOfSearch','<H=1'),
('EaErrorOffset','<H=0'),
('LastNameOffset','<H=0'),
)
class SMBFindFirst2_Parameters(AsciiOrUnicodeStructure):
commonHdr = (
('SearchAttributes','<H'),
('SearchCount','<H'),
('Flags','<H'),
('InformationLevel','<H'),
('SearchStorageType','<L'),
)
AsciiStructure = (
('FileName','z'),
)
UnicodeStructure = (
('FileName','u'),
)
class SMBFindFirst2_Data(Structure):
structure = (
('GetExtendedAttributesListLength','_-GetExtendedAttributesList', 'self["GetExtendedAttributesListLength"]'),
('GetExtendedAttributesList',':'),
)
# TRANS2_SET_PATH_INFORMATION
class SMBSetPathInformation_Parameters(AsciiOrUnicodeStructure):
commonHdr = (
('InformationLevel','<H'),
('Reserved','<L'),
)
AsciiStructure = (
('FileName','z'),
)
UnicodeStructure = (
('FileName','u'),
)
class SMBSetPathInformationResponse_Parameters(Structure):
structure = (
('EaErrorOffset','<H=0'),
)
# TRANS2_SET_FILE_INFORMATION
class SMBSetFileInformation_Parameters(Structure):
structure = (
('FID','<H'),
('InformationLevel','<H'),
('Reserved','<H'),
)
class SMBSetFileInformationResponse_Parameters(Structure):
structure = (
('EaErrorOffset','<H=0'),
)
# TRANS2_QUERY_FILE_INFORMATION
class SMBQueryFileInformation_Parameters(Structure):
structure = (
('FID','<H'),
('InformationLevel','<H'),
)
class SMBQueryFileInformationResponse_Parameters(Structure):
structure = (
('EaErrorOffset','<H=0'),
)
class SMBQueryFileInformation_Data(Structure):
structure = (
('GetExtendedAttributeList',':'),
)
# TRANS2_QUERY_PATH_INFORMATION
class SMBQueryPathInformationResponse_Parameters(Structure):
structure = (
('EaErrorOffset','<H=0'),
)
class SMBQueryPathInformation_Parameters(AsciiOrUnicodeStructure):
commonHdr = (
('InformationLevel','<H'),
('Reserved','<L=0'),
)
AsciiStructure = (
('FileName','z'),
)
UnicodeStructure = (
('FileName','u'),
)
class SMBQueryPathInformation_Data(Structure):
structure = (
('GetExtendedAttributeList',':'),
)
# SMB_QUERY_FILE_EA_INFO
class SMBQueryFileEaInfo(Structure):
structure = (
('EaSize','<L=0'),
)
# SMB_QUERY_FILE_BASIC_INFO
class SMBQueryFileBasicInfo(Structure):
structure = (
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('ExtFileAttributes','<L'),
#('Reserved','<L=0'),
)
# SMB_QUERY_FILE_STANDARD_INFO
class SMBQueryFileStandardInfo(Structure):
structure = (
('AllocationSize','<q'),
('EndOfFile','<q'),
('NumberOfLinks','<L=0'),
('DeletePending','<B=0'),
('Directory','<B'),
)
# SMB_QUERY_FILE_ALL_INFO
class SMBQueryFileAllInfo(Structure):
structure = (
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('ExtFileAttributes','<L'),
('Reserved','<L=0'),
('AllocationSize','<q'),
('EndOfFile','<q'),
('NumberOfLinks','<L=0'),
('DeletePending','<B=0'),
('Directory','<B'),
('Reserved','<H=0'),
('EaSize','<L=0'),
('FileNameLength','<L-FileName','len(FileName)'),
('FileName',':'),
)
# \PIPE\LANMAN NetShareEnum
class SMBNetShareEnum(Structure):
structure = (
('RAPOpcode','<H=0'),
('ParamDesc','z'),
('DataDesc','z'),
('InfoLevel','<H'),
('ReceiveBufferSize','<H'),
)
class SMBNetShareEnumResponse(Structure):
structure = (
('Status','<H=0'),
('Convert','<H=0'),
('EntriesReturned','<H'),
('EntriesAvailable','<H'),
)
class NetShareInfo1(Structure):
structure = (
('NetworkName','13s'),
('Pad','<B=0'),
('Type','<H=0'),
('RemarkOffsetLow','<H=0'),
('RemarkOffsetHigh','<H=0'),
)
# \PIPE\LANMAN NetServerGetInfo
class SMBNetServerGetInfoResponse(Structure):
structure = (
('Status','<H=0'),
('Convert','<H=0'),
('TotalBytesAvailable','<H'),
)
class SMBNetServerInfo1(Structure):
# Level 1 Response
structure = (
('ServerName','16s'),
('MajorVersion','B=5'),
('MinorVersion','B=0'),
('ServerType','<L=3'),
('ServerCommentLow','<H=0'),
('ServerCommentHigh','<H=0'),
)
# \PIPE\LANMAN NetShareGetInfo
class SMBNetShareGetInfo(Structure):
structure = (
('RAPOpcode','<H=0'),
('ParamDesc','z'),
('DataDesc','z'),
('ShareName','z'),
('InfoLevel','<H'),
('ReceiveBufferSize','<H'),
)
class SMBNetShareGetInfoResponse(Structure):
structure = (
('Status','<H=0'),
('Convert','<H=0'),
('TotalBytesAvailable','<H'),
)
############# Security Features
class SecurityFeatures(Structure):
structure = (
('Key','<L=0'),
('CID','<H=0'),
('SequenceNumber','<H=0'),
)
############# SMB_COM_QUERY_INFORMATION2 (0x23)
class SMBQueryInformation2_Parameters(Structure):
structure = (
('Fid','<H'),
)
class SMBQueryInformation2Response_Parameters(Structure):
structure = (
('CreateDate','<H'),
('CreationTime','<H'),
('LastAccessDate','<H'),
('LastAccessTime','<H'),
('LastWriteDate','<H'),
('LastWriteTime','<H'),
('FileDataSize','<L'),
('FileAllocationSize','<L'),
('FileAttributes','<L'),
)
############# SMB_COM_SESSION_SETUP_ANDX (0x73)
class SMBSessionSetupAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('MaxBuffer','<H'),
('MaxMpxCount','<H'),
('VCNumber','<H'),
('SessionKey','<L'),
('AnsiPwdLength','<H'),
('UnicodePwdLength','<H'),
('_reserved','<L=0'),
('Capabilities','<L'),
)
class SMBSessionSetupAndX_Extended_Parameters(SMBAndXCommand_Parameters):
structure = (
('MaxBufferSize','<H'),
('MaxMpxCount','<H'),
('VcNumber','<H'),
('SessionKey','<L'),
('SecurityBlobLength','<H'),
('Reserved','<L=0'),
('Capabilities','<L'),
)
class SMBSessionSetupAndX_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('AnsiPwdLength','_-AnsiPwd','self["AnsiPwdLength"]'),
('UnicodePwdLength','_-UnicodePwd','self["UnicodePwdLength"]'),
('AnsiPwd',':=""'),
('UnicodePwd',':=""'),
('Account','z=""'),
('PrimaryDomain','z=""'),
('NativeOS','z=""'),
('NativeLanMan','z=""'),
)
UnicodeStructure = (
('AnsiPwdLength','_-AnsiPwd','self["AnsiPwdLength"]'),
('UnicodePwdLength','_-UnicodePwd','self["UnicodePwdLength"]'),
('AnsiPwd',':=""'),
('UnicodePwd',':=""'),
('Account','u=""'),
('PrimaryDomain','u=""'),
('NativeOS','u=""'),
('NativeLanMan','u=""'),
)
class SMBSessionSetupAndX_Extended_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('SecurityBlobLength','_-SecurityBlob','self["SecurityBlobLength"]'),
('SecurityBlob',':'),
('NativeOS','z=""'),
('NativeLanMan','z=""'),
)
UnicodeStructure = (
('SecurityBlobLength','_-SecurityBlob','self["SecurityBlobLength"]'),
('SecurityBlob',':'),
('NativeOS','u=""'),
('NativeLanMan','u=""'),
)
class SMBSessionSetupAndXResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('Action','<H'),
)
class SMBSessionSetupAndX_Extended_Response_Parameters(SMBAndXCommand_Parameters):
structure = (
('Action','<H=0'),
('SecurityBlobLength','<H'),
)
class SMBSessionSetupAndXResponse_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('NativeOS','z=""'),
('NativeLanMan','z=""'),
('PrimaryDomain','z=""'),
)
UnicodeStructure = (
('NativeOS','u=""'),
('NativeLanMan','u=""'),
('PrimaryDomain','u=""'),
)
class SMBSessionSetupAndX_Extended_Response_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('SecurityBlobLength','_-SecurityBlob','self["SecurityBlobLength"]'),
('SecurityBlob',':'),
('NativeOS','z=""'),
('NativeLanMan','z=""'),
)
UnicodeStructure = (
('SecurityBlobLength','_-SecurityBlob','self["SecurityBlobLength"]'),
('SecurityBlob',':'),
('NativeOS','u=""'),
('NativeLanMan','u=""'),
)
############# SMB_COM_TREE_CONNECT (0x70)
class SMBTreeConnect_Parameters(SMBCommand_Parameters):
structure = (
)
class SMBTreeConnect_Data(SMBCommand_Parameters):
structure = (
('PathFormat','"\x04'),
('Path','z'),
('PasswordFormat','"\x04'),
('Password','z'),
('ServiceFormat','"\x04'),
('Service','z'),
)
############# SMB_COM_TREE_CONNECT_ANDX (0x75)
class SMBTreeConnectAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('Flags','<H=0'),
('PasswordLength','<H'),
)
class SMBTreeConnectAndXResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('OptionalSupport','<H=0'),
)
class SMBTreeConnectAndXExtendedResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('OptionalSupport','<H=1'),
('MaximalShareAccessRights','<L=0x1fffff'),
('GuestMaximalShareAccessRights','<L=0x1fffff'),
)
class SMBTreeConnectAndX_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('_PasswordLength','_-Password','self["_PasswordLength"]'),
('Password',':'),
('Path','z'),
('Service','z'),
)
UnicodeStructure = (
('_PasswordLength','_-Password','self["_PasswordLength"] if self["_PasswordLength"] > 0 else 1'),
('Password',':'),
('Path','u'),
('Service','z'),
)
class SMBTreeConnectAndXResponse_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('Service','z'),
('PadLen','_-Pad','self["PadLen"]'),
('Pad',':=""'),
('NativeFileSystem','z'),
)
UnicodeStructure = (
('Service','z'),
('PadLen','_-Pad','self["PadLen"]'),
('Pad',':=""'),
('NativeFileSystem','u'),
)
############# SMB_COM_NT_CREATE_ANDX (0xA2)
class SMBNtCreateAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('_reserved', 'B=0'),
('FileNameLength','<H'), # NameLength
('CreateFlags','<L'), # Flags
('RootFid','<L=0'), # RootDirectoryFID
('AccessMask','<L'), # DesiredAccess
('AllocationSizeLo','<L=0'), # AllocationSize
('AllocationSizeHi','<L=0'),
('FileAttributes','<L=0'), # ExtFileAttributes
('ShareAccess','<L=3'), #
('Disposition','<L=1'), # CreateDisposition
('CreateOptions','<L'), # CreateOptions
('Impersonation','<L=2'),
('SecurityFlags','B=3'),
)
class SMBNtCreateAndXResponse_Parameters(SMBAndXCommand_Parameters):
# XXX Is there a memory leak in the response for NTCreate (where the Data section would be) in Win 2000, Win XP, and Win 2003?
structure = (
('OplockLevel', 'B=0'),
('Fid','<H'),
('CreateAction','<L'),
('CreateTime','<q=0'),
('LastAccessTime','<q=0'),
('LastWriteTime','<q=0'),
('LastChangeTime','<q=0'),
('FileAttributes','<L=0x80'),
('AllocationSize','<q=0'),
('EndOfFile','<q=0'),
('FileType','<H=0'),
('IPCState','<H=0'),
('IsDirectory','B'),
)
class SMBNtCreateAndXExtendedResponse_Parameters(SMBAndXCommand_Parameters):
# [MS-SMB] Extended response description
structure = (
('OplockLevel', 'B=0'),
('Fid','<H'),
('CreateAction','<L'),
('CreateTime','<q=0'),
('LastAccessTime','<q=0'),
('LastWriteTime','<q=0'),
('LastChangeTime','<q=0'),
('FileAttributes','<L=0x80'),
('AllocationSize','<q=0'),
('EndOfFile','<q=0'),
('FileType','<H=0'),
('IPCState','<H=0'),
('IsDirectory','B'),
('VolumeGUID','16s'),
('FileIdLow','<L=0'),
('FileIdHigh','<L=0'),
('MaximalAccessRights','<L=0x12019b'),
('GuestMaximalAccessRights','<L=0x120089'),
)
class SMBNtCreateAndX_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('FileName','z'),
)
UnicodeStructure = (
('Pad','B'),
('FileName','u'),
)
############# SMB_COM_OPEN_ANDX (0xD2)
class SMBOpenAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('Flags','<H=0'),
('DesiredAccess','<H=0'),
('SearchAttributes','<H=0'),
('FileAttributes','<H=0'),
('CreationTime','<L=0'),
('OpenMode','<H=1'), # SMB_O_OPEN = 1
('AllocationSize','<L=0'),
('Reserved','8s=""'),
)
class SMBOpenAndX_Data(SMBNtCreateAndX_Data):
pass
class SMBOpenAndXResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('Fid','<H=0'),
('FileAttributes','<H=0'),
('LastWriten','<L=0'),
('FileSize','<L=0'),
('GrantedAccess','<H=0'),
('FileType','<H=0'),
('IPCState','<H=0'),
('Action','<H=0'),
('ServerFid','<L=0'),
('_reserved','<H=0'),
)
############# SMB_COM_WRITE (0x0B)
class SMBWrite_Parameters(SMBCommand_Parameters):
structure = (
('Fid','<H'),
('Count','<H'),
('Offset','<L'),
('Remaining','<H'),
)
class SMBWriteResponse_Parameters(SMBCommand_Parameters):
structure = (
('Count','<H'),
)
class SMBWrite_Data(Structure):
structure = (
('BufferFormat','<B=1'),
('DataLength','<H-Data'),
('Data',':'),
)
############# SMB_COM_WRITE_ANDX (0x2F)
class SMBWriteAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('Fid','<H=0'),
('Offset','<L=0'),
('_reserved','<L=0xff'),
('WriteMode','<H=8'),
('Remaining','<H=0'),
('DataLength_Hi','<H=0'),
('DataLength','<H=0'),
('DataOffset','<H=0'),
('HighOffset','<L=0'),
)
class SMBWriteAndX_Data_Short(Structure):
structure = (
('_PadLen','_-Pad','self["DataOffset"] - 59'),
('Pad',':'),
#('Pad','<B=0'),
('DataLength','_-Data','self["DataLength"]'),
('Data',':'),
)
class SMBWriteAndX_Data(Structure):
structure = (
('_PadLen','_-Pad','self["DataOffset"] - 63'),
('Pad',':'),
#('Pad','<B=0'),
('DataLength','_-Data','self["DataLength"]'),
('Data',':'),
)
class SMBWriteAndX_Parameters_Short(SMBAndXCommand_Parameters):
structure = (
('Fid','<H'),
('Offset','<L'),
('_reserved','<L=0xff'),
('WriteMode','<H=8'),
('Remaining','<H'),
('DataLength_Hi','<H=0'),
('DataLength','<H'),
('DataOffset','<H=0'),
)
class SMBWriteAndXResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('Count','<H'),
('Available','<H'),
('Reserved','<L=0'),
)
############# SMB_COM_WRITE_RAW (0x1D)
class SMBWriteRaw_Parameters(SMBCommand_Parameters):
structure = (
('Fid','<H'),
('Count','<H'),
('_reserved','<H=0'),
('Offset','<L'),
('Timeout','<L=0'),
('WriteMode','<H=0'),
('_reserved2','<L=0'),
('DataLength','<H'),
('DataOffset','<H=0'),
)
############# SMB_COM_READ (0x0A)
class SMBRead_Parameters(SMBCommand_Parameters):
structure = (
('Fid','<H'),
('Count','<H'),
('Offset','<L'),
('Remaining','<H=Count'),
)
class SMBReadResponse_Parameters(Structure):
structure = (
('Count','<H=0'),
('_reserved','8s=""'),
)
class SMBReadResponse_Data(Structure):
structure = (
('BufferFormat','<B=0x1'),
('DataLength','<H-Data'),
('Data',':'),
)
############# SMB_COM_READ_RAW (0x1A)
class SMBReadRaw_Parameters(SMBCommand_Parameters):
structure = (
('Fid','<H'),
('Offset','<L'),
('MaxCount','<H'),
('MinCount','<H=MaxCount'),
('Timeout','<L=0'),
('_reserved','<H=0'),
)
############# SMB_COM_NT_TRANSACT (0xA0)
class SMBNTTransaction_Parameters(SMBCommand_Parameters):
structure = (
('MaxSetupCount','<B=0'),
('Reserved1','<H=0'),
('TotalParameterCount','<L'),
('TotalDataCount','<L'),
('MaxParameterCount','<L=1024'),
('MaxDataCount','<L=65504'),
('ParameterCount','<L'),
('ParameterOffset','<L'),
('DataCount','<L'),
('DataOffset','<L'),
('SetupCount','<B=len(Setup)/2'),
('Function','<H=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
class SMBNTTransactionResponse_Parameters(SMBCommand_Parameters):
structure = (
('Reserved1','3s=""'),
('TotalParameterCount','<L'),
('TotalDataCount','<L'),
('ParameterCount','<L'),
('ParameterOffset','<L'),
('ParameterDisplacement','<L=0'),
('DataCount','<L'),
('DataOffset','<L'),
('DataDisplacement','<L=0'),
('SetupCount','<B=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
class SMBNTTransaction_Data(Structure):
structure = (
('Pad1Length','_-Pad1','self["Pad1Length"]'),
('Pad1',':'),
('NT_Trans_ParametersLength','_-NT_Trans_Parameters','self["NT_Trans_ParametersLength"]'),
('NT_Trans_Parameters',':'),
('Pad2Length','_-Pad2','self["Pad2Length"]'),
('Pad2',':'),
('NT_Trans_DataLength','_-NT_Trans_Data','self["NT_Trans_DataLength"]'),
('NT_Trans_Data',':'),
)
class SMBNTTransactionResponse_Data(Structure):
structure = (
('Pad1Length','_-Pad1','self["Pad1Length"]'),
('Pad1',':'),
('Trans_ParametersLength','_-Trans_Parameters','self["Trans_ParametersLength"]'),
('Trans_Parameters',':'),
('Pad2Length','_-Pad2','self["Pad2Length"]'),
('Pad2',':'),
('Trans_DataLength','_-Trans_Data','self["Trans_DataLength"]'),
('Trans_Data',':'),
)
############# SMB_COM_TRANSACTION2_SECONDARY (0x33)
class SMBTransaction2Secondary_Parameters(SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H'),
('TotalDataCount','<H'),
('ParameterCount','<H'),
('ParameterOffset','<H'),
('DataCount','<H'),
('DataOffset','<H'),
('DataDisplacement','<H=0'),
('FID','<H'),
)
class SMBTransaction2Secondary_Data(Structure):
structure = (
('Pad1Length','_-Pad1','self["Pad1Length"]'),
('Pad1',':'),
('Trans_ParametersLength','_-Trans_Parameters','self["Trans_ParametersLength"]'),
('Trans_Parameters',':'),
('Pad2Length','_-Pad2','self["Pad2Length"]'),
('Pad2',':'),
('Trans_DataLength','_-Trans_Data','self["Trans_DataLength"]'),
('Trans_Data',':'),
)
############# SMB_COM_TRANSACTION2 (0x32)
class SMBTransaction2_Parameters(SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H'),
('TotalDataCount','<H'),
('MaxParameterCount','<H=1024'),
('MaxDataCount','<H=65504'),
('MaxSetupCount','<B=0'),
('Reserved1','<B=0'),
('Flags','<H=0'),
('Timeout','<L=0'),
('Reserved2','<H=0'),
('ParameterCount','<H'),
('ParameterOffset','<H'),
('DataCount','<H'),
('DataOffset','<H'),
('SetupCount','<B=len(Setup)/2'),
('Reserved3','<B=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
class SMBTransaction2Response_Parameters(SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H'),
('TotalDataCount','<H'),
('Reserved1','<H=0'),
('ParameterCount','<H'),
('ParameterOffset','<H'),
('ParameterDisplacement','<H=0'),
('DataCount','<H'),
('DataOffset','<H'),
('DataDisplacement','<H=0'),
('SetupCount','<B=0'),
('Reserved2','<B=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
class SMBTransaction2_Data(Structure):
structure = (
# ('NameLength','_-Name','1'),
# ('Name',':'),
('Pad1Length','_-Pad1','self["Pad1Length"]'),
('Pad1',':'),
('Trans_ParametersLength','_-Trans_Parameters','self["Trans_ParametersLength"]'),
('Trans_Parameters',':'),
('Pad2Length','_-Pad2','self["Pad2Length"]'),
('Pad2',':'),
('Trans_DataLength','_-Trans_Data','self["Trans_DataLength"]'),
('Trans_Data',':'),
)
class SMBTransaction2Response_Data(Structure):
structure = (
('Pad1Length','_-Pad1','self["Pad1Length"]'),
('Pad1',':'),
('Trans_ParametersLength','_-Trans_Parameters','self["Trans_ParametersLength"]'),
('Trans_Parameters',':'),
('Pad2Length','_-Pad2','self["Pad2Length"]'),
('Pad2',':'),
('Trans_DataLength','_-Trans_Data','self["Trans_DataLength"]'),
('Trans_Data',':'),
)
############# SMB_COM_QUERY_INFORMATION (0x08)
class SMBQueryInformation_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat','B=4'),
('FileName','z'),
)
UnicodeStructure = (
('BufferFormat','B=4'),
('FileName','u'),
)
class SMBQueryInformationResponse_Parameters(Structure):
structure = (
('FileAttributes','<H'),
('LastWriteTime','<L'),
('FileSize','<L'),
('Reserved','"0123456789'),
)
############# SMB_COM_TRANSACTION (0x25)
class SMBTransaction_Parameters(SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H'),
('TotalDataCount','<H'),
('MaxParameterCount','<H=1024'),
('MaxDataCount','<H=65504'),
('MaxSetupCount','<B=0'),
('Reserved1','<B=0'),
('Flags','<H=0'),
('Timeout','<L=0'),
('Reserved2','<H=0'),
('ParameterCount','<H'),
('ParameterOffset','<H'),
('DataCount','<H'),
('DataOffset','<H'),
('SetupCount','<B=len(Setup)/2'),
('Reserved3','<B=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
class SMBTransactionResponse_Parameters(SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H'),
('TotalDataCount','<H'),
('Reserved1','<H=0'),
('ParameterCount','<H'),
('ParameterOffset','<H'),
('ParameterDisplacement','<H=0'),
('DataCount','<H'),
('DataOffset','<H'),
('DataDisplacement','<H=0'),
('SetupCount','<B'),
('Reserved2','<B=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
# TODO: We should merge both of these, but that will require fixing
# the instances where this structure is used on the client side
class SMBTransaction_SData(AsciiOrUnicodeStructure):
AsciiStructure = (
('Name','z'),
('Trans_ParametersLength','_-Trans_Parameters'),
('Trans_Parameters',':'),
('Trans_DataLength','_-Trans_Data'),
('Trans_Data',':'),
)
UnicodeStructure = (
('Pad','B'),
('Name','u'),
('Trans_ParametersLength','_-Trans_Parameters'),
('Trans_Parameters',':'),
('Trans_DataLength','_-Trans_Data'),
('Trans_Data',':'),
)
class SMBTransaction_Data(Structure):
structure = (
('NameLength','_-Name'),
('Name',':'),
('Trans_ParametersLength','_-Trans_Parameters'),
('Trans_Parameters',':'),
('Trans_DataLength','_-Trans_Data'),
('Trans_Data',':'),
)
class SMBTransactionResponse_Data(Structure):
structure = (
('Trans_ParametersLength','_-Trans_Parameters'),
('Trans_Parameters',':'),
('Trans_DataLength','_-Trans_Data'),
('Trans_Data',':'),
)
############# SMB_COM_READ_ANDX (0x2E)
class SMBReadAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('Fid','<H'),
('Offset','<L'),
('MaxCount','<H'),
('MinCount','<H=MaxCount'),
('_reserved','<L=0x0'),
('Remaining','<H=MaxCount'),
('HighOffset','<L=0'),
)
class SMBReadAndX_Parameters2(SMBAndXCommand_Parameters):
structure = (
('Fid','<H'),
('Offset','<L'),
('MaxCount','<H'),
('MinCount','<H=MaxCount'),
('_reserved','<L=0xffffffff'),
('Remaining','<H=MaxCount'),
)
class SMBReadAndXResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('Remaining','<H=0'),
('DataMode','<H=0'),
('_reserved','<H=0'),
('DataCount','<H'),
('DataOffset','<H'),
('DataCount_Hi','<L'),
('_reserved2','6s=""'),
)
############# SMB_COM_ECHO (0x2B)
class SMBEcho_Data(Structure):
structure = (
('Data',':'),
)
class SMBEcho_Parameters(Structure):
structure = (
('EchoCount','<H'),
)
class SMBEchoResponse_Data(Structure):
structure = (
('Data',':'),
)
class SMBEchoResponse_Parameters(Structure):
structure = (
('SequenceNumber','<H=1'),
)
############# SMB_COM_QUERY_INFORMATION_DISK (0x80)
class SMBQueryInformationDiskResponse_Parameters(Structure):
structure = (
('TotalUnits','<H'),
('BlocksPerUnit','<H'),
('BlockSize','<H'),
('FreeUnits','<H'),
('Reserved','<H=0'),
)
############# SMB_COM_LOGOFF_ANDX (0x74)
class SMBLogOffAndX(SMBAndXCommand_Parameters):
structure = ()
############# SMB_COM_CLOSE (0x04)
class SMBClose_Parameters(SMBCommand_Parameters):
structure = (
('FID','<H'),
('Time','<L=0'),
)
############# SMB_COM_FLUSH (0x05)
class SMBFlush_Parameters(SMBCommand_Parameters):
structure = (
('FID','<H'),
)
############# SMB_COM_CREATE_DIRECTORY (0x00)
class SMBCreateDirectory_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat','<B=4'),
('DirectoryName','z'),
)
UnicodeStructure = (
('BufferFormat','<B=4'),
('DirectoryName','u'),
)
############# SMB_COM_DELETE (0x06)
class SMBDelete_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat','<B=4'),
('FileName','z'),
)
UnicodeStructure = (
('BufferFormat','<B=4'),
('FileName','u'),
)
class SMBDelete_Parameters(Structure):
structure = (
('SearchAttributes','<H'),
)
############# SMB_COM_DELETE_DIRECTORY (0x01)
class SMBDeleteDirectory_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat','<B=4'),
('DirectoryName','z'),
)
UnicodeStructure = (
('BufferFormat','<B=4'),
('DirectoryName','u'),
)
############# SMB_COM_CHECK_DIRECTORY (0x10)
class SMBCheckDirectory_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat','<B=4'),
('DirectoryName','z'),
)
UnicodeStructure = (
('BufferFormat','<B=4'),
('DirectoryName','u'),
)
############# SMB_COM_RENAME (0x07)
class SMBRename_Parameters(SMBCommand_Parameters):
structure = (
('SearchAttributes','<H'),
)
class SMBRename_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat1','<B=4'),
('OldFileName','z'),
('BufferFormat2','<B=4'),
('NewFileName','z'),
)
UnicodeStructure = (
('BufferFormat1','<B=4'),
('OldFileName','u'),
('BufferFormat2','<B=4'),
('Pad','B=0'),
('NewFileName','u'),
)
############# SMB_COM_OPEN (0x02)
class SMBOpen_Parameters(SMBCommand_Parameters):
structure = (
('DesiredAccess','<H=0'),
('SearchAttributes','<H=0'),
)
class SMBOpen_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('FileNameFormat','"\x04'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameFormat','"\x04'),
('FileName','z'),
)
class SMBOpenResponse_Parameters(SMBCommand_Parameters):
structure = (
('Fid','<H=0'),
('FileAttributes','<H=0'),
('LastWriten','<L=0'),
('FileSize','<L=0'),
('GrantedAccess','<H=0'),
)
############# EXTENDED SECURITY CLASSES
class SMBExtended_Security_Parameters(Structure):
structure = (
('DialectIndex','<H'),
('SecurityMode','<B'),
('MaxMpxCount','<H'),
('MaxNumberVcs','<H'),
('MaxBufferSize','<L'),
('MaxRawSize','<L'),
('SessionKey','<L'),
('Capabilities','<L'),
('LowDateTime','<L'),
('HighDateTime','<L'),
('ServerTimeZone','<H'),
('ChallengeLength','<B'),
)
class SMBExtended_Security_Data(Structure):
structure = (
('ServerGUID','16s'),
('SecurityBlob',':'),
)
class SMBNTLMDialect_Parameters(Structure):
structure = (
('DialectIndex','<H'),
('SecurityMode','<B'),
('MaxMpxCount','<H'),
('MaxNumberVcs','<H'),
('MaxBufferSize','<L'),
('MaxRawSize','<L'),
('SessionKey','<L'),
('Capabilities','<L'),
('LowDateTime','<L'),
('HighDateTime','<L'),
('ServerTimeZone','<H'),
('ChallengeLength','<B'),
)
class SMBNTLMDialect_Data(Structure):
structure = (
('ChallengeLength','_-Challenge','self["ChallengeLength"]'),
('Challenge',':'),
('Payload',':'),
# For some reason on an old Linux this field is not present, we have to check this out. There must be a flag stating this.
('DomainName','_'),
('ServerName','_'),
)
def __init__(self,data = None, alignment = 0):
Structure.__init__(self,data,alignment)
#self['ChallengeLength']=8
def fromString(self,data):
Structure.fromString(self,data)
self['DomainName'] = ''
self['ServerName'] = ''
class SMB:
# SMB Command Codes
SMB_COM_CREATE_DIRECTORY = 0x00
SMB_COM_DELETE_DIRECTORY = 0x01
SMB_COM_OPEN = 0x02
SMB_COM_CREATE = 0x03
SMB_COM_CLOSE = 0x04
SMB_COM_FLUSH = 0x05
SMB_COM_DELETE = 0x06
SMB_COM_RENAME = 0x07
SMB_COM_QUERY_INFORMATION = 0x08
SMB_COM_SET_INFORMATION = 0x09
SMB_COM_READ = 0x0A
SMB_COM_WRITE = 0x0B
SMB_COM_LOCK_BYTE_RANGE = 0x0C
SMB_COM_UNLOCK_BYTE_RANGE = 0x0D
SMB_COM_CREATE_TEMPORARY = 0x0E
SMB_COM_CREATE_NEW = 0x0F
SMB_COM_CHECK_DIRECTORY = 0x10
SMB_COM_PROCESS_EXIT = 0x11
SMB_COM_SEEK = 0x12
SMB_COM_LOCK_AND_READ = 0x13
SMB_COM_WRITE_AND_UNLOCK = 0x14
SMB_COM_READ_RAW = 0x1A
SMB_COM_READ_MPX = 0x1B
SMB_COM_READ_MPX_SECONDARY = 0x1C
SMB_COM_WRITE_RAW = 0x1D
SMB_COM_WRITE_MPX = 0x1E
SMB_COM_WRITE_MPX_SECONDARY = 0x1F
SMB_COM_WRITE_COMPLETE = 0x20
SMB_COM_QUERY_SERVER = 0x21
SMB_COM_SET_INFORMATION2 = 0x22
SMB_COM_QUERY_INFORMATION2 = 0x23
SMB_COM_LOCKING_ANDX = 0x24
SMB_COM_TRANSACTION = 0x25
SMB_COM_TRANSACTION_SECONDARY = 0x26
SMB_COM_IOCTL = 0x27
SMB_COM_IOCTL_SECONDARY = 0x28
SMB_COM_COPY = 0x29
SMB_COM_MOVE = 0x2A
SMB_COM_ECHO = 0x2B
SMB_COM_WRITE_AND_CLOSE = 0x2C
SMB_COM_OPEN_ANDX = 0x2D
SMB_COM_READ_ANDX = 0x2E
SMB_COM_WRITE_ANDX = 0x2F
SMB_COM_NEW_FILE_SIZE = 0x30
SMB_COM_CLOSE_AND_TREE_DISC = 0x31
SMB_COM_TRANSACTION2 = 0x32
SMB_COM_TRANSACTION2_SECONDARY = 0x33
SMB_COM_FIND_CLOSE2 = 0x34
SMB_COM_FIND_NOTIFY_CLOSE = 0x35
# Used by Xenix/Unix 0x60 - 0x6E
SMB_COM_TREE_CONNECT = 0x70
SMB_COM_TREE_DISCONNECT = 0x71
SMB_COM_NEGOTIATE = 0x72
SMB_COM_SESSION_SETUP_ANDX = 0x73
SMB_COM_LOGOFF_ANDX = 0x74
SMB_COM_TREE_CONNECT_ANDX = 0x75
SMB_COM_QUERY_INFORMATION_DISK = 0x80
SMB_COM_SEARCH = 0x81
SMB_COM_FIND = 0x82
SMB_COM_FIND_UNIQUE = 0x83
SMB_COM_FIND_CLOSE = 0x84
SMB_COM_NT_TRANSACT = 0xA0
SMB_COM_NT_TRANSACT_SECONDARY = 0xA1
SMB_COM_NT_CREATE_ANDX = 0xA2
SMB_COM_NT_CANCEL = 0xA4
SMB_COM_NT_RENAME = 0xA5
SMB_COM_OPEN_PRINT_FILE = 0xC0
SMB_COM_WRITE_PRINT_FILE = 0xC1
SMB_COM_CLOSE_PRINT_FILE = 0xC2
SMB_COM_GET_PRINT_QUEUE = 0xC3
SMB_COM_READ_BULK = 0xD8
SMB_COM_WRITE_BULK = 0xD9
SMB_COM_WRITE_BULK_DATA = 0xDA
# TRANSACT codes
TRANS_TRANSACT_NMPIPE = 0x26
# TRANSACT2 codes
TRANS2_FIND_FIRST2 = 0x0001
TRANS2_FIND_NEXT2 = 0x0002
TRANS2_QUERY_FS_INFORMATION = 0x0003
TRANS2_QUERY_PATH_INFORMATION = 0x0005
TRANS2_QUERY_FILE_INFORMATION = 0x0007
TRANS2_SET_FILE_INFORMATION = 0x0008
TRANS2_SET_PATH_INFORMATION = 0x0006
# Security Share Mode (Used internally by SMB class)
SECURITY_SHARE_MASK = 0x01
SECURITY_SHARE_SHARE = 0x00
SECURITY_SHARE_USER = 0x01
SECURITY_SIGNATURES_ENABLED = 0X04
SECURITY_SIGNATURES_REQUIRED = 0X08
# Security Auth Mode (Used internally by SMB class)
SECURITY_AUTH_MASK = 0x02
SECURITY_AUTH_ENCRYPTED = 0x02
SECURITY_AUTH_PLAINTEXT = 0x00
# Raw Mode Mask (Used internally by SMB class. Good for dialect up to and including LANMAN2.1)
RAW_READ_MASK = 0x01
RAW_WRITE_MASK = 0x02
# Capabilities Mask (Used internally by SMB class. Good for dialect NT LM 0.12)
CAP_RAW_MODE = 0x00000001
CAP_MPX_MODE = 0x0002
CAP_UNICODE = 0x0004
CAP_LARGE_FILES = 0x0008
CAP_EXTENDED_SECURITY = 0x80000000
CAP_USE_NT_ERRORS = 0x40
CAP_NT_SMBS = 0x10
CAP_LARGE_READX = 0x00004000
CAP_LARGE_WRITEX = 0x00008000
CAP_RPC_REMOTE_APIS = 0x20
# Flags1 Mask
FLAGS1_LOCK_AND_READ_OK = 0x01
FLAGS1_PATHCASELESS = 0x08
FLAGS1_CANONICALIZED_PATHS = 0x10
FLAGS1_REPLY = 0x80
# Flags2 Mask
FLAGS2_LONG_NAMES = 0x0001
FLAGS2_EAS = 0x0002
FLAGS2_SMB_SECURITY_SIGNATURE = 0x0004
FLAGS2_IS_LONG_NAME = 0x0040
FLAGS2_DFS = 0x1000
FLAGS2_PAGING_IO = 0x2000
FLAGS2_NT_STATUS = 0x4000
FLAGS2_UNICODE = 0x8000
FLAGS2_COMPRESSED = 0x0008
FLAGS2_SMB_SECURITY_SIGNATURE_REQUIRED = 0x0010
FLAGS2_EXTENDED_SECURITY = 0x0800
# Dialect's Security Mode flags
NEGOTIATE_USER_SECURITY = 0x01
NEGOTIATE_ENCRYPT_PASSWORDS = 0x02
NEGOTIATE_SECURITY_SIGNATURE_ENABLE = 0x04
NEGOTIATE_SECURITY_SIGNATURE_REQUIRED = 0x08
# Tree Connect AndX Response OptionalSupport flags
SMB_SUPPORT_SEARCH_BITS = 0x01
SMB_SHARE_IS_IN_DFS = 0x02
def __init__(self, remote_name, remote_host, my_name = None, host_type = nmb.TYPE_SERVER, sess_port = 445, timeout=None, UDP = 0, session = None, negPacket = None):
# The uid attribute will be set when the client calls the login() method
self._uid = 0
self.__server_name = ''
self.__server_os = ''
self.__server_os_major = None
self.__server_os_minor = None
self.__server_os_build = None
self.__server_lanman = ''
self.__server_domain = ''
self.__server_dns_domain_name = ''
self.__remote_name = string.upper(remote_name)
self.__remote_host = remote_host
self.__isNTLMv2 = True
self._dialects_parameters = None
self._dialects_data = None
# Credentials
self.__userName = ''
self.__password = ''
self.__domain = ''
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = ''
self.__kdc = ''
self.__TGT = None
self.__TGS = None
# Negotiate Protocol Result, used everywhere
# Could be extended or not; flags should be checked before relying on it
self._dialect_data = 0
self._dialect_parameters = 0
self._action = 0
self._sess = None
self.encrypt_passwords = True
self.tid = 0
self.fid = 0
# Signing stuff
self._SignSequenceNumber = 0
self._SigningSessionKey = ''
self._SigningChallengeResponse = ''
self._SignatureEnabled = False
self._SignatureVerificationEnabled = False
self._SignatureRequired = False
# Base flags (default flags, can be overridden using set_flags())
self.__flags1 = SMB.FLAGS1_PATHCASELESS | SMB.FLAGS1_CANONICALIZED_PATHS
self.__flags2 = SMB.FLAGS2_EXTENDED_SECURITY | SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_LONG_NAMES
if timeout is None:
self.__timeout = 60
else:
self.__timeout = timeout
# If the port is 445 and the name sent is *SMBSERVER, we set the name to the IP.
# This is to help some old applications that still believe
# *SMBSERVER will work against modern OSes. If the port is NETBIOS_SESSION_PORT, the user had better
# know about *SMBSERVER's limitations
if sess_port == 445 and remote_name == '*SMBSERVER':
self.__remote_name = remote_host
if session is None:
if not my_name:
my_name = socket.gethostname()
i = string.find(my_name, '.')
if i > -1:
my_name = my_name[:i]
if UDP:
self._sess = nmb.NetBIOSUDPSession(my_name, remote_name, remote_host, host_type, sess_port, self.__timeout)
else:
self._sess = nmb.NetBIOSTCPSession(my_name, remote_name, remote_host, host_type, sess_port, self.__timeout)
# Initialize session values (_dialect_data and _dialect_parameters)
self.neg_session()
# Call login() without any authentication information to
# setup a session if the remote server
# is in share mode.
if (self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SHARE_MASK) == SMB.SECURITY_SHARE_SHARE:
self.login('', '')
else:
self._sess = session
self.neg_session(negPacket = negPacket)
# Call login() without any authentication information to
# setup a session if the remote server
# is in share mode.
if (self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SHARE_MASK) == SMB.SECURITY_SHARE_SHARE:
self.login('', '')
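# Illustrative usage (kept as a comment): the constructor above negotiates the
# protocol on its own; authentication is done through login(), referenced above
# and defined later in this class. Host, share and credentials below are
# placeholder values.
#
#   conn = SMB('*SMBSERVER', '192.168.1.10')
#   conn.login('someuser', 'somepassword')
#   tid = conn.tree_connect_andx(r'\\192.168.1.10\IPC$')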
@staticmethod
def ntlm_supported():
return False
def get_remote_name(self):
return self.__remote_name
def get_remote_host(self):
return self.__remote_host
def get_flags(self):
return self.__flags1, self.__flags2
def set_flags(self, flags1=None, flags2=None):
if flags1 is not None:
self.__flags1 = flags1
if flags2 is not None:
self.__flags2 = flags2
def set_timeout(self, timeout):
prev_timeout = self.__timeout
self.__timeout = timeout
return prev_timeout
def get_timeout(self):
return self.__timeout
@contextmanager
def use_timeout(self, timeout):
prev_timeout = self.set_timeout(timeout)
try:
yield
finally:
self.set_timeout(prev_timeout)
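# Illustrative usage (kept as a comment, `conn` is a placeholder for a connected
# SMB instance): temporarily override the receive timeout for one operation and
# restore the previous value afterwards.
#
#   with conn.use_timeout(5):
#       conn.recvSMB()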
def get_session(self):
return self._sess
def get_tid(self):
return self.tid
def get_fid(self):
return self.fid
def isGuestSession(self):
return self._action & SMB_SETUP_GUEST
def doesSupportNTLMv2(self):
return self.__isNTLMv2
def __del__(self):
if self._sess:
self._sess.close()
def recvSMB(self):
r = self._sess.recv_packet(self.__timeout)
return NewSMBPacket(data = r.get_trailer())
@staticmethod
def __decode_trans(params, data):
totparamcnt, totdatacnt, _, paramcnt, paramoffset, paramds, datacnt, dataoffset, datads, setupcnt = unpack('<HHHHHHHHHB', params[:19])
if paramcnt + paramds < totparamcnt or datacnt + datads < totdatacnt:
has_more = 1
else:
has_more = 0
paramoffset = paramoffset - 55 - setupcnt * 2
dataoffset = dataoffset - 55 - setupcnt * 2
return has_more, params[20:20 + setupcnt * 2], data[paramoffset:paramoffset + paramcnt], data[dataoffset:dataoffset + datacnt]
# TODO: Move this to NewSMBPacket, it belongs there
def signSMB(self, packet, signingSessionKey, signingChallengeResponse):
# This logic MUST be applied for messages sent in response to any of the higher-layer actions and in
# compliance with the message sequencing rules.
# * The client or server that sends the message MUST provide the 32-bit sequence number for this
# message, as specified in sections 3.2.4.1 and 3.3.4.1.
# * The SMB_FLAGS2_SMB_SECURITY_SIGNATURE flag in the header MUST be set.
# * To generate the signature, a 32-bit sequence number is copied into the
# least significant 32 bits of the SecuritySignature field and the remaining
# 4 bytes are set to 0x00.
# * The MD5 algorithm, as specified in [RFC1321], MUST be used to generate a hash of the SMB
# message from the start of the SMB Header, which is defined as follows.
# CALL MD5Init( md5context )
# CALL MD5Update( md5context, Connection.SigningSessionKey )
# CALL MD5Update( md5context, Connection.SigningChallengeResponse )
# CALL MD5Update( md5context, SMB message )
# CALL MD5Final( digest, md5context )
# SET signature TO the first 8 bytes of the digest
# The resulting 8-byte signature MUST be copied into the SecuritySignature field of the SMB Header,
# after which the message can be transmitted.
#print "seq(%d) signingSessionKey %r, signingChallengeResponse %r" % (self._SignSequenceNumber, signingSessionKey, signingChallengeResponse)
packet['SecurityFeatures'] = pack('<q',self._SignSequenceNumber)
# Sign with the sequence
m = hashlib.md5()
m.update( signingSessionKey )
m.update( signingChallengeResponse )
m.update( str(packet) )
# Replace the sequence number with the actual hash
packet['SecurityFeatures'] = m.digest()[:8]
if self._SignatureVerificationEnabled:
self._SignSequenceNumber +=1
else:
self._SignSequenceNumber +=2
def checkSignSMB(self, packet, signingSessionKey, signingChallengeResponse):
# Let's check
signature = packet['SecurityFeatures']
#print "Signature received: %r " % signature
self.signSMB(packet, signingSessionKey, signingChallengeResponse)
#print "Signature calculated: %r" % packet['SecurityFeatures']
if self._SignatureVerificationEnabled is not True:
self._SignSequenceNumber -= 1
return packet['SecurityFeatures'] == signature
def sendSMB(self,smb):
smb['Uid'] = self._uid
# At least on AIX, PIDs can exceed 16 bits, so we mask them out
smb['Pid'] = (os.getpid() & 0xFFFF)
# set flags
smb['Flags1'] |= self.__flags1
smb['Flags2'] |= self.__flags2
if self._SignatureEnabled:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
self.signSMB(smb, self._SigningSessionKey, self._SigningChallengeResponse)
self._sess.send_packet(str(smb))
@staticmethod
def isValidAnswer(s, cmd):
while 1:
if s.rawData():
if s.get_command() == cmd:
if s.get_error_class() == 0x00 and s.get_error_code() == 0x00:
return 1
else:
raise SessionError( "SMB Library Error", s.get_error_class()+ (s.get_reserved() << 8), s.get_error_code() , s.get_flags2() & SMB.FLAGS2_NT_STATUS)
else:
break
return 0
def neg_session(self, extended_security = True, negPacket = None):
def parsePacket(smb):
if smb.isValidAnswer(SMB.SMB_COM_NEGOTIATE):
sessionResponse = SMBCommand(smb['Data'][0])
self._dialects_parameters = SMBNTLMDialect_Parameters(sessionResponse['Parameters'])
self._dialects_data = SMBNTLMDialect_Data()
self._dialects_data['ChallengeLength'] = self._dialects_parameters['ChallengeLength']
self._dialects_data.fromString(sessionResponse['Data'])
if self._dialects_parameters['Capabilities'] & SMB.CAP_EXTENDED_SECURITY:
# Whether we choose it or it is enforced by the server, we go for extended security
self._dialects_parameters = SMBExtended_Security_Parameters(sessionResponse['Parameters'])
self._dialects_data = SMBExtended_Security_Data(sessionResponse['Data'])
# Let's setup some variable for later use
if self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SIGNATURES_REQUIRED:
self._SignatureRequired = True
# Interestingly, the security Blob might be missing sometimes.
#spnego = SPNEGO_NegTokenInit(self._dialects_data['SecurityBlob'])
#for i in spnego['MechTypes']:
# print "Mech Found: %s" % MechTypes[i]
return 1
# If not, let's try the old way
else:
if self._dialects_data['ServerName'] is not None:
self.__server_name = self._dialects_data['ServerName']
if self._dialects_parameters['DialectIndex'] == 0xffff:
raise UnsupportedFeature("Remote server does not know NT LM 0.12")
return 1
else:
return 0
if negPacket is None:
smb = NewSMBPacket()
negSession = SMBCommand(SMB.SMB_COM_NEGOTIATE)
flags2 = self.get_flags()[1]
if extended_security is True:
self.set_flags(flags2=flags2|SMB.FLAGS2_EXTENDED_SECURITY)
else:
self.set_flags(flags2=flags2 & (~SMB.FLAGS2_EXTENDED_SECURITY))
negSession['Data'] = '\x02NT LM 0.12\x00'
smb.addCommand(negSession)
self.sendSMB(smb)
while 1:
smb = self.recvSMB()
return parsePacket(smb)
else:
return parsePacket( NewSMBPacket( data = negPacket))
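# Note: neg_session() only advertises the "NT LM 0.12" dialect. The parsed response
# (self._dialects_parameters / self._dialects_data) determines whether the
# extended-security or the plain NTLM dialect structures defined above are used,
# and whether signing will be required for the rest of the session.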
def tree_connect(self, path, password = '', service = SERVICE_ANY):
LOG.warning("[MS-CIFS] This is an original Core Protocol command.This command has been deprecated.Client Implementations SHOULD use SMB_COM_TREE_CONNECT_ANDX")
# return 0x800
if password:
# The password is only encrypted if the server passed us an "encryption key" during protocol dialect negotiation
if self._dialects_parameters['ChallengeLength'] > 0:
# this code is untested
password = self.get_ntlmv1_response(ntlm.compute_lmhash(password))
if not unicode_support:
if unicode_convert:
path = str(path)
else:
raise Exception('SMB: Can\'t convert path from unicode!')
smb = NewSMBPacket()
treeConnect = SMBCommand(SMB.SMB_COM_TREE_CONNECT)
treeConnect['Parameters'] = SMBTreeConnect_Parameters()
treeConnect['Data'] = SMBTreeConnect_Data()
treeConnect['Data']['Path'] = path.upper()
treeConnect['Data']['Password'] = password
treeConnect['Data']['Service'] = service
smb.addCommand(treeConnect)
self.sendSMB(smb)
while 1:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_TREE_CONNECT):
# XXX Here we are ignoring the rest of the response
return smb['Tid']
return smb['Tid']
def get_uid(self):
return self._uid
def set_uid(self, uid):
self._uid = uid
def tree_connect_andx(self, path, password = None, service = SERVICE_ANY, smb_packet=None):
if password:
# The password is only encrypted if the server passed us an "encryption key" during protocol dialect negotiation
if self._dialects_parameters['ChallengeLength'] > 0:
# this code is untested
password = self.get_ntlmv1_response(ntlm.compute_lmhash(password))
else:
password = '\x00'
if not unicode_support:
if unicode_convert:
path = str(path)
else:
raise Exception('SMB: Can\'t convert path from unicode!')
if smb_packet is None:
smb = NewSMBPacket()
else:
smb = smb_packet
# Just in case this came with the full path, let's just keep
# the share name; we'll take care of the rest
share = path.split('\\')[-1]
try:
_, _, _, _, sockaddr = socket.getaddrinfo(self.get_remote_host(), 80, 0, 0, socket.IPPROTO_TCP)[0]
remote_host = sockaddr[0]
except Exception:
remote_host = self.get_remote_host()
path = '\\\\' + remote_host + '\\' +share
path = path.upper().encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else path
treeConnect = SMBCommand(SMB.SMB_COM_TREE_CONNECT_ANDX)
treeConnect['Parameters'] = SMBTreeConnectAndX_Parameters()
treeConnect['Data'] = SMBTreeConnectAndX_Data(flags=self.__flags2)
treeConnect['Parameters']['PasswordLength'] = len(password)
treeConnect['Data']['Password'] = password
treeConnect['Data']['Path'] = path
treeConnect['Data']['Service'] = service
if self.__flags2 & SMB.FLAGS2_UNICODE:
treeConnect['Data']['Pad'] = 0x0
smb.addCommand(treeConnect)
# filename = "\PIPE\epmapper"
# ntCreate = SMBCommand(SMB.SMB_COM_NT_CREATE_ANDX)
# ntCreate['Parameters'] = SMBNtCreateAndX_Parameters()
# ntCreate['Data'] = SMBNtCreateAndX_Data()
# ntCreate['Parameters']['FileNameLength'] = len(filename)
# ntCreate['Parameters']['CreateFlags'] = 0
# ntCreate['Parameters']['AccessMask'] = 0x3
# ntCreate['Parameters']['CreateOptions'] = 0x0
# ntCreate['Data']['FileName'] = filename
# smb.addCommand(ntCreate)
self.sendSMB(smb)
while 1:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_TREE_CONNECT_ANDX):
# XXX Here we are ignoring the rest of the response
self.tid = smb['Tid']
return self.tid
self.tid = smb['Tid']
return self.tid
# backwards compatibility
connect_tree = tree_connect_andx
@staticmethod
def getDialect():
return SMB_DIALECT
def get_server_name(self):
#return self._dialects_data['ServerName']
return self.__server_name
def get_session_key(self):
return self._SigningSessionKey
def set_session_key(self, key):
self._SigningSessionKey = key
def get_encryption_key(self):
if 'Challenge' in self._dialects_data.fields:
return self._dialects_data['Challenge']
else:
return None
def get_server_time(self):
timestamp = self._dialects_parameters['HighDateTime']
timestamp <<= 32
timestamp |= self._dialects_parameters['LowDateTime']
timestamp -= 116444736000000000
timestamp /= 10000000
d = datetime.datetime.utcfromtimestamp(timestamp)
return d.strftime("%a, %d %b %Y %H:%M:%S GMT")
def disconnect_tree(self, tid):
smb = NewSMBPacket()
smb['Tid'] = tid
smb.addCommand(SMBCommand(SMB.SMB_COM_TREE_DISCONNECT))
self.sendSMB(smb)
self.recvSMB()
def open(self, tid, filename, open_mode, desired_access):
filename = string.replace(filename,'/', '\\')
filename = filename.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else filename
smb = NewSMBPacket()
smb['Tid'] = tid
openFile = SMBCommand(SMB.SMB_COM_OPEN)
openFile['Parameters'] = SMBOpen_Parameters()
openFile['Parameters']['DesiredAccess'] = desired_access
openFile['Parameters']['OpenMode'] = open_mode
openFile['Parameters']['SearchAttributes'] = ATTR_READONLY | ATTR_HIDDEN | ATTR_ARCHIVE
openFile['Data'] = SMBOpen_Data(flags=self.__flags2)
openFile['Data']['FileName'] = filename
if self.__flags2 & SMB.FLAGS2_UNICODE:
openFile['Data']['Pad'] = 0x0
smb.addCommand(openFile)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_OPEN):
# XXX Here we are ignoring the rest of the response
openFileResponse = SMBCommand(smb['Data'][0])
openFileParameters = SMBOpenResponse_Parameters(openFileResponse['Parameters'])
return (
openFileParameters['Fid'],
openFileParameters['FileAttributes'],
openFileParameters['LastWriten'],
openFileParameters['FileSize'],
openFileParameters['GrantedAccess'],
)
def open_andx(self, tid, filename, open_mode, desired_access):
filename = string.replace(filename,'/', '\\')
filename = filename.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else filename
smb = NewSMBPacket()
smb['Tid'] = tid
openFile = SMBCommand(SMB.SMB_COM_OPEN_ANDX)
openFile['Parameters'] = SMBOpenAndX_Parameters()
openFile['Parameters']['DesiredAccess'] = desired_access
openFile['Parameters']['OpenMode'] = open_mode
openFile['Parameters']['SearchAttributes'] = ATTR_READONLY | ATTR_HIDDEN | ATTR_ARCHIVE
openFile['Data'] = SMBOpenAndX_Data(flags=self.__flags2)
openFile['Data']['FileName'] = filename
if self.__flags2 & SMB.FLAGS2_UNICODE:
openFile['Data']['Pad'] = 0x0
smb.addCommand(openFile)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_OPEN_ANDX):
# XXX Here we are ignoring the rest of the response
openFileResponse = SMBCommand(smb['Data'][0])
openFileParameters = SMBOpenAndXResponse_Parameters(openFileResponse['Parameters'])
return (
openFileParameters['Fid'],
openFileParameters['FileAttributes'],
openFileParameters['LastWriten'],
openFileParameters['FileSize'],
openFileParameters['GrantedAccess'],
openFileParameters['FileType'],
openFileParameters['IPCState'],
openFileParameters['Action'],
openFileParameters['ServerFid'],
)
def close(self, tid, fid):
smb = NewSMBPacket()
smb['Tid'] = tid
closeFile = SMBCommand(SMB.SMB_COM_CLOSE)
closeFile['Parameters'] = SMBClose_Parameters()
closeFile['Parameters']['FID'] = fid
smb.addCommand(closeFile)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_CLOSE):
return 1
return 0
def send_trans(self, tid, setup, name, param, data, noAnswer = 0):
smb = NewSMBPacket()
smb['Tid'] = tid
transCommand = SMBCommand(SMB.SMB_COM_TRANSACTION)
transCommand['Parameters'] = SMBTransaction_Parameters()
transCommand['Data'] = SMBTransaction_Data()
transCommand['Parameters']['Setup'] = setup
transCommand['Parameters']['TotalParameterCount'] = len(param)
transCommand['Parameters']['TotalDataCount'] = len(data)
transCommand['Parameters']['ParameterCount'] = len(param)
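        # ParameterOffset is relative to the start of the SMB packet: 32-byte header, word count (1)
        # and byte count (2), 28 bytes of fixed TRANSACTION parameter words, the setup words and the name.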
transCommand['Parameters']['ParameterOffset'] = 32+3+28+len(setup)+len(name)
transCommand['Parameters']['DataCount'] = len(data)
transCommand['Parameters']['DataOffset'] = transCommand['Parameters']['ParameterOffset'] + len(param)
transCommand['Data']['Name'] = name
transCommand['Data']['Trans_Parameters'] = param
transCommand['Data']['Trans_Data'] = data
if noAnswer:
transCommand['Parameters']['Flags'] = TRANS_NO_RESPONSE
smb.addCommand(transCommand)
self.sendSMB(smb)
def send_trans2(self, tid, setup, name, param, data):
smb = NewSMBPacket()
smb['Tid'] = tid
command = pack('<H', setup)
transCommand = SMBCommand(SMB.SMB_COM_TRANSACTION2)
transCommand['Parameters'] = SMBTransaction2_Parameters()
transCommand['Parameters']['MaxDataCount'] = self._dialects_parameters['MaxBufferSize']
transCommand['Data'] = SMBTransaction2_Data()
transCommand['Parameters']['Setup'] = command
transCommand['Parameters']['TotalParameterCount'] = len(param)
transCommand['Parameters']['TotalDataCount'] = len(data)
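        # Pad1/Pad2 keep the parameter and data blocks 4-byte aligned relative to the start of the SMB packet.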
if len(param) > 0:
padLen = (4 - (32+2+28 + len(command)) % 4 ) % 4
padBytes = '\xFF' * padLen
transCommand['Data']['Pad1'] = padBytes
else:
transCommand['Data']['Pad1'] = ''
padLen = 0
transCommand['Parameters']['ParameterCount'] = len(param)
transCommand['Parameters']['ParameterOffset'] = 32+2+28+len(command)+len(name) + padLen
if len(data) > 0:
pad2Len = (4 - (32+2+28 + len(command) + padLen + len(param)) % 4) % 4
transCommand['Data']['Pad2'] = '\xFF' * pad2Len
else:
transCommand['Data']['Pad2'] = ''
pad2Len = 0
transCommand['Parameters']['DataCount'] = len(data)
transCommand['Parameters']['DataOffset'] = transCommand['Parameters']['ParameterOffset'] + len(param) + pad2Len
transCommand['Data']['Name'] = name
transCommand['Data']['Trans_Parameters'] = param
transCommand['Data']['Trans_Data'] = data
smb.addCommand(transCommand)
self.sendSMB(smb)
def query_file_info(self, tid, fid, fileInfoClass = SMB_QUERY_FILE_STANDARD_INFO):
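        # The Fid and the requested information level travel as the TRANS2_QUERY_FILE_INFORMATION parameter block.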
self.send_trans2(tid, SMB.TRANS2_QUERY_FILE_INFORMATION, '\x00', pack('<HH', fid, fileInfoClass), '')
resp = self.recvSMB()
if resp.isValidAnswer(SMB.SMB_COM_TRANSACTION2):
trans2Response = SMBCommand(resp['Data'][0])
trans2Parameters = SMBTransaction2Response_Parameters(trans2Response['Parameters'])
# Remove Potential Prefix Padding
return trans2Response['Data'][-trans2Parameters['TotalDataCount']:]
def __nonraw_retr_file(self, tid, fid, offset, datasize, callback):
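        # Large reads (~64 KB) are only used when the server advertises CAP_LARGE_READX and signing is off;
        # otherwise stick to the negotiated MaxBufferSize.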
if (self._dialects_parameters['Capabilities'] & SMB.CAP_LARGE_READX) and self._SignatureEnabled is False:
max_buf_size = 65000
else:
max_buf_size = self._dialects_parameters['MaxBufferSize'] & ~0x3ff # Read in multiple KB blocks
read_offset = offset
while read_offset < datasize:
data = self.read_andx(tid, fid, read_offset, max_buf_size)
callback(data)
read_offset += len(data)
def __nonraw_stor_file(self, tid, fid, offset, datasize, callback):
if (self._dialects_parameters['Capabilities'] & SMB.CAP_LARGE_WRITEX) and self._SignatureEnabled is False:
max_buf_size = 65000
else:
max_buf_size = self._dialects_parameters['MaxBufferSize'] & ~0x3ff # Write in multiple KB blocks
write_offset = offset
while 1:
data = callback(max_buf_size)
if not data:
break
smb = self.write_andx(tid,fid,data, write_offset)
writeResponse = SMBCommand(smb['Data'][0])
writeResponseParameters = SMBWriteAndXResponse_Parameters(writeResponse['Parameters'])
write_offset += writeResponseParameters['Count']
def get_server_domain(self):
return self.__server_domain
def get_server_dns_domain_name(self):
return self.__server_dns_domain_name
def get_server_os(self):
return self.__server_os
def get_server_os_major(self):
return self.__server_os_major
def get_server_os_minor(self):
return self.__server_os_minor
def get_server_os_build(self):
return self.__server_os_build
def set_server_os(self, os):
self.__server_os = os
def get_server_lanman(self):
return self.__server_lanman
def is_login_required(self):
# Login is required if share mode is user.
# Otherwise only public services or services in share mode
# are allowed.
return (self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SHARE_MASK) == SMB.SECURITY_SHARE_USER
def is_signing_required(self):
return self._SignatureRequired
def get_ntlmv1_response(self, key):
challenge = self._dialects_data['Challenge']
return ntlm.get_ntlmv1_response(key, challenge)
def kerberos_login(self, user, password, domain = '', lmhash = '', nthash = '', aesKey = '', kdcHost = '', TGT=None, TGS=None):
# Importing down here so pyasn1 is not required if kerberos is not used.
from impacket.krb5.asn1 import AP_REQ, Authenticator, TGS_REP, seq_set
from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS
from impacket.krb5 import constants
from impacket.krb5.types import Principal, KerberosTime, Ticket
from pyasn1.codec.der import decoder, encoder
import datetime
# login feature does not support unicode
# disable it if enabled
flags2 = self.__flags2
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 = flags2 & (flags2 ^ SMB.FLAGS2_UNICODE)
# If TGT or TGS are specified, they are in the form of:
# TGS['KDC_REP'] = the response from the server
# TGS['cipher'] = the cipher used
# TGS['sessionKey'] = the sessionKey
# If we have hashes, normalize them
if lmhash != '' or nthash != '':
if len(lmhash) % 2: lmhash = '0%s' % lmhash
if len(nthash) % 2: nthash = '0%s' % nthash
try: # just in case they were converted already
lmhash = a2b_hex(lmhash)
nthash = a2b_hex(nthash)
except:
pass
self.__userName = user
self.__password = password
self.__domain = domain
self.__lmhash = lmhash
self.__nthash = nthash
self.__aesKey = aesKey
self.__kdc = kdcHost
self.__TGT = TGT
self.__TGS = TGS
# First of all, we need to get a TGT for the user
userName = Principal(user, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
if TGT is None:
if TGS is None:
tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, password, domain, lmhash, nthash, aesKey, kdcHost)
else:
tgt = TGT['KDC_REP']
cipher = TGT['cipher']
sessionKey = TGT['sessionKey']
# Now that we have the TGT, we should ask for a TGS for cifs
if TGS is None:
serverName = Principal('cifs/%s' % self.__remote_name, type=constants.PrincipalNameType.NT_SRV_INST.value)
tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, domain, kdcHost, tgt, cipher, sessionKey)
else:
tgs = TGS['KDC_REP']
cipher = TGS['cipher']
sessionKey = TGS['sessionKey']
smb = NewSMBPacket()
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 61440
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE | SMB.CAP_LARGE_READX | SMB.CAP_LARGE_WRITEX
# Let's build a NegTokenInit with the NTLMSSP
# TODO: In the future we should be able to choose different providers
blob = SPNEGO_NegTokenInit()
# Kerberos v5 mech
blob['MechTypes'] = [TypesMech['MS KRB5 - Microsoft Kerberos 5']]
# Let's extract the ticket from the TGS
tgs = decoder.decode(tgs, asn1Spec = TGS_REP())[0]
ticket = Ticket()
ticket.from_asn1(tgs['ticket'])
# Now let's build the AP_REQ
apReq = AP_REQ()
apReq['pvno'] = 5
apReq['msg-type'] = int(constants.ApplicationTagNumbers.AP_REQ.value)
opts = list()
apReq['ap-options'] = constants.encodeFlags(opts)
seq_set(apReq,'ticket', ticket.to_asn1)
authenticator = Authenticator()
authenticator['authenticator-vno'] = 5
authenticator['crealm'] = domain
seq_set(authenticator, 'cname', userName.components_to_asn1)
now = datetime.datetime.utcnow()
authenticator['cusec'] = now.microsecond
authenticator['ctime'] = KerberosTime.to_asn1(now)
encodedAuthenticator = encoder.encode(authenticator)
# Key Usage 11
# AP-REQ Authenticator (includes application authenticator
# subkey), encrypted with the application session key
# (Section 5.5.1)
encryptedEncodedAuthenticator = cipher.encrypt(sessionKey, 11, encodedAuthenticator, None)
apReq['authenticator'] = None
apReq['authenticator']['etype'] = cipher.enctype
apReq['authenticator']['cipher'] = encryptedEncodedAuthenticator
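        # The encoded AP-REQ is carried as the SPNEGO mechToken inside the Session Setup security blob.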
blob['MechToken'] = encoder.encode(apReq)
sessionSetup['Parameters']['SecurityBlobLength'] = len(blob)
sessionSetup['Parameters'].getData()
sessionSetup['Data']['SecurityBlob'] = blob.getData()
        # Fake data here; we don't want to get fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX):
# We will need to use this uid field for all future requests/responses
self._uid = smb['Uid']
# Now we have to extract the blob to continue the auth process
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndX_Extended_Response_Parameters(sessionResponse['Parameters'])
sessionData = SMBSessionSetupAndX_Extended_Response_Data(flags = smb['Flags2'])
sessionData['SecurityBlobLength'] = sessionParameters['SecurityBlobLength']
sessionData.fromString(sessionResponse['Data'])
self._action = sessionParameters['Action']
# If smb sign required, let's enable it for the rest of the connection
if self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SIGNATURES_REQUIRED:
self._SigningSessionKey = sessionKey.contents
self._SignSequenceNumber = 2
self._SignatureEnabled = True
# restore unicode flag if needed
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 |= SMB.FLAGS2_UNICODE
return 1
else:
raise Exception('Error: Could not login successfully')
def login_extended(self, user, password, domain = '', lmhash = '', nthash = '', use_ntlmv2 = True ):
# login feature does not support unicode
# disable it if enabled
flags2 = self.__flags2
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 = flags2 & (flags2 ^ SMB.FLAGS2_UNICODE)
# Once everything's working we should join login methods into a single one
smb = NewSMBPacket()
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 61440
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE | SMB.CAP_LARGE_READX | SMB.CAP_LARGE_WRITEX
# Let's build a NegTokenInit with the NTLMSSP
# TODO: In the future we should be able to choose different providers
blob = SPNEGO_NegTokenInit()
# NTLMSSP
blob['MechTypes'] = [TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']]
auth = ntlm.getNTLMSSPType1('','',self._SignatureRequired, use_ntlmv2 = use_ntlmv2)
blob['MechToken'] = str(auth)
sessionSetup['Parameters']['SecurityBlobLength'] = len(blob)
sessionSetup['Parameters'].getData()
sessionSetup['Data']['SecurityBlob'] = blob.getData()
        # Fake data here; we don't want to get fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX):
# We will need to use this uid field for all future requests/responses
self._uid = smb['Uid']
# Now we have to extract the blob to continue the auth process
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndX_Extended_Response_Parameters(sessionResponse['Parameters'])
sessionData = SMBSessionSetupAndX_Extended_Response_Data(flags = smb['Flags2'])
sessionData['SecurityBlobLength'] = sessionParameters['SecurityBlobLength']
sessionData.fromString(sessionResponse['Data'])
respToken = SPNEGO_NegTokenResp(sessionData['SecurityBlob'])
# Let's parse some data and keep it to ourselves in case it is asked
ntlmChallenge = ntlm.NTLMAuthChallenge(respToken['ResponseToken'])
if ntlmChallenge['TargetInfoFields_len'] > 0:
av_pairs = ntlm.AV_PAIRS(ntlmChallenge['TargetInfoFields'][:ntlmChallenge['TargetInfoFields_len']])
if av_pairs[ntlm.NTLMSSP_AV_HOSTNAME] is not None:
try:
self.__server_name = av_pairs[ntlm.NTLMSSP_AV_HOSTNAME][1].decode('utf-16le')
except:
                        # For some reason we couldn't decode Unicode here; silently discard the operation
pass
if av_pairs[ntlm.NTLMSSP_AV_DOMAINNAME] is not None:
try:
if self.__server_name != av_pairs[ntlm.NTLMSSP_AV_DOMAINNAME][1].decode('utf-16le'):
self.__server_domain = av_pairs[ntlm.NTLMSSP_AV_DOMAINNAME][1].decode('utf-16le')
except:
                        # For some reason we couldn't decode Unicode here; silently discard the operation
pass
if av_pairs[ntlm.NTLMSSP_AV_DNS_DOMAINNAME] is not None:
try:
self.__server_dns_domain_name = av_pairs[ntlm.NTLMSSP_AV_DNS_DOMAINNAME][1].decode('utf-16le')
except:
                        # For some reason we couldn't decode Unicode here; silently discard the operation
pass
# Parse Version to know the target Operating system name. Not provided elsewhere anymore
if 'Version' in ntlmChallenge.fields:
version = ntlmChallenge['Version']
if len(version) >= 4:
self.__server_os_major, self.__server_os_minor, self.__server_os_build = unpack('<BBH',version[:4])
type3, exportedSessionKey = ntlm.getNTLMSSPType3(auth, respToken['ResponseToken'], user, password, domain, lmhash, nthash, use_ntlmv2 = use_ntlmv2)
if exportedSessionKey is not None:
self._SigningSessionKey = exportedSessionKey
smb = NewSMBPacket()
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
respToken2 = SPNEGO_NegTokenResp()
respToken2['ResponseToken'] = str(type3)
# Reusing the previous structure
sessionSetup['Parameters']['SecurityBlobLength'] = len(respToken2)
sessionSetup['Data']['SecurityBlob'] = respToken2.getData()
# Storing some info for later use
self.__server_os = sessionData['NativeOS']
self.__server_lanman = sessionData['NativeLanMan']
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
self._uid = 0
if smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX):
self._uid = smb['Uid']
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndXResponse_Parameters(sessionResponse['Parameters'])
self._action = sessionParameters['Action']
# If smb sign required, let's enable it for the rest of the connection
if self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SIGNATURES_REQUIRED:
self._SignSequenceNumber = 2
self._SignatureEnabled = True
# restore unicode flag if needed
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 |= SMB.FLAGS2_UNICODE
return 1
else:
raise Exception('Error: Could not login successfully')
def getCredentials(self):
return (
self.__userName,
self.__password,
self.__domain,
self.__lmhash,
self.__nthash,
self.__aesKey,
self.__TGT,
self.__TGS)
def getIOCapabilities(self):
res = dict()
if (self._dialects_parameters['Capabilities'] & SMB.CAP_LARGE_READX) and self._SignatureEnabled is False:
max_size = 65000
else:
max_size = self._dialects_parameters['MaxBufferSize'] # Read in multiple KB blocks
res['MaxReadSize'] = max_size
res['MaxWriteSize'] = max_size
return res
def login(self, user, password, domain = '', lmhash = '', nthash = '', ntlm_fallback = True):
# If we have hashes, normalize them
if lmhash != '' or nthash != '':
if len(lmhash) % 2: lmhash = '0%s' % lmhash
if len(nthash) % 2: nthash = '0%s' % nthash
try: # just in case they were converted already
lmhash = a2b_hex(lmhash)
nthash = a2b_hex(nthash)
except:
pass
self.__userName = user
self.__password = password
self.__domain = domain
self.__lmhash = lmhash
self.__nthash = nthash
self.__aesKey = ''
self.__TGT = None
self.__TGS = None
if self._dialects_parameters['Capabilities'] & SMB.CAP_EXTENDED_SECURITY:
try:
self.login_extended(user, password, domain, lmhash, nthash, use_ntlmv2 = True)
except:
# If the target OS is Windows 5.0 or Samba, let's try using NTLMv1
if ntlm_fallback and ((self.get_server_lanman().find('Windows 2000') != -1) or (self.get_server_lanman().find('Samba') != -1)):
self.login_extended(user, password, domain, lmhash, nthash, use_ntlmv2 = False)
self.__isNTLMv2 = False
else:
raise
elif ntlm_fallback:
self.login_standard(user, password, domain, lmhash, nthash)
self.__isNTLMv2 = False
else:
raise SessionError('Cannot authenticate against target, enable ntlm_fallback')
def login_standard(self, user, password, domain = '', lmhash = '', nthash = ''):
# login feature does not support unicode
# disable it if enabled
flags2 = self.__flags2
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 = flags2 & (flags2 ^ SMB.FLAGS2_UNICODE)
# Only supports NTLMv1
# Password is only encrypted if the server passed us an "encryption key" during protocol dialect negotiation
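        # The LAN Manager response is sent in the AnsiPwd field and the NT response in the UnicodePwd field.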
if self._dialects_parameters['ChallengeLength'] > 0:
if lmhash != '' or nthash != '':
pwd_ansi = self.get_ntlmv1_response(lmhash)
pwd_unicode = self.get_ntlmv1_response(nthash)
elif password:
lmhash = ntlm.compute_lmhash(password)
nthash = ntlm.compute_nthash(password)
pwd_ansi = self.get_ntlmv1_response(lmhash)
pwd_unicode = self.get_ntlmv1_response(nthash)
else: # NULL SESSION
pwd_ansi = ''
pwd_unicode = ''
else:
pwd_ansi = password
pwd_unicode = ''
smb = NewSMBPacket()
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Data()
sessionSetup['Parameters']['MaxBuffer'] = 61440
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VCNumber'] = os.getpid()
sessionSetup['Parameters']['SessionKey'] = self._dialects_parameters['SessionKey']
sessionSetup['Parameters']['AnsiPwdLength'] = len(pwd_ansi)
sessionSetup['Parameters']['UnicodePwdLength'] = len(pwd_unicode)
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_RAW_MODE | SMB.CAP_USE_NT_ERRORS | SMB.CAP_LARGE_READX | SMB.CAP_LARGE_WRITEX
sessionSetup['Data']['AnsiPwd'] = pwd_ansi
sessionSetup['Data']['UnicodePwd'] = pwd_unicode
sessionSetup['Data']['Account'] = str(user)
sessionSetup['Data']['PrimaryDomain'] = str(domain)
sessionSetup['Data']['NativeOS'] = str(os.name)
sessionSetup['Data']['NativeLanMan'] = 'pysmb'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX):
# We will need to use this uid field for all future requests/responses
self._uid = smb['Uid']
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndXResponse_Parameters(sessionResponse['Parameters'])
sessionData = SMBSessionSetupAndXResponse_Data(flags = smb['Flags2'], data = sessionResponse['Data'])
self._action = sessionParameters['Action']
            # Still need to figure out how to do this without EXTENDED_SECURITY
if sessionParameters['Action'] & SMB_SETUP_USE_LANMAN_KEY == 0:
self._SigningChallengeResponse = sessionSetup['Data']['UnicodePwd']
self._SigningSessionKey = nthash
else:
self._SigningChallengeResponse = sessionSetup['Data']['AnsiPwd']
self._SigningSessionKey = lmhash
#self._SignSequenceNumber = 1
#self.checkSignSMB(smb, self._SigningSessionKey ,self._SigningChallengeResponse)
#self._SignatureEnabled = True
self.__server_os = sessionData['NativeOS']
self.__server_lanman = sessionData['NativeLanMan']
self.__server_domain = sessionData['PrimaryDomain']
# restore unicode flag if needed
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 |= SMB.FLAGS2_UNICODE
return 1
else: raise Exception('Error: Could not login successfully')
def waitNamedPipe(self, tid, pipe, timeout = 5, noAnswer = 0):
smb = NewSMBPacket()
smb['Tid'] = tid
transCommand = SMBCommand(SMB.SMB_COM_TRANSACTION)
transCommand['Parameters'] = SMBTransaction_Parameters()
transCommand['Data'] = SMBTransaction_Data()
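        # Setup words: 0x0053 (TRANS_WAIT_NMPIPE) followed by a zero priority word; the target pipe is given by the transaction name.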
setup = '\x53\x00\x00\x00'
name = '\\PIPE%s\x00' % pipe
transCommand['Parameters']['Setup'] = setup
transCommand['Parameters']['TotalParameterCount'] = 0
transCommand['Parameters']['TotalDataCount'] = 0
transCommand['Parameters']['MaxParameterCount'] = 0
transCommand['Parameters']['MaxDataCount'] = 0
transCommand['Parameters']['Timeout'] = timeout * 1000
transCommand['Parameters']['ParameterCount'] = 0
transCommand['Parameters']['ParameterOffset'] = 32+3+28+len(setup)+len(name)
transCommand['Parameters']['DataCount'] = 0
transCommand['Parameters']['DataOffset'] = 0
transCommand['Data']['Name'] = name
transCommand['Data']['Trans_Parameters'] = ''
transCommand['Data']['Trans_Data'] = ''
if noAnswer:
transCommand['Parameters']['Flags'] = TRANS_NO_RESPONSE
smb.addCommand(transCommand)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_TRANSACTION):
return 1
return 0
def read(self, tid, fid, offset=0, max_size = None, wait_answer=1):
if not max_size:
max_size = self._dialects_parameters['MaxBufferSize'] # Read in multiple KB blocks
        # A larger max_size is not working: although it should, the server returns an error (More data avail)
smb = NewSMBPacket()
smb['Tid'] = tid
read = SMBCommand(SMB.SMB_COM_READ)
read['Parameters'] = SMBRead_Parameters()
read['Parameters']['Fid'] = fid
read['Parameters']['Offset'] = offset
read['Parameters']['Count'] = max_size
smb.addCommand(read)
if wait_answer:
while 1:
self.sendSMB(smb)
ans = self.recvSMB()
if ans.isValidAnswer(SMB.SMB_COM_READ):
readResponse = SMBCommand(ans['Data'][0])
readData = SMBReadResponse_Data(readResponse['Data'])
return readData['Data']
return None
def read_andx(self, tid, fid, offset=0, max_size = None, wait_answer=1, smb_packet=None):
if not max_size:
if (self._dialects_parameters['Capabilities'] & SMB.CAP_LARGE_READX) and self._SignatureEnabled is False:
max_size = 65000
else:
max_size = self._dialects_parameters['MaxBufferSize'] # Read in multiple KB blocks
        # A larger max_size is not working: although it should, the server returns an error (More data avail)
if smb_packet is None:
smb = NewSMBPacket()
smb['Tid'] = tid
readAndX = SMBCommand(SMB.SMB_COM_READ_ANDX)
readAndX['Parameters'] = SMBReadAndX_Parameters()
readAndX['Parameters']['Fid'] = fid
readAndX['Parameters']['Offset'] = offset
readAndX['Parameters']['MaxCount'] = max_size
smb.addCommand(readAndX)
else:
smb = smb_packet
if wait_answer:
answer = ''
while 1:
self.sendSMB(smb)
ans = self.recvSMB()
if ans.isValidAnswer(SMB.SMB_COM_READ_ANDX):
# XXX Here we are only using a few fields from the response
readAndXResponse = SMBCommand(ans['Data'][0])
readAndXParameters = SMBReadAndXResponse_Parameters(readAndXResponse['Parameters'])
offset = readAndXParameters['DataOffset']
count = readAndXParameters['DataCount']+0x10000*readAndXParameters['DataCount_Hi']
answer += str(ans)[offset:offset+count]
if not ans.isMoreData():
return answer
max_size = min(max_size, readAndXParameters['Remaining'])
readAndX['Parameters']['Offset'] += count # XXX Offset is not important (apparently)
else:
self.sendSMB(smb)
ans = self.recvSMB()
try:
if ans.isValidAnswer(SMB.SMB_COM_READ_ANDX):
return ans
else:
return None
except:
return ans
return None
def read_raw(self, tid, fid, offset=0, max_size = None, wait_answer=1):
if not max_size:
max_size = self._dialects_parameters['MaxBufferSize'] # Read in multiple KB blocks
        # A larger max_size is not working: although it should, the server returns an error (More data avail)
smb = NewSMBPacket()
smb['Tid'] = tid
readRaw = SMBCommand(SMB.SMB_COM_READ_RAW)
readRaw['Parameters'] = SMBReadRaw_Parameters()
readRaw['Parameters']['Fid'] = fid
readRaw['Parameters']['Offset'] = offset
readRaw['Parameters']['MaxCount'] = max_size
smb.addCommand(readRaw)
self.sendSMB(smb)
if wait_answer:
data = self._sess.recv_packet(self.__timeout).get_trailer()
if not data:
# If there is no data it means there was an error
data = self.read_andx(tid, fid, offset, max_size)
return data
return None
def write(self,tid,fid,data, offset = 0, wait_answer=1):
smb = NewSMBPacket()
smb['Tid'] = tid
write = SMBCommand(SMB.SMB_COM_WRITE)
write['Parameters'] = SMBWrite_Parameters()
write['Data'] = SMBWrite_Data()
write['Parameters']['Fid'] = fid
write['Parameters']['Count'] = len(data)
write['Parameters']['Offset'] = offset
write['Parameters']['Remaining'] = len(data)
write['Data']['Data'] = data
smb.addCommand(write)
self.sendSMB(smb)
if wait_answer:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_WRITE):
return smb
return None
def write_andx(self,tid,fid,data, offset = 0, wait_answer=1, write_pipe_mode = False, smb_packet=None):
if smb_packet is None:
smb = NewSMBPacket()
smb['Tid'] = tid
writeAndX = SMBCommand(SMB.SMB_COM_WRITE_ANDX)
smb.addCommand(writeAndX)
writeAndX['Parameters'] = SMBWriteAndX_Parameters()
writeAndX['Parameters']['Fid'] = fid
writeAndX['Parameters']['Offset'] = offset
writeAndX['Parameters']['WriteMode'] = 8
writeAndX['Parameters']['Remaining'] = len(data)
writeAndX['Parameters']['DataLength'] = len(data)
writeAndX['Parameters']['DataOffset'] = len(smb) # this length already includes the parameter
writeAndX['Data'] = data
if write_pipe_mode is True:
                # First of all we need to know what the MaxBuffSize is
maxBuffSize = self._dialects_parameters['MaxBufferSize']
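                # Payloads larger than the negotiated buffer are chunked: the first chunk is sent with
                # WriteMode 0x0c and a two-byte message length prefix, the remaining chunks with WriteMode 0x04.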
if len(data) > maxBuffSize:
chunks_size = maxBuffSize - 60
writeAndX['Parameters']['WriteMode'] = 0x0c
sendData = '\xff\xff' + data
totalLen = len(sendData)
writeAndX['Parameters']['DataLength'] = chunks_size
writeAndX['Parameters']['Remaining'] = totalLen-2
writeAndX['Data'] = sendData[:chunks_size]
self.sendSMB(smb)
if wait_answer:
smbResp = self.recvSMB()
smbResp.isValidAnswer(SMB.SMB_COM_WRITE_ANDX)
alreadySent = chunks_size
sendData = sendData[chunks_size:]
while alreadySent < totalLen:
writeAndX['Parameters']['WriteMode'] = 0x04
writeAndX['Parameters']['DataLength'] = len(sendData[:chunks_size])
writeAndX['Data'] = sendData[:chunks_size]
self.sendSMB(smb)
if wait_answer:
smbResp = self.recvSMB()
smbResp.isValidAnswer(SMB.SMB_COM_WRITE_ANDX)
alreadySent += writeAndX['Parameters']['DataLength']
sendData = sendData[chunks_size:]
return smbResp
else:
smb = smb_packet
self.sendSMB(smb)
if wait_answer:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_WRITE_ANDX):
return smb
return None
def write_raw(self,tid,fid,data, offset = 0, wait_answer=1):
LOG.warning("[MS-CIFS] This command was introduced in the CorePlus dialect, but is often listed as part of the LAN Manager 1.0 dialect.This command has been deprecated.Clients SHOULD use SMB_COM_WRITE_ANDX")
smb = NewSMBPacket()
smb['Tid'] = tid
writeRaw = SMBCommand(SMB.SMB_COM_WRITE_RAW)
writeRaw['Parameters'] = SMBWriteRaw_Parameters()
writeRaw['Parameters']['Fid'] = fid
writeRaw['Parameters']['Offset'] = offset
writeRaw['Parameters']['Count'] = len(data)
writeRaw['Parameters']['DataLength'] = 0
writeRaw['Parameters']['DataOffset'] = 0
smb.addCommand(writeRaw)
self.sendSMB(smb)
self._sess.send_packet(data)
if wait_answer:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_WRITE_RAW):
return smb
return None
def TransactNamedPipe(self, tid, fid, data = '', noAnswer = 0, waitAnswer = 1, offset = 0):
self.send_trans(tid,pack('<HH', 0x26, fid),'\\PIPE\\\x00','',data, noAnswer = noAnswer)
if noAnswer or not waitAnswer:
return
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_TRANSACTION):
transResponse = SMBCommand(smb['Data'][0])
transParameters = SMBTransactionResponse_Parameters(transResponse['Parameters'])
return transResponse['Data'][-transParameters['TotalDataCount']:] # Remove Potential Prefix Padding
return None
def TransactNamedPipeRecv(self):
s = self.recvSMB()
if s.isValidAnswer(SMB.SMB_COM_TRANSACTION):
transResponse = SMBCommand(s['Data'][0])
transParameters = SMBTransactionResponse_Parameters(transResponse['Parameters'])
return transResponse['Data'][-transParameters['TotalDataCount']:] # Remove Potential Prefix Padding
return None
def nt_create_andx(self,tid,filename, smb_packet=None, cmd = None, shareAccessMode = FILE_SHARE_READ | FILE_SHARE_WRITE, disposition = FILE_OPEN, accessMask = 0x2019f):
filename = filename.replace('/', '\\')
filename = filename.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else filename
if smb_packet is None:
smb = NewSMBPacket()
smb['Tid'] = tid
else:
smb = smb_packet
if cmd is None:
ntCreate = SMBCommand(SMB.SMB_COM_NT_CREATE_ANDX)
ntCreate['Parameters'] = SMBNtCreateAndX_Parameters()
ntCreate['Data'] = SMBNtCreateAndX_Data(flags=self.__flags2)
ntCreate['Parameters']['FileNameLength'] = len(filename)
ntCreate['Parameters']['CreateFlags'] = 0x16
ntCreate['Parameters']['AccessMask'] = accessMask
ntCreate['Parameters']['CreateOptions'] = 0x40
ntCreate['Parameters']['ShareAccess'] = shareAccessMode
ntCreate['Parameters']['Disposition'] = disposition
ntCreate['Data']['FileName'] = filename
if self.__flags2 & SMB.FLAGS2_UNICODE:
ntCreate['Data']['Pad'] = 0x0
else:
ntCreate = cmd
smb.addCommand(ntCreate)
self.sendSMB(smb)
while 1:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_NT_CREATE_ANDX):
# XXX Here we are ignoring the rest of the response
ntCreateResponse = SMBCommand(smb['Data'][0])
ntCreateParameters = SMBNtCreateAndXResponse_Parameters(ntCreateResponse['Parameters'])
self.fid = ntCreateParameters['Fid']
return ntCreateParameters['Fid']
def logoff(self):
smb = NewSMBPacket()
logOff = SMBCommand(SMB.SMB_COM_LOGOFF_ANDX)
logOff['Parameters'] = SMBLogOffAndX()
smb.addCommand(logOff)
self.sendSMB(smb)
self.recvSMB()
# Let's clear some fields so you can login again under the same session
self._uid = 0
def list_path(self, service, path = '*', password = None):
path = path.replace('/', '\\')
path = path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else path
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
findFirstParameter = SMBFindFirst2_Parameters()
findFirstParameter['SearchAttributes'] = SMB_FILE_ATTRIBUTE_DIRECTORY | SMB_FILE_ATTRIBUTE_HIDDEN | \
SMB_FILE_ATTRIBUTE_SYSTEM | SMB_FILE_ATTRIBUTE_READONLY | \
SMB_FILE_ATTRIBUTE_ARCHIVE
findFirstParameter['SearchCount'] = 512
findFirstParameter['Flags'] = SMB_FIND_RETURN_RESUME_KEYS | SMB_FIND_CLOSE_AT_EOS
findFirstParameter['InformationLevel'] = SMB_FIND_FILE_BOTH_DIRECTORY_INFO
findFirstParameter['SearchStorageType'] = 0
findFirstParameter['FileName'] = path + ('\x00\x00' if self.__flags2 & SMB.FLAGS2_UNICODE else '\x00')
self.send_trans2(tid, SMB.TRANS2_FIND_FIRST2, '\x00', findFirstParameter, '')
files = [ ]
totalDataCount = 1
findData = ''
findFirst2ParameterBlock = ''
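            # Offsets in the TRANS2 response are relative to the SMB packet start; the 55 bytes skipped
            # cover the header, word count, fixed response parameter words and byte count.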
while len(findData) < totalDataCount:
resp = self.recvSMB()
if resp.isValidAnswer(SMB.SMB_COM_TRANSACTION2):
trans2Response = SMBCommand(resp['Data'][0])
trans2Parameters = SMBTransaction2Response_Parameters(trans2Response['Parameters'])
totalDataCount = trans2Parameters['TotalDataCount']
findFirst2ParameterBlock += trans2Response['Data'][trans2Parameters['ParameterOffset']-55:][:trans2Parameters['ParameterCount']]
findData += trans2Response['Data'][trans2Parameters['DataOffset']-55:]
findParameterBlock = SMBFindFirst2Response_Parameters(findFirst2ParameterBlock)
# Save the SID for resume operations
sid = findParameterBlock['SID']
while True:
record = SMBFindFileBothDirectoryInfo(data = findData)
shortname = record['ShortName'].decode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else record['ShortName']
filename = record['FileName'].decode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else record['FileName']
fileRecord = SharedFile(record['CreationTime'], record['LastAccessTime'], record['LastChangeTime'],
record['EndOfFile'], record['AllocationSize'], record['ExtFileAttributes'],
shortname, filename)
files.append(fileRecord)
if record['NextEntryOffset'] > 0 and len(findData[record['NextEntryOffset']:]) > 0:
findData = findData[record['NextEntryOffset']:]
else:
# More data to search?
if findParameterBlock['EndOfSearch'] == 0:
resume_filename = record['FileName']
findNextParameter = SMBFindNext2_Parameters()
findNextParameter['SID'] = sid
findNextParameter['SearchCount'] = 1024
findNextParameter['InformationLevel'] = SMB_FIND_FILE_BOTH_DIRECTORY_INFO
findNextParameter['ResumeKey'] = 0
findNextParameter['Flags'] = SMB_FIND_RETURN_RESUME_KEYS | SMB_FIND_CLOSE_AT_EOS
findNextParameter['FileName'] = resume_filename + ('\x00\x00' if self.__flags2 & SMB.FLAGS2_UNICODE else '\x00')
self.send_trans2(tid, SMB.TRANS2_FIND_NEXT2, '\x00', findNextParameter, '')
findData = ''
findNext2ParameterBlock = ''
totalDataCount = 1
while len(findData) < totalDataCount:
resp = self.recvSMB()
if resp.isValidAnswer(SMB.SMB_COM_TRANSACTION2):
trans2Response = SMBCommand(resp['Data'][0])
trans2Parameters = SMBTransaction2Response_Parameters(trans2Response['Parameters'])
totalDataCount = trans2Parameters['TotalDataCount']
findNext2ParameterBlock += trans2Response['Data'][trans2Parameters['ParameterOffset']-55:][:trans2Parameters['ParameterCount']]
findData += trans2Response['Data'][trans2Parameters['DataOffset']-55:]
findParameterBlock = SMBFindNext2Response_Parameters(findNext2ParameterBlock)
else:
break
finally:
self.disconnect_tree(tid)
return files
def retr_file(self, service, filename, callback, mode = FILE_OPEN, offset = 0, password = None, shareAccessMode = SMB_ACCESS_READ):
filename = string.replace(filename, '/', '\\')
fid = -1
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
fid = self.nt_create_andx(tid, filename, shareAccessMode = shareAccessMode, accessMask = 0x20089)
res = self.query_file_info(tid, fid)
datasize = SMBQueryFileStandardInfo(res)['EndOfFile']
self.__nonraw_retr_file(tid, fid, offset, datasize, callback)
finally:
if fid >= 0:
self.close(tid, fid)
self.disconnect_tree(tid)
def stor_file(self, service, filename, callback, mode = FILE_OVERWRITE_IF, offset = 0, password = None, shareAccessMode = SMB_ACCESS_WRITE):
filename = string.replace(filename, '/', '\\')
fid = -1
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
fid = self.nt_create_andx(tid, filename, shareAccessMode = shareAccessMode, disposition = mode )
self.__nonraw_stor_file(tid, fid, offset, 0, callback)
finally:
if fid >= 0:
self.close(tid, fid)
self.disconnect_tree(tid)
def stor_file_nonraw(self, service, filename, callback, mode = FILE_OVERWRITE_IF, offset = 0, password = None, shareAccessMode = SMB_ACCESS_WRITE ):
filename = string.replace(filename, '/', '\\')
fid = -1
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
fid = self.nt_create_andx(tid, filename, shareAccessMode = shareAccessMode, disposition = mode)
self.__nonraw_stor_file(tid, fid, offset, 0, callback)
finally:
if fid >= 0:
self.close(tid, fid)
self.disconnect_tree(tid)
def check_dir(self, service, path, password = None):
path = string.replace(path,'/', '\\')
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
smb = NewSMBPacket()
smb['Tid'] = tid
smb['Mid'] = 0
cmd = SMBCommand(SMB.SMB_COM_CHECK_DIRECTORY)
cmd['Parameters'] = ''
cmd['Data'] = SMBCheckDirectory_Data(flags = self.__flags2)
cmd['Data']['DirectoryName'] = path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else path
smb.addCommand(cmd)
self.sendSMB(smb)
while 1:
s = self.recvSMB()
if s.isValidAnswer(SMB.SMB_COM_CHECK_DIRECTORY):
return
finally:
self.disconnect_tree(tid)
def remove(self, service, path, password = None):
path = string.replace(path,'/', '\\')
# Perform a list to ensure the path exists
self.list_path(service, path, password)
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
smb = NewSMBPacket()
smb['Tid'] = tid
smb['Mid'] = 0
cmd = SMBCommand(SMB.SMB_COM_DELETE)
cmd['Parameters'] = SMBDelete_Parameters()
cmd['Parameters']['SearchAttributes'] = ATTR_HIDDEN | ATTR_SYSTEM | ATTR_ARCHIVE
cmd['Data'] = SMBDelete_Data(flags = self.__flags2)
cmd['Data']['FileName'] = (path + '\x00').encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else (path + '\x00')
smb.addCommand(cmd)
self.sendSMB(smb)
while 1:
s = self.recvSMB()
if s.isValidAnswer(SMB.SMB_COM_DELETE):
return
finally:
self.disconnect_tree(tid)
def rmdir(self, service, path, password = None):
path = string.replace(path,'/', '\\')
# Check that the directory exists
self.check_dir(service, path, password)
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
path = path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else path
smb = NewSMBPacket()
smb['Tid'] = tid
createDir = SMBCommand(SMB.SMB_COM_DELETE_DIRECTORY)
createDir['Data'] = SMBDeleteDirectory_Data(flags=self.__flags2)
createDir['Data']['DirectoryName'] = path
smb.addCommand(createDir)
self.sendSMB(smb)
while 1:
s = self.recvSMB()
if s.isValidAnswer(SMB.SMB_COM_DELETE_DIRECTORY):
return
finally:
self.disconnect_tree(tid)
def mkdir(self, service, path, password = None):
path = string.replace(path,'/', '\\')
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
path = path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else path
smb = NewSMBPacket()
smb['Tid'] = tid
smb['Mid'] = 0
createDir = SMBCommand(SMB.SMB_COM_CREATE_DIRECTORY)
createDir['Data'] = SMBCreateDirectory_Data(flags=self.__flags2)
createDir['Data']['DirectoryName'] = path
smb.addCommand(createDir)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_CREATE_DIRECTORY):
return 1
return 0
finally:
self.disconnect_tree(tid)
def rename(self, service, old_path, new_path, password = None):
old_path = string.replace(old_path,'/', '\\')
new_path = string.replace(new_path,'/', '\\')
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
smb = NewSMBPacket()
smb['Tid'] = tid
smb['Mid'] = 0
renameCmd = SMBCommand(SMB.SMB_COM_RENAME)
renameCmd['Parameters'] = SMBRename_Parameters()
renameCmd['Parameters']['SearchAttributes'] = ATTR_SYSTEM | ATTR_HIDDEN | ATTR_DIRECTORY
renameCmd['Data'] = SMBRename_Data(flags = self.__flags2)
renameCmd['Data']['OldFileName'] = old_path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else old_path
renameCmd['Data']['NewFileName'] = new_path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else new_path
smb.addCommand(renameCmd)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_RENAME):
return 1
return 0
finally:
self.disconnect_tree(tid)
def writeFile(self, treeId, fileId, data, offset = 0):
if (self._dialects_parameters['Capabilities'] & SMB.CAP_LARGE_WRITEX) and self._SignatureEnabled is False:
max_buf_size = 65000
else:
max_buf_size = self._dialects_parameters['MaxBufferSize'] & ~0x3ff # Write in multiple KB blocks
write_offset = offset
while 1:
if len(data) == 0:
break
writeData = data[:max_buf_size]
data = data[max_buf_size:]
smb = self.write_andx(treeId,fileId,writeData, write_offset)
writeResponse = SMBCommand(smb['Data'][0])
writeResponseParameters = SMBWriteAndXResponse_Parameters(writeResponse['Parameters'])
write_offset += writeResponseParameters['Count']
def get_socket(self):
return self._sess.get_socket()
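# Human-readable messages for the DOS (ERRDOS), server (ERRSRV) and hardware (ERRHRD) SMB error classes.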
ERRDOS = { 1: 'Invalid function',
2: 'File not found',
3: 'Invalid directory',
4: 'Too many open files',
5: 'Access denied',
6: 'Invalid file handle. Please file a bug report.',
7: 'Memory control blocks destroyed',
8: 'Out of memory',
9: 'Invalid memory block address',
10: 'Invalid environment',
11: 'Invalid format',
12: 'Invalid open mode',
13: 'Invalid data',
15: 'Invalid drive',
16: 'Attempt to remove server\'s current directory',
17: 'Not the same device',
18: 'No files found',
32: 'Sharing mode conflicts detected',
33: 'Lock request conflicts detected',
80: 'File already exists'
}
ERRSRV = { 1: 'Non-specific error',
2: 'Bad password',
4: 'Access denied',
5: 'Invalid tid. Please file a bug report.',
6: 'Invalid network name',
7: 'Invalid device',
49: 'Print queue full',
50: 'Print queue full',
51: 'EOF on print queue dump',
52: 'Invalid print file handle',
64: 'Command not recognized. Please file a bug report.',
65: 'Internal server error',
67: 'Invalid path',
69: 'Invalid access permissions',
71: 'Invalid attribute mode',
81: 'Server is paused',
82: 'Not receiving messages',
83: 'No room to buffer messages',
87: 'Too many remote user names',
88: 'Operation timeout',
89: 'Out of resources',
91: 'Invalid user handle. Please file a bug report.',
250: 'Temporarily unable to support raw mode for transfer',
251: 'Temporarily unable to support raw mode for transfer',
252: 'Continue in MPX mode',
65535: 'Unsupported function'
}
ERRHRD = { 19: 'Media is write-protected',
20: 'Unknown unit',
21: 'Drive not ready',
22: 'Unknown command',
23: 'CRC error',
24: 'Bad request',
25: 'Seek error',
26: 'Unknown media type',
27: 'Sector not found',
28: 'Printer out of paper',
29: 'Write fault',
30: 'Read fault',
31: 'General failure',
32: 'Open conflicts with an existing open',
33: 'Invalid lock request',
34: 'Wrong disk in drive',
35: 'FCBs not available',
36: 'Sharing buffer exceeded'
}
|
apache-2.0
| 3,336,921,338,414,145,000
| 35.990485
| 215
| 0.553244
| false
| 3.882916
| false
| false
| false
|
southpaw94/MachineLearning
|
HPTuning/SVM_X_Validation.py
|
1
|
1287
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
# This program introduces validation curves, which are essential
# in reducing overfitting or underfitting of the learning algorithm.
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases'+\
'/breast-cancer-wisconsin/wdbc.data', header=None)
X = df.loc[:, 2:].values
y = df.loc[:, 1].values
# All malignant tumors will be represented as class 1, otherwise, class 0
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, \
test_size=0.20, random_state=1)
gs = GridSearchCV( \
estimator = DecisionTreeClassifier(random_state = 0), \
param_grid = [ \
{'max_depth': [1, 2, 3, 4, 5, 6, 7, None]} \
], \
scoring = 'accuracy', \
cv = 5)
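# Passing the GridSearchCV estimator to cross_val_score performs nested cross-validation:
# the inner 5-fold search tunes max_depth while the outer 5-fold loop estimates generalization accuracy.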
scores = cross_val_score(gs, \
X_train, \
y_train, \
scoring = 'accuracy', \
cv = 5)
print('CV accuracy: %.3f +/- %.3f' % ( \
np.mean(scores), np.std(scores)))
|
gpl-2.0
| 6,369,555,869,987,812,000
| 30.390244
| 78
| 0.656566
| false
| 3.31701
| false
| false
| false
|
cloudbase/coriolis
|
coriolis/api/__init__.py
|
1
|
5046
|
# Copyright (c) 2013 OpenStack Foundation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack API controllers.
"""
from paste import urlmap
import routes
from oslo_log import log as logging
from oslo_service import wsgi as base_wsgi
from coriolis.api import wsgi
from coriolis import exception
from coriolis.i18n import _, _LW # noqa
LOG = logging.getLogger(__name__)
def root_app_factory(loader, global_conf, **local_conf):
return urlmap.urlmap_factory(loader, global_conf, **local_conf)
class APIMapper(routes.Mapper):
def routematch(self, url=None, environ=None):
        if url == "":
result = self._match("", environ)
return result[0], result[1]
return routes.Mapper.routematch(self, url, environ)
def connect(self, *args, **kwargs):
# NOTE(inhye): Default the format part of a route to only accept json
# and xml so it doesn't eat all characters after a '.'
# in the url.
kwargs.setdefault('requirements', {})
if not kwargs['requirements'].get('format'):
kwargs['requirements']['format'] = 'json|xml'
return routes.Mapper.connect(self, *args, **kwargs)
class ProjectMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
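        # Every route is prefixed with the project id; nested resources also embed the parent
        # collection and member id in the path.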
if 'parent_resource' not in kwargs:
kwargs['path_prefix'] = '{project_id}/'
else:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
p_member)
routes.Mapper.resource(self,
member_name,
collection_name,
**kwargs)
class APIRouter(base_wsgi.Router):
"""Routes requests on the API to the appropriate controller and method."""
ExtensionManager = None # override in subclasses
@classmethod
def factory(cls, global_config, **local_config):
return cls()
def __init__(self, ext_mgr=None):
if ext_mgr is None:
if self.ExtensionManager:
ext_mgr = self.ExtensionManager()
else:
raise exception.CoriolisException(
_("Must specify an ExtensionManager class"))
mapper = ProjectMapper()
self.resources = {}
self._setup_routes(mapper, ext_mgr)
self._setup_ext_routes(mapper, ext_mgr)
self._setup_extensions(ext_mgr)
super(APIRouter, self).__init__(mapper)
def _setup_ext_routes(self, mapper, ext_mgr):
for resource in ext_mgr.get_resources():
LOG.debug('Extended resource: %s',
resource.collection)
wsgi_resource = wsgi.Resource(resource.controller)
self.resources[resource.collection] = wsgi_resource
kargs = dict(
controller=wsgi_resource,
collection=resource.collection_actions,
member=resource.member_actions)
if resource.parent:
kargs['parent_resource'] = resource.parent
mapper.resource(resource.collection, resource.collection, **kargs)
if resource.custom_routes_fn:
resource.custom_routes_fn(mapper, wsgi_resource)
def _setup_extensions(self, ext_mgr):
for extension in ext_mgr.get_controller_extensions():
collection = extension.collection
controller = extension.controller
if collection not in self.resources:
LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
'resource %(collection)s: No such resource'),
{'ext_name': extension.extension.name,
'collection': collection})
continue
LOG.debug('Extension %(ext_name)s extending resource: '
'%(collection)s',
{'ext_name': extension.extension.name,
'collection': collection})
resource = self.resources[collection]
resource.register_actions(controller)
resource.register_extensions(controller)
def _setup_routes(self, mapper, ext_mgr):
raise NotImplementedError
|
agpl-3.0
| 3,846,645,010,667,522,000
| 35.832117
| 78
| 0.596512
| false
| 4.473404
| false
| false
| false
|
Aravinthu/odoo
|
addons/website/models/ir_http.py
|
1
|
10023
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import traceback
import os
import unittest
import werkzeug
import werkzeug.routing
import werkzeug.utils
import odoo
from odoo import api, models
from odoo import SUPERUSER_ID
from odoo.http import request
from odoo.tools import config
from odoo.exceptions import QWebException
from odoo.tools.safe_eval import safe_eval
from odoo.osv.expression import FALSE_DOMAIN
from odoo.addons.http_routing.models.ir_http import ModelConverter
logger = logging.getLogger(__name__)
def sitemap_qs2dom(qs, route, field='name'):
""" Convert a query_string (can contains a path) to a domain"""
dom = []
if qs and qs.lower() not in route:
needles = qs.strip('/').split('/')
# needles will be altered and keep only element which one is not in route
# diff(from=['shop', 'product'], to=['shop', 'product', 'product']) => to=['product']
unittest.util.unorderable_list_difference(route.strip('/').split('/'), needles)
if len(needles) == 1:
dom = [(field, 'ilike', needles[0])]
else:
dom = FALSE_DOMAIN
return dom
def _guess_mimetype(ext=False, default=False):
exts = {
'.css': ['text/css', 'website.default_css'],
'.less': ['text/less', 'website.default_less'],
'.js': ['text/javascript', 'website.default_javascript'],
'.xml': ['text/xml', 'website.default_xml'],
'.csv': ['text/csv', 'website.default_csv'],
'.html': ['text/html', False],
}
if not default:
default = exts['.html']
return ext is not False and exts.get(ext, default) or exts
class Http(models.AbstractModel):
_inherit = 'ir.http'
@classmethod
def _get_converters(cls):
""" Get the converters list for custom url pattern werkzeug need to
match Rule. This override adds the website ones.
"""
return dict(
super(Http, cls)._get_converters(),
model=ModelConverter,
)
@classmethod
def _auth_method_public(cls):
""" If no user logged, set the public user of current website, or default
public user as request uid.
After this method `request.env` can be called, since the `request.uid` is
set. The `env` lazy property of `request` will be correct.
"""
if not request.session.uid:
env = api.Environment(request.cr, SUPERUSER_ID, request.context)
website = env['website'].get_current_website()
if website:
request.uid = website.user_id.id
if not request.uid:
super(Http, cls)._auth_method_public()
@classmethod
def _add_dispatch_parameters(cls, func):
if request.is_frontend:
context = dict(request.context)
if not context.get('tz'):
context['tz'] = request.session.get('geoip', {}).get('time_zone')
request.website = request.env['website'].get_current_website() # can use `request.env` since auth methods are called
context['website_id'] = request.website.id
super(Http, cls)._add_dispatch_parameters(func)
if request.is_frontend and request.routing_iteration == 1:
request.website = request.website.with_context(context)
@classmethod
def _get_languages(cls):
if getattr(request, 'website', False):
return request.website.language_ids
return super(Http, cls)._get_languages()
@classmethod
def _get_language_codes(cls):
if request.website:
return request.website._get_languages()
return super(Http, cls)._get_language_codes()
@classmethod
def _get_default_lang(cls):
if getattr(request, 'website', False):
return request.website.default_lang_id
return super(Http, cls)._get_default_lang()
@classmethod
def _serve_page(cls):
req_page = request.httprequest.path
domain = [('url', '=', req_page), '|', ('website_ids', 'in', request.website.id), ('website_ids', '=', False)]
if not request.website.is_publisher:
domain += [('is_visible', '=', True)]
mypage = request.env['website.page'].search(domain, limit=1)
_, ext = os.path.splitext(req_page)
if mypage:
return request.render(mypage.view_id.id, {
# 'path': req_page[1:],
'deletable': True,
'main_object': mypage,
}, mimetype=_guess_mimetype(ext)[0])
return False
@classmethod
def _serve_404(cls):
req_page = request.httprequest.path
return request.website.is_publisher() and request.render('website.page_404', {'path': req_page[1:]}) or False
@classmethod
def _serve_redirect(cls):
req_page = request.httprequest.path
domain = [
'|', ('website_id', '=', request.website.id), ('website_id', '=', False),
('url_from', '=', req_page)
]
return request.env['website.redirect'].search(domain, limit=1)
@classmethod
def _serve_fallback(cls, exception):
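        # Fallback order: attachments (handled by the parent class), website pages, configured redirects,
        # and finally the 404 page.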
# serve attachment before
parent = super(Http, cls)._serve_fallback(exception)
if parent: # attachment
return parent
website_page = cls._serve_page()
if website_page:
return website_page
redirect = cls._serve_redirect()
if redirect:
return request.redirect(redirect.url_to, code=redirect.type)
return cls._serve_404()
@classmethod
def _handle_exception(cls, exception):
code = 500 # default code
is_website_request = bool(getattr(request, 'is_frontend', False) and getattr(request, 'website', False))
if not is_website_request:
# Don't touch non website requests exception handling
return super(Http, cls)._handle_exception(exception)
else:
try:
response = super(Http, cls)._handle_exception(exception)
if isinstance(response, Exception):
exception = response
else:
                    # if the parent explicitly returns a plain response, we don't touch it
return response
except Exception as e:
if 'werkzeug' in config['dev_mode'] and (not isinstance(exception, QWebException) or not exception.qweb.get('cause')):
raise
exception = e
values = dict(
exception=exception,
traceback=traceback.format_exc(),
)
if isinstance(exception, werkzeug.exceptions.HTTPException):
if exception.code is None:
# Hand-crafted HTTPException likely coming from abort(),
# usually for a redirect response -> return it directly
return exception
else:
code = exception.code
if isinstance(exception, odoo.exceptions.AccessError):
code = 403
if isinstance(exception, QWebException):
values.update(qweb_exception=exception)
if isinstance(exception.qweb.get('cause'), odoo.exceptions.AccessError):
code = 403
if code == 500:
logger.error("500 Internal Server Error:\n\n%s", values['traceback'])
if 'qweb_exception' in values:
view = request.env["ir.ui.view"]
views = view._views_get(exception.qweb['template'])
to_reset = views.filtered(lambda view: view.model_data_id.noupdate is True and view.arch_fs)
values['views'] = to_reset
elif code == 403:
logger.warn("403 Forbidden:\n\n%s", values['traceback'])
values.update(
status_message=werkzeug.http.HTTP_STATUS_CODES[code],
status_code=code,
)
if not request.uid:
cls._auth_method_public()
try:
html = request.env['ir.ui.view'].render_template('website.%s' % code, values)
except Exception:
html = request.env['ir.ui.view'].render_template('website.http_error', values)
return werkzeug.wrappers.Response(html, status=code, content_type='text/html;charset=utf-8')
@classmethod
def binary_content(cls, xmlid=None, model='ir.attachment', id=None, field='datas',
unique=False, filename=None, filename_field='datas_fname', download=False,
mimetype=None, default_mimetype='application/octet-stream',
access_token=None, env=None):
env = env or request.env
obj = None
if xmlid:
obj = env.ref(xmlid, False)
elif id and model in env:
obj = env[model].browse(int(id))
if obj and 'website_published' in obj._fields:
if env[obj._name].sudo().search([('id', '=', obj.id), ('website_published', '=', True)]):
env = env(user=SUPERUSER_ID)
return super(Http, cls).binary_content(
xmlid=xmlid, model=model, id=id, field=field, unique=unique, filename=filename,
filename_field=filename_field, download=download, mimetype=mimetype,
default_mimetype=default_mimetype, access_token=access_token, env=env)
class ModelConverter(ModelConverter):
def generate(self, uid, dom=None, args=None):
Model = request.env[self.model].sudo(uid)
domain = safe_eval(self.domain, (args or {}).copy())
if dom:
domain += dom
for record in Model.search_read(domain=domain, fields=['write_date', Model._rec_name]):
if record.get(Model._rec_name, False):
yield {'loc': (record['id'], record[Model._rec_name])}
|
agpl-3.0
| 8,303,738,624,251,257,000
| 37.255725
| 134
| 0.583458
| false
| 4.229114
| false
| false
| false
|
atty303/pyfilesystem
|
fs/commands/fsinfo.py
|
1
|
3443
|
#!/usr/bin/env python
from fs.errors import ResourceNotFoundError
from fs.opener import opener
from fs.commands.runner import Command
import sys
from datetime import datetime
class FSInfo(Command):
usage = """fsinfo [OPTION]... [PATH]
Display information regarding an FS resource"""
def get_optparse(self):
optparse = super(FSInfo, self).get_optparse()
optparse.add_option('-k', '--key', dest='keys', action='append', default=[],
help='display KEYS only')
optparse.add_option('-s', '--simple', dest='simple', action='store_true', default=False,
help='info displayed in simple format (no table)')
optparse.add_option('-o', '--omit', dest='omit', action='store_true', default=False,
help='omit path name from output')
optparse.add_option('-d', '--dirsonly', dest='dirsonly', action="store_true", default=False,
help="list directories only", metavar="DIRSONLY")
optparse.add_option('-f', '--filesonly', dest='filesonly', action="store_true", default=False,
help="list files only", metavar="FILESONLY")
return optparse
def do_run(self, options, args):
def wrap_value(val):
if val.rstrip() == '\0':
return self.wrap_error('... missing ...')
return val
def make_printable(text):
if not isinstance(text, basestring):
try:
text = str(text)
except:
try:
text = unicode(text)
except:
text = repr(text)
return text
keys = options.keys or None
for fs, path, is_dir in self.get_resources(args,
files_only=options.filesonly,
dirs_only=options.dirsonly):
if not options.omit:
if options.simple:
file_line = u'%s\n' % self.wrap_filename(path)
else:
file_line = u'[%s] %s\n' % (self.wrap_filename(path), self.wrap_faded(fs.desc(path)))
self.output(file_line)
info = fs.getinfo(path)
for k, v in info.items():
if k.startswith('_'):
del info[k]
elif not isinstance(v, (basestring, int, float, bool, datetime)):
del info[k]
if keys:
table = [(k, make_printable(info.get(k, '\0'))) for k in keys]
else:
keys = sorted(info.keys())
table = [(k, make_printable(info[k])) for k in sorted(info.keys())]
if options.simple:
for row in table:
self.output(row[-1] + '\n')
else:
self.output_table(table, {0:self.wrap_table_header, 1:wrap_value})
def run():
return FSInfo().run()
if __name__ == "__main__":
sys.exit(run())
|
bsd-3-clause
| 9,063,598,042,925,267,000
| 40
| 105
| 0.448446
| false
| 4.697135
| false
| false
| false
|
fnaum/rez
|
src/rez/cli/complete.py
|
1
|
3476
|
"""
Prints package completion strings.
"""
from __future__ import print_function
import argparse
__doc__ = argparse.SUPPRESS
def setup_parser(parser, completions=False):
pass
def command(opts, parser, extra_arg_groups=None):
from rez.cli._util import subcommands
import os
import re
# get comp info from environment variables
comp_line = os.getenv("COMP_LINE", "")
comp_point = os.getenv("COMP_POINT", "")
try:
comp_point = int(comp_point)
except:
comp_point = len(comp_line)
last_word = comp_line.split()[-1]
if comp_line.endswith(last_word):
prefix = last_word
else:
prefix = None
def _pop_arg(l, p):
words = l.split()
arg = None
if words:
arg = words[0]
l_ = l.lstrip()
p -= (len(l) - len(l_) + len(arg))
l = l_[len(arg):]
return l, p, arg
return l, p, arg
# determine subcommand, possibly give subcommand completion
subcommand = None
comp_line, comp_point, cmd = _pop_arg(comp_line, comp_point)
if cmd in ("rez", "rezolve"):
comp_line, comp_point, arg = _pop_arg(comp_line, comp_point)
if arg:
if prefix != arg:
subcommand = arg
else:
subcommand = cmd.split("-", 1)[-1]
if subcommand is None:
cmds = [k for k, v in subcommands.items() if not v.get("hidden")]
if prefix:
cmds = (x for x in cmds if x.startswith(prefix))
print(" ".join(cmds))
if subcommand not in subcommands:
return
# replace '--' with special '--N#' flag so that subcommands can specify
# custom completions.
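    # e.g. (illustrative) "rez env foo -- bash" becomes "rez env foo --N0 bash";
    # a second " -- " on the same command line would become " --N1 ".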
regex = re.compile("\s--\s")
ddashes = regex.findall(comp_line)
for i, ddash in enumerate(ddashes):
j = comp_line.find(ddash)
while comp_line[j] != "-":
j += 1
j += 2
s = "N%d" % i
comp_line = comp_line[:j] + s + comp_line[j:]
if comp_point >= j:
comp_point += len(s)
# create parser for subcommand
from rez.backport.importlib import import_module
module_name = "rez.cli.%s" % subcommand
mod = import_module(module_name)
parser = argparse.ArgumentParser()
mod.setup_parser(parser, completions=True)
# have to massage input a little so argcomplete behaves
cmd = "rez-%s" % subcommand
comp_line = cmd + comp_line
comp_point += len(cmd)
# generate the completions
from rez.cli._complete_util import RezCompletionFinder
completer = RezCompletionFinder(parser=parser,
comp_line=comp_line,
comp_point=comp_point)
words = completer.completions
print(' '.join(words))
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
lgpl-3.0
| 7,186,320,333,218,014,000
| 29.226087
| 79
| 0.609033
| false
| 3.832415
| false
| false
| false
|
amoskong/scylla-cluster-tests
|
sdcm/utils/common.py
|
1
|
48757
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2017 ScyllaDB
# pylint: disable=too-many-lines
import itertools
import os
import logging
import random
import socket
import time
import datetime
import errno
import threading
import select
import shutil
import copy
from functools import wraps
from enum import Enum
from collections import defaultdict
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from urlparse import urlparse
import hashlib
import boto3
import libcloud.storage.providers
import libcloud.storage.types
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
LOGGER = logging.getLogger('utils')
def _remote_get_hash(remoter, file_path):
try:
result = remoter.run('md5sum {}'.format(file_path), verbose=True)
return result.stdout.strip().split()[0]
except Exception as details: # pylint: disable=broad-except
LOGGER.error(str(details))
return None
def _remote_get_file(remoter, src, dst, user_agent=None):
cmd = 'curl -L {} -o {}'.format(src, dst)
if user_agent:
cmd += ' --user-agent %s' % user_agent
return remoter.run(cmd, ignore_status=True)
def remote_get_file(remoter, src, dst, hash_expected=None, retries=1, user_agent=None): # pylint: disable=too-many-arguments
_remote_get_file(remoter, src, dst, user_agent)
if not hash_expected:
return
while retries > 0 and _remote_get_hash(remoter, dst) != hash_expected:
_remote_get_file(remoter, src, dst, user_agent)
retries -= 1
assert _remote_get_hash(remoter, dst) == hash_expected
class retrying(object): # pylint: disable=invalid-name,too-few-public-methods
"""
    Used as a decorator to retry a function call that may fail with one of the allowed exceptions
"""
def __init__(self, n=3, sleep_time=1, allowed_exceptions=(Exception,), message=""):
        assert n > 0, "Number of retries parameter should be greater than 0 (current: %s)" % n
self.n = n # number of times to retry # pylint: disable=invalid-name
self.sleep_time = sleep_time # number seconds to sleep between retries
self.allowed_exceptions = allowed_exceptions # if Exception is not allowed will raise
self.message = message # string that will be printed between retries
def __call__(self, func):
@wraps(func)
def inner(*args, **kwargs):
if self.n == 1:
# there is no need to retry
return func(*args, **kwargs)
for i in xrange(self.n):
try:
if self.message:
LOGGER.info("%s [try #%s]", self.message, i)
return func(*args, **kwargs)
except self.allowed_exceptions as ex:
LOGGER.debug("'%s': failed with '%r', retrying [#%s]", func.func_name, ex, i)
time.sleep(self.sleep_time)
if i == self.n - 1:
LOGGER.error("'%s': Number of retries exceeded!", func.func_name)
raise
return inner
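# Illustrative usage sketch of the retrying decorator (the decorated function below is
# hypothetical and not part of this module):
#
#     @retrying(n=5, sleep_time=2, allowed_exceptions=(IOError,), message="reading status file")
#     def read_status():
#         with open('/tmp/status') as status_file:
#             return status_file.read()
#
# read_status() would be attempted up to 5 times, sleeping 2 seconds between failed attempts.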
def log_run_info(arg):
"""
Decorator that prints BEGIN before the function runs and END when function finished running.
Uses function name as a name of action or string that can be given to the decorator.
If the function is a method of a class object, the class name will be printed out.
Usage examples:
@log_run_info
def foo(x, y=1):
pass
In: foo(1)
Out:
BEGIN: foo
END: foo (ran 0.000164)s
@log_run_info("Execute nemesis")
def disrupt():
pass
In: disrupt()
Out:
BEGIN: Execute nemesis
END: Execute nemesis (ran 0.000271)s
"""
def _inner(func, msg=None):
@wraps(func)
def inner(*args, **kwargs):
class_name = ""
if args and func.__name__ in dir(args[0]):
class_name = " <%s>" % args[0].__class__.__name__
action = "%s%s" % (msg, class_name)
start_time = datetime.datetime.now()
LOGGER.debug("BEGIN: %s", action)
res = func(*args, **kwargs)
end_time = datetime.datetime.now()
LOGGER.debug("END: %s (ran %ss)", action, (end_time - start_time).total_seconds())
return res
return inner
if callable(arg): # when decorator is used without a string message
return _inner(arg, arg.__name__)
else:
return lambda f: _inner(f, arg)
class Distro(Enum):
UNKNOWN = 0
CENTOS7 = 1
RHEL7 = 2
UBUNTU14 = 3
UBUNTU16 = 4
UBUNTU18 = 5
DEBIAN8 = 6
DEBIAN9 = 7
def get_data_dir_path(*args):
import sdcm
sdcm_path = os.path.realpath(sdcm.__path__[0])
data_dir = os.path.join(sdcm_path, "../data_dir", *args)
return os.path.abspath(data_dir)
def get_job_name():
return os.environ.get('JOB_NAME', 'local_run')
def verify_scylla_repo_file(content, is_rhel_like=True):
LOGGER.info('Verifying Scylla repo file')
if is_rhel_like:
body_prefix = ['#', '[scylla', 'name=', 'baseurl=', 'enabled=', 'gpgcheck=', 'type=',
'skip_if_unavailable=', 'gpgkey=', 'repo_gpgcheck=', 'enabled_metadata=']
else:
body_prefix = ['#', 'deb']
for line in content.split('\n'):
valid_prefix = False
for prefix in body_prefix:
if line.startswith(prefix) or not line.strip():
valid_prefix = True
break
LOGGER.debug(line)
assert valid_prefix, 'Repository content has invalid line: {}'.format(line)
def remove_comments(data):
"""Remove comments line from data
Remove any string which is start from # in data
Arguments:
data {str} -- data expected the command output, file contents
"""
return '\n'.join([i.strip() for i in data.split('\n') if not i.startswith('#')])
class S3Storage(object):
bucket_name = 'cloudius-jenkins-test'
enable_multipart_threshold_size = 1024 * 1024 * 1024 # 1GB
multipart_chunksize = 50 * 1024 * 1024 # 50 MB
num_download_attempts = 5
def __init__(self, bucket=None):
if bucket:
self.bucket_name = bucket
self._bucket = boto3.resource("s3").Bucket(name=self.bucket_name)
self.transfer_config = boto3.s3.transfer.TransferConfig(multipart_threshold=self.enable_multipart_threshold_size,
multipart_chunksize=self.multipart_chunksize,
num_download_attempts=self.num_download_attempts)
def get_s3_fileojb(self, key):
objects = []
for obj in self._bucket.objects.filter(Prefix=key):
objects.append(obj)
return objects
def search_by_path(self, path=''):
files = []
for obj in self._bucket.objects.filter(Prefix=path):
files.append(obj.key)
return files
def generate_url(self, file_path, dest_dir=''):
bucket_name = self.bucket_name
file_name = os.path.basename(os.path.normpath(file_path))
return "https://{bucket_name}.s3.amazonaws.com/{dest_dir}/{file_name}".format(dest_dir=dest_dir,
file_name=file_name,
bucket_name=bucket_name)
def upload_file(self, file_path, dest_dir=''):
s3_url = self.generate_url(file_path, dest_dir)
s3_obj = "{}/{}".format(dest_dir, os.path.basename(file_path))
try:
LOGGER.info("Uploading '{file_path}' to {s3_url}".format(file_path=file_path, s3_url=s3_url))
print "Uploading '{file_path}' to {s3_url}".format(file_path=file_path, s3_url=s3_url)
self._bucket.upload_file(Filename=file_path,
Key=s3_obj,
Config=self.transfer_config)
LOGGER.info("Uploaded to {0}".format(s3_url))
LOGGER.info("Set public read access")
self.set_public_access(key=s3_obj)
return s3_url
except Exception as details: # pylint: disable=broad-except
LOGGER.debug("Unable to upload to S3: %s", details)
return ""
def set_public_access(self, key):
acl_obj = boto3.resource('s3').ObjectAcl(self.bucket_name, key)
grants = copy.deepcopy(acl_obj.grants)
grantees = {
'Grantee': {
"Type": "Group",
"URI": "http://acs.amazonaws.com/groups/global/AllUsers"
},
'Permission': "READ"
}
grants.append(grantees)
acl_obj.put(ACL='', AccessControlPolicy={'Grants': grants, 'Owner': acl_obj.owner})
def download_file(self, link, dst_dir):
key_name = link.replace("https://{0.bucket_name}.s3.amazonaws.com/".format(self), "")
file_name = os.path.basename(key_name)
try:
LOGGER.info("Downloading {0} from {1}".format(key_name, self.bucket_name))
self._bucket.download_file(Key=key_name,
Filename=os.path.join(dst_dir, file_name),
Config=self.transfer_config)
LOGGER.info("Downloaded finished")
return os.path.join(os.path.abspath(dst_dir), file_name)
except Exception as details: # pylint: disable=broad-except
LOGGER.warning("File {} is not downloaded by reason: {}".format(key_name, details))
return ""
def get_latest_gemini_version():
bucket_name = 'downloads.scylladb.com'
results = S3Storage(bucket_name).search_by_path(path='gemini')
versions = set()
for result_file in results:
versions.add(result_file.split('/')[1])
return str(sorted(versions)[-1])
def list_logs_by_test_id(test_id):
log_types = ['db-cluster', 'monitor-set', 'loader-set', 'sct-runner',
'prometheus', 'grafana',
'job', 'monitoring_data_stack', 'events']
results = []
if not test_id:
return results
def convert_to_date(date_str):
try:
t = datetime.datetime.strptime(date_str, "%Y%m%d_%H%M%S") # pylint: disable=invalid-name
except ValueError:
try:
t = datetime.datetime.strptime(date_str, "%Y_%m_%d_%H_%M_%S") # pylint: disable=invalid-name
except ValueError:
t = datetime.datetime(1999, 1, 1, 1, 1, 1) # pylint: disable=invalid-name
return t # pylint: disable=invalid-name
log_files = S3Storage().search_by_path(path=test_id)
for log_file in log_files:
for log_type in log_types:
if log_type in log_file:
results.append({"file_path": log_file,
"type": log_type,
"link": "https://{}.s3.amazonaws.com/{}".format(S3Storage.bucket_name, log_file),
"date": convert_to_date(log_file.split('/')[1])
})
break
results = sorted(results, key=lambda x: x["date"])
return results
def all_aws_regions():
client = boto3.client('ec2')
return [region['RegionName'] for region in client.describe_regions()['Regions']]
AWS_REGIONS = all_aws_regions()
class ParallelObject(object): # pylint: disable=too-few-public-methods
"""
    Run a function over the supplied objects in parallel using threads.
"""
def __init__(self, objects, timeout=6, num_workers=None, disable_logging=False):
self.objects = objects
self.timeout = timeout
self.num_workers = num_workers
self.disable_logging = disable_logging
def run(self, func):
def func_wrap(fun):
def inner(*args, **kwargs):
thread_name = threading.current_thread().name
fun_args = args
fun_kwargs = kwargs
fun_name = fun.__name__
LOGGER.debug("[{thread_name}] {fun_name}({fun_args}, {fun_kwargs})".format(thread_name=thread_name,
fun_name=fun_name,
fun_args=fun_args,
fun_kwargs=fun_kwargs))
return_val = fun(*args, **kwargs)
LOGGER.debug("[{thread_name}] Done.".format(thread_name=thread_name))
return return_val
return inner
with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
LOGGER.debug("Executing in parallel: '{}' on {}".format(func.__name__, self.objects))
if not self.disable_logging:
func = func_wrap(func)
return list(pool.map(func, self.objects, timeout=self.timeout))
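# Illustrative usage sketch of ParallelObject (the node objects below are hypothetical and
# not part of this module):
#
#     def reboot_node(node):
#         node.reboot()
#
#     ParallelObject(db_cluster.nodes, timeout=600, num_workers=4).run(reboot_node)
#
# Each object in the list is passed to the function in its own worker thread.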
def clean_cloud_instances(tags_dict):
"""
Remove all instances with specific tags from both AWS/GCE
:param tags_dict: a dict of the tag to select the instances,e.x. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:return: None
"""
clean_instances_aws(tags_dict)
clean_elastic_ips_aws(tags_dict)
clean_instances_gce(tags_dict)
def aws_tags_to_dict(tags_list):
tags_dict = {}
if tags_list:
for item in tags_list:
tags_dict[item["Key"]] = item["Value"]
return tags_dict
def list_instances_aws(tags_dict=None, region_name=None, running=False, group_as_region=False, verbose=False):
"""
    List all AWS instances with specific tags
:param tags_dict: a dict of the tag to select the instances, e.x. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:param region_name: name of the region to list
:param running: get all running instances
:param group_as_region: if True the results would be grouped into regions
:param verbose: if True will log progress information
:return: instances dict where region is a key
"""
instances = {}
aws_regions = [region_name] if region_name else AWS_REGIONS
def get_instances(region):
if verbose:
LOGGER.info('Going to list aws region "%s"', region)
time.sleep(random.random())
client = boto3.client('ec2', region_name=region)
custom_filter = []
if tags_dict:
custom_filter = [{'Name': 'tag:{}'.format(key), 'Values': [value]} for key, value in tags_dict.items()]
response = client.describe_instances(Filters=custom_filter)
instances[region] = [instance for reservation in response['Reservations'] for instance in reservation[
'Instances']]
if verbose:
LOGGER.info("%s: done [%s/%s]", region, len(instances.keys()), len(aws_regions))
ParallelObject(aws_regions, timeout=100).run(get_instances)
for curr_region_name in instances:
if running:
instances[curr_region_name] = [i for i in instances[curr_region_name] if i['State']['Name'] == 'running']
else:
instances[curr_region_name] = [i for i in instances[curr_region_name]
if not i['State']['Name'] == 'terminated']
if not group_as_region:
instances = list(itertools.chain(*instances.values())) # flatten the list of lists
total_items = len(instances)
else:
total_items = sum([len(value) for _, value in instances.items()])
if verbose:
LOGGER.info("Found total of %s instances.", len(total_items))
return instances
def clean_instances_aws(tags_dict):
"""
    Remove all AWS instances with specific tags
:param tags_dict: a dict of the tag to select the instances, e.x. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:return: None
"""
assert tags_dict, "tags_dict not provided (can't clean all instances)"
aws_instances = list_instances_aws(tags_dict=tags_dict, group_as_region=True)
for region, instance_list in aws_instances.items():
client = boto3.client('ec2', region_name=region)
for instance in instance_list:
tags = aws_tags_to_dict(instance.get('Tags'))
name = tags.get("Name", "N/A")
instance_id = instance['InstanceId']
LOGGER.info("Going to delete '{instance_id}' [name={name}] ".format(instance_id=instance_id, name=name))
response = client.terminate_instances(InstanceIds=[instance_id])
LOGGER.debug("Done. Result: %s\n", response['TerminatingInstances'])
def list_elastic_ips_aws(tags_dict=None, region_name=None, group_as_region=False, verbose=False):
"""
    List all AWS elastic IPs with specific tags
:param tags_dict: a dict of the tag to select the instances, e.x. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:param region_name: name of the region to list
:param group_as_region: if True the results would be grouped into regions
:param verbose: if True will log progress information
:return: instances dict where region is a key
"""
elastic_ips = {}
aws_regions = [region_name] if region_name else AWS_REGIONS
def get_elastic_ips(region):
if verbose:
LOGGER.info('Going to list aws region "%s"', region)
time.sleep(random.random())
client = boto3.client('ec2', region_name=region)
custom_filter = []
if tags_dict:
custom_filter = [{'Name': 'tag:{}'.format(key), 'Values': [value]} for key, value in tags_dict.items()]
response = client.describe_addresses(Filters=custom_filter)
elastic_ips[region] = [ip for ip in response['Addresses']]
if verbose:
LOGGER.info("%s: done [%s/%s]", region, len(elastic_ips.keys()), len(aws_regions))
ParallelObject(aws_regions, timeout=100).run(get_elastic_ips)
if not group_as_region:
elastic_ips = list(itertools.chain(*elastic_ips.values())) # flatten the list of lists
        total_items = len(elastic_ips)
else:
total_items = sum([len(value) for _, value in elastic_ips.items()])
if verbose:
LOGGER.info("Found total of %s ips.", total_items)
return elastic_ips
def clean_elastic_ips_aws(tags_dict):
"""
    Remove all AWS elastic IPs with specific tags
:param tags_dict: a dict of the tag to select the instances, e.x. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:return: None
"""
assert tags_dict, "tags_dict not provided (can't clean all instances)"
aws_instances = list_elastic_ips_aws(tags_dict=tags_dict, group_as_region=True)
for region, eip_list in aws_instances.items():
client = boto3.client('ec2', region_name=region)
for eip in eip_list:
association_id = eip.get('AssociationId', None)
if association_id:
response = client.disassociate_address(AssociationId=association_id)
LOGGER.debug("disassociate_address. Result: %s\n", response)
allocation_id = eip['AllocationId']
LOGGER.info("Going to release '{allocation_id}' [public_ip={public_ip}] ".format(
allocation_id=allocation_id, public_ip=eip['PublicIp']))
response = client.release_address(AllocationId=allocation_id)
LOGGER.debug("Done. Result: %s\n", response)
def get_all_gce_regions():
from sdcm.keystore import KeyStore
gcp_credentials = KeyStore().get_gcp_credentials()
gce_driver = get_driver(Provider.GCE)
compute_engine = gce_driver(gcp_credentials["project_id"] + "@appspot.gserviceaccount.com",
gcp_credentials["private_key"],
project=gcp_credentials["project_id"])
all_gce_regions = [region_obj.name for region_obj in compute_engine.region_list]
return all_gce_regions
def gce_meta_to_dict(metadata):
meta_dict = {}
data = metadata.get("items")
if data:
for item in data:
key = item["key"]
if key: # sometimes key is empty string
meta_dict[key] = item["value"]
return meta_dict
def filter_gce_by_tags(tags_dict, instances):
filtered_instances = []
for instance in instances:
tags = gce_meta_to_dict(instance.extra['metadata'])
found_keys = set(k for k in tags_dict if k in tags and tags_dict[k] == tags[k])
if found_keys == set(tags_dict.keys()):
filtered_instances.append(instance)
return filtered_instances
def list_instances_gce(tags_dict=None, running=False, verbose=False):
"""
    List all GCE instances with specific tags
:param tags_dict: a dict of the tag to select the instances, e.x. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:return: None
"""
# avoid cyclic dependency issues, since too many things import utils.py
from sdcm.keystore import KeyStore
gcp_credentials = KeyStore().get_gcp_credentials()
gce_driver = get_driver(Provider.GCE)
compute_engine = gce_driver(gcp_credentials["project_id"] + "@appspot.gserviceaccount.com",
gcp_credentials["private_key"],
project=gcp_credentials["project_id"])
if verbose:
LOGGER.info("Going to get all instances from GCE")
all_gce_instances = compute_engine.list_nodes()
# filter instances by tags since libcloud list_nodes() doesn't offer any filtering
if tags_dict:
instances = filter_gce_by_tags(tags_dict=tags_dict, instances=all_gce_instances)
else:
instances = all_gce_instances
if running:
# https://libcloud.readthedocs.io/en/latest/compute/api.html#libcloud.compute.types.NodeState
instances = [i for i in instances if i.state == 'running']
else:
instances = [i for i in instances if not i.state == 'terminated']
if verbose:
LOGGER.info("Done. Found total of %s instances.", len(instances))
return instances
def clean_instances_gce(tags_dict):
"""
    Remove all GCE instances with specific tags
:param tags_dict: a dict of the tag to select the instances, e.x. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:return: None
"""
assert tags_dict, "tags_dict not provided (can't clean all instances)"
all_gce_instances = list_instances_gce(tags_dict=tags_dict)
for instance in all_gce_instances:
LOGGER.info("Going to delete: {}".format(instance.name))
# https://libcloud.readthedocs.io/en/latest/compute/api.html#libcloud.compute.base.Node.destroy
res = instance.destroy()
LOGGER.info("{} deleted. res={}".format(instance.name, res))
_SCYLLA_AMI_CACHE = defaultdict(dict)
def get_scylla_ami_versions(region):
"""
get the list of all the formal scylla ami from specific region
:param region: the aws region to look in
:return: list of ami data
:rtype: list
"""
if _SCYLLA_AMI_CACHE[region]:
return _SCYLLA_AMI_CACHE[region]
ec2 = boto3.client('ec2', region_name=region)
response = ec2.describe_images(
Owners=['797456418907'], # ScyllaDB
Filters=[
{'Name': 'name', 'Values': ['ScyllaDB *']},
],
)
_SCYLLA_AMI_CACHE[region] = sorted(response['Images'],
key=lambda x: x['CreationDate'],
reverse=True)
return _SCYLLA_AMI_CACHE[region]
_S3_SCYLLA_REPOS_CACHE = defaultdict(dict)
def get_s3_scylla_repos_mapping(dist_type='centos', dist_version=None):
"""
get the mapping from version prefixes to rpm .repo or deb .list files locations
:param dist_type: which distro to look up centos/ubuntu/debian
    :param dist_version: family name of the distro version
:return: a mapping of versions prefixes to repos
:rtype: dict
"""
if (dist_type, dist_version) in _S3_SCYLLA_REPOS_CACHE:
return _S3_SCYLLA_REPOS_CACHE[(dist_type, dist_version)]
s3_client = boto3.client('s3')
bucket = 'downloads.scylladb.com'
if dist_type == 'centos':
response = s3_client.list_objects(Bucket=bucket, Prefix='rpm/centos/', Delimiter='/')
for repo_file in response['Contents']:
filename = os.path.basename(repo_file['Key'])
# only if path look like 'rpm/centos/scylla-1.3.repo', we deem it formal one
if filename.startswith('scylla-') and filename.endswith('.repo'):
version_prefix = filename.replace('.repo', '').split('-')[-1]
_S3_SCYLLA_REPOS_CACHE[(
dist_type, dist_version)][version_prefix] = "https://s3.amazonaws.com/{bucket}/{path}".format(bucket=bucket, path=repo_file['Key'])
elif dist_type == 'ubuntu' or dist_type == 'debian':
response = s3_client.list_objects(Bucket=bucket, Prefix='deb/{}/'.format(dist_type), Delimiter='/')
for repo_file in response['Contents']:
filename = os.path.basename(repo_file['Key'])
# only if path look like 'deb/debian/scylla-3.0-jessie.list', we deem it formal one
if filename.startswith('scylla-') and filename.endswith('-{}.list'.format(dist_version)):
version_prefix = filename.replace('-{}.list'.format(dist_version), '').split('-')[-1]
_S3_SCYLLA_REPOS_CACHE[(
dist_type, dist_version)][version_prefix] = "https://s3.amazonaws.com/{bucket}/{path}".format(bucket=bucket, path=repo_file['Key'])
else:
raise NotImplementedError("[{}] is not yet supported".format(dist_type))
return _S3_SCYLLA_REPOS_CACHE[(dist_type, dist_version)]
def pid_exists(pid):
"""
Return True if a given PID exists.
:param pid: Process ID number.
"""
try:
os.kill(pid, 0)
except OSError as detail:
if detail.errno == errno.ESRCH:
return False
return True
def safe_kill(pid, signal):
"""
Attempt to send a signal to a given process that may or may not exist.
:param signal: Signal number.
"""
try:
os.kill(pid, signal)
return True
except Exception: # pylint: disable=broad-except
return False
class FileFollowerIterator(object): # pylint: disable=too-few-public-methods
def __init__(self, filename, thread_obj):
self.filename = filename
self.thread_obj = thread_obj
def __iter__(self):
with open(self.filename, 'r') as input_file:
line = ''
while not self.thread_obj.stopped():
poller = select.poll() # pylint: disable=no-member
poller.register(input_file, select.POLLIN) # pylint: disable=no-member
if poller.poll(100):
line += input_file.readline()
if not line or not line.endswith('\n'):
time.sleep(0.1)
continue
yield line
line = ''
yield line
class FileFollowerThread(object):
def __init__(self):
self.executor = concurrent.futures.ThreadPoolExecutor(1)
self._stop_event = threading.Event()
self.future = None
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def run(self):
raise NotImplementedError()
def start(self):
self.future = self.executor.submit(self.run)
return self.future
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
def follow_file(self, filename):
return FileFollowerIterator(filename, self)
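# Illustrative usage sketch of FileFollowerThread (the subclass and log path below are
# hypothetical and not part of this module):
#
#     class ErrorWatcher(FileFollowerThread):
#         def __init__(self, log_path):
#             super(ErrorWatcher, self).__init__()
#             self.log_path = log_path
#
#         def run(self):
#             for line in self.follow_file(self.log_path):
#                 if 'ERROR' in line:
#                     LOGGER.error("found error line: %s", line)
#
#     with ErrorWatcher('/tmp/node.log'):
#         pass  # do work here while the log is followed in a background thread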
class ScyllaCQLSession(object):
def __init__(self, session, cluster):
self.session = session
self.cluster = cluster
def __enter__(self):
return self.session
def __exit__(self, exc_type, exc_val, exc_tb):
self.cluster.shutdown()
class MethodVersionNotFound(Exception):
pass
class version(object): # pylint: disable=invalid-name,too-few-public-methods
VERSIONS = {}
"""
Runs a method according to the version attribute of the class method
Limitations: currently, can't work if the same method name in the same file used in different
classes
Example:
In [3]: class VersionedClass(object):
...: def __init__(self, current_version):
...: self.version = current_version
...:
...: @version("1.2")
...: def setup(self):
...: return "1.2"
...:
...: @version("2")
...: def setup(self):
...: return "2"
In [4]: vc = VersionedClass("2")
In [5]: vc.setup()
Out[5]: '2'
In [6]: vc = VersionedClass("1.2")
In [7]: vc.setup()
Out[7]: '1.2'
"""
def __init__(self, ver):
self.version = ver
def __call__(self, func):
self.VERSIONS[(self.version, func.func_name, func.func_code.co_filename)] = func
@wraps(func)
def inner(*args, **kwargs):
cls_self = args[0]
func_to_run = self.VERSIONS.get((cls_self.version, func.func_name, func.func_code.co_filename))
if func_to_run:
return func_to_run(*args, **kwargs)
else:
raise MethodVersionNotFound("Method '{}' with version '{}' not defined in '{}'!".format(
func.func_name,
cls_self.version,
cls_self.__class__.__name__))
return inner
def get_free_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
addr = sock.getsockname()
port = addr[1]
sock.close()
return port
def get_my_ip():
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
return ip
def get_branched_ami(ami_version, region_name):
"""
Get a list of AMIs, based on version match
:param ami_version: branch version to look for, ex. 'branch-2019.1:latest', 'branch-3.1:all'
:param region_name: the region to look AMIs in
:return: list of ec2.images
"""
branch, build_id = ami_version.split(':')
ec2 = boto3.resource('ec2', region_name=region_name)
LOGGER.info("Looking for AMI match [%s]", ami_version)
if build_id == 'latest' or build_id == 'all':
filters = [{'Name': 'tag:branch', 'Values': [branch]}]
else:
filters = [{'Name': 'tag:branch', 'Values': [branch]}, {'Name': 'tag:build-id', 'Values': [build_id]}]
amis = list(ec2.images.filter(Filters=filters))
amis = sorted(amis, key=lambda x: x.creation_date, reverse=True)
assert amis, "AMI matching [{}] wasn't found on {}".format(ami_version, region_name)
if build_id == 'all':
return amis
else:
return amis[:1]
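# Illustrative calls (the region name below is an assumption, not part of this module):
#
#     latest_ami = get_branched_ami('branch-2019.1:latest', region_name='eu-west-1')[0]
#     all_branch_amis = get_branched_ami('branch-3.1:all', region_name='eu-west-1')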
def get_ami_tags(ami_id, region_name):
"""
Get a list of tags of a specific AMI
:param ami_id:
:param region_name: the region to look AMIs in
:return: dict of tags
"""
ec2 = boto3.resource('ec2', region_name=region_name)
test_image = ec2.Image(ami_id)
if test_image.tags:
return {i['Key']: i['Value'] for i in test_image.tags}
else:
return {}
def tag_ami(ami_id, tags_dict, region_name):
tags = [{'Key': key, 'Value': value} for key, value in tags_dict.items()]
ec2 = boto3.resource('ec2', region_name=region_name)
test_image = ec2.Image(ami_id)
tags += test_image.tags
test_image.create_tags(Tags=tags)
LOGGER.info("tagged %s with %s", ami_id, tags)
def get_non_system_ks_cf_list(loader_node, db_node, request_timeout=300, filter_out_table_with_counter=False,
filter_out_mv=False):
"""Get all not system keyspace.tables pairs
Arguments:
loader_node {BaseNode} -- LoaderNoder to send request
db_node_ip {str} -- ip of db_node
"""
# pylint: disable=too-many-locals
def get_tables_columns_list(entity_type):
if entity_type == 'view':
cmd = "paging off; SELECT keyspace_name, view_name FROM system_schema.views"
else:
cmd = "paging off; SELECT keyspace_name, table_name, type FROM system_schema.columns"
result = loader_node.run_cqlsh(cmd=cmd, timeout=request_timeout, verbose=False, target_db_node=db_node,
split=True, connect_timeout=request_timeout)
if not result:
return []
splitter_result = []
for row in result[4:]:
if '|' not in row:
continue
if row.startswith('system'):
continue
splitter_result.append(row.split('|'))
return splitter_result
views_list = set()
if filter_out_mv:
tables = get_tables_columns_list('view')
for table in tables:
views_list.add('.'.join([name.strip() for name in table[:2]]))
views_list = list(views_list)
result = get_tables_columns_list('column')
if not result:
return []
avaialable_ks_cf = defaultdict(list)
for row in result:
ks_cf_name = '.'.join([name.strip() for name in row[:2]])
if filter_out_mv and ks_cf_name in views_list:
continue
column_type = row[2].strip()
avaialable_ks_cf[ks_cf_name].append(column_type)
if filter_out_table_with_counter:
for ks_cf, column_types in avaialable_ks_cf.items():
if 'counter' in column_types:
avaialable_ks_cf.pop(ks_cf)
return avaialable_ks_cf.keys()
def remove_files(path):
LOGGER.debug("Remove path %s", path)
try:
if os.path.isdir(path):
shutil.rmtree(path=path, ignore_errors=True)
if os.path.isfile(path):
os.remove(path)
except Exception as details: # pylint: disable=broad-except
LOGGER.error("Error during remove archived logs %s", details)
def format_timestamp(timestamp):
return datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
def makedirs(path):
"""
TODO: when move to python3, this function will be replaced
with os.makedirs function:
os.makedirs(name, mode=0o777, exist_ok=False)
"""
try:
os.makedirs(path)
except OSError:
if os.path.exists(path):
return
raise
def wait_ami_available(client, ami_id):
"""Wait while ami_id become available
Wait while ami_id become available, after
10 minutes return an error
Arguments:
client {boto3.EC2.Client} -- client of EC2 service
ami_id {str} -- ami id to check availability
"""
waiter = client.get_waiter('image_available')
waiter.wait(ImageIds=[ami_id],
WaiterConfig={
'Delay': 30,
'MaxAttempts': 20}
)
def update_certificates():
"""
Update the certificate of server encryption, which might be expired.
"""
try:
from sdcm.remote import LocalCmdRunner
localrunner = LocalCmdRunner()
localrunner.run('openssl x509 -req -in data_dir/ssl_conf/example/db.csr -CA data_dir/ssl_conf/cadb.pem -CAkey data_dir/ssl_conf/example/cadb.key -CAcreateserial -out data_dir/ssl_conf/db.crt -days 365')
localrunner.run('openssl x509 -enddate -noout -in data_dir/ssl_conf/db.crt')
except Exception as ex:
raise Exception('Failed to update certificates by openssl: %s' % ex)
def s3_download_dir(bucket, path, target):
"""
Downloads recursively the given S3 path to the target directory.
:param bucket: the name of the bucket to download from
:param path: The S3 directory to download.
:param target: the local directory to download the files to.
"""
client = boto3.client('s3')
# Handle missing / at end of prefix
if not path.endswith('/'):
path += '/'
if path.startswith('/'):
path = path[1:]
result = client.list_objects_v2(Bucket=bucket, Prefix=path)
# Download each file individually
for key in result['Contents']:
# Calculate relative path
rel_path = key['Key'][len(path):]
# Skip paths ending in /
if not key['Key'].endswith('/'):
local_file_path = os.path.join(target, rel_path)
# Make sure directories exist
local_file_dir = os.path.dirname(local_file_path)
makedirs(local_file_dir)
LOGGER.info("Downloading %s from s3 to %s", key['Key'], local_file_path)
client.download_file(bucket, key['Key'], local_file_path)
def gce_download_dir(bucket, path, target):
"""
Downloads recursively the given google storage path to the target directory.
:param bucket: the name of the bucket to download from
:param path: The google storage directory to download.
:param target: the local directory to download the files to.
"""
from sdcm.keystore import KeyStore
gcp_credentials = KeyStore().get_gcp_credentials()
gce_driver = libcloud.storage.providers.get_driver(libcloud.storage.types.Provider.GOOGLE_STORAGE)
driver = gce_driver(gcp_credentials["project_id"] + "@appspot.gserviceaccount.com",
gcp_credentials["private_key"],
project=gcp_credentials["project_id"])
if not path.endswith('/'):
path += '/'
if path.startswith('/'):
path = path[1:]
container = driver.get_container(container_name=bucket)
dir_listing = driver.list_container_objects(container, ex_prefix=path)
for obj in dir_listing:
rel_path = obj.name[len(path):]
local_file_path = os.path.join(target, rel_path)
local_file_dir = os.path.dirname(local_file_path)
makedirs(local_file_dir)
LOGGER.info("Downloading %s from gcp to %s", obj.name, local_file_path)
obj.download(destination_path=local_file_path, overwrite_existing=True)
def download_dir_from_cloud(url):
"""
download a directory from AWS S3 or from google storage
:param url: a url that starts with `s3://` or `gs://`
:return: the temp directory create with the downloaded content
"""
if url is None:
return url
md5 = hashlib.md5()
md5.update(url)
tmp_dir = os.path.join('/tmp/download_from_cloud', md5.hexdigest())
parsed = urlparse(url)
LOGGER.info("Downloading [%s] to [%s]", url, tmp_dir)
if os.path.isdir(tmp_dir) and os.listdir(tmp_dir):
LOGGER.warning("[{}] already exists, skipping download".format(tmp_dir))
else:
if url.startswith('s3://'):
s3_download_dir(parsed.hostname, parsed.path, tmp_dir)
elif url.startswith('gs://'):
gce_download_dir(parsed.hostname, parsed.path, tmp_dir)
elif os.path.isdir(url):
tmp_dir = url
else:
raise ValueError("Unsupported url schema or non-existing directory [{}]".format(url))
if not tmp_dir.endswith('/'):
tmp_dir += '/'
LOGGER.info("Finished downloading [%s]", url)
return tmp_dir
def filter_aws_instances_by_type(instances):
filtered_instances = {
"db_nodes": [],
"loader_nodes": [],
"monitor_nodes": []
}
for instance in instances:
name = [tag['Value']
for tag in instance['Tags'] if tag['Key'] == 'Name']
if 'db-node' in name[0]:
filtered_instances["db_nodes"].append(instance)
if 'monitor-node' in name[0]:
filtered_instances["monitor_nodes"].append(instance)
if 'loader-node' in name[0]:
filtered_instances["loader_nodes"].append(instance)
return filtered_instances
def filter_gce_instances_by_type(instances):
filtered_instances = {
"db_nodes": [],
"loader_nodes": [],
"monitor_nodes": []
}
for instance in instances:
if 'db-nodes' in instance.name:
filtered_instances["db_nodes"].append(instance)
if 'monitor-node' in instance.name:
filtered_instances["monitor_nodes"].append(instance)
if 'loader-node' in instance.name:
filtered_instances["loader_nodes"].append(instance)
return filtered_instances
BUILDERS = [
{
"name": "aws-scylla-qa-builder3",
"public_ip": "18.235.64.163",
"user": "jenkins",
"key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
},
{
"name": "aws-eu-west1-qa-builder1",
"public_ip": "18.203.132.87",
"user": "jenkins",
"key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
},
{
"name": "aws-eu-west1-qa-builder2",
"public_ip": "34.244.95.165",
"user": "jenkins",
"key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
},
{
"name": "aws-eu-west1-qa-builder4",
"public_ip": "34.253.184.117",
"user": "jenkins",
"key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
},
{
"name": "aws-eu-west1-qa-builder4",
"public_ip": "52.211.130.106",
"user": "jenkins",
"key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
}
]
def get_builder_by_test_id(test_id):
from sdcm.remote import RemoteCmdRunner
base_path_on_builder = "/home/jenkins/slave/workspace"
found_builders = []
def search_test_id_on_builder(builder):
remoter = RemoteCmdRunner(builder['public_ip'],
user=builder['user'],
key_file=builder['key_file'])
LOGGER.info('Search on %s', builder['name'])
result = remoter.run("find {where} -name test_id | xargs grep -rl {test_id}".format(where=base_path_on_builder,
test_id=test_id),
ignore_status=True, verbose=False)
if not result.exited and not result.stderr:
path = result.stdout.strip()
LOGGER.info("Builder name %s, ip %s, folder %s", builder['name'], builder['public_ip'], path)
return {"builder": builder, "path": os.path.dirname(path)}
else:
LOGGER.info("Nothing found")
return None
search_obj = ParallelObject(BUILDERS, timeout=30, num_workers=len(BUILDERS))
results = search_obj.run(search_test_id_on_builder)
found_builders = [builder for builder in results if builder]
if not found_builders:
LOGGER.info("Nothing found for %s", test_id)
return found_builders
def get_post_behavior_actions(config):
action_per_type = {
"db_nodes": None,
"monitor_nodes": None,
"loader_nodes": None
}
for key in action_per_type:
config_key = 'post_behavior_{}'.format(key)
old_config_key = config.get('failure_post_behavior', 'destroy')
action_per_type[key] = config.get(config_key, old_config_key)
return action_per_type
def clean_aws_instances_according_post_behavior(params, config, logdir): # pylint: disable=invalid-name
status = get_testrun_status(params.get('TestId'), logdir)
def apply_action(instances, action):
if action == 'destroy':
instances_ids = [instance['InstanceId'] for instance in instances]
LOGGER.info('Clean next instances %s', instances_ids)
client.terminate_instances(InstanceIds=instances_ids)
elif action == 'keep-on-failure':
if status:
LOGGER.info('Run failed. Leave instances running')
else:
LOGGER.info('Run was Successful. Killing nodes')
apply_action(instances, action='destroy')
elif action == 'keep':
LOGGER.info('Leave instances running')
else:
LOGGER.warning('Unsupported action %s', action)
aws_instances = list_instances_aws(params, group_as_region=True)
for region, instances in aws_instances.items():
if not instances:
continue
client = boto3.client("ec2", region_name=region)
filtered_instances = filter_aws_instances_by_type(instances)
actions_per_type = get_post_behavior_actions(config)
for instance_set_type, action in actions_per_type.items():
LOGGER.info('Apply action "%s" for %s instances', action, instance_set_type)
apply_action(filtered_instances[instance_set_type], action)
def clean_gce_instances_according_post_behavior(params, config, logdir): # pylint: disable=invalid-name
status = get_testrun_status(params.get('TestId'), logdir)
def apply_action(instances, action):
if action == 'destroy':
            for instance in instances:
LOGGER.info('Destroying instance: %s', instance.name)
instance.destroy()
LOGGER.info('Destroyed instance: %s', instance.name)
elif action == 'keep-on-failure':
if status:
LOGGER.info('Run failed. Leave instances running')
else:
                LOGGER.info('Run was successful. Killing nodes')
                apply_action(instances, action='destroy')
        elif action == 'keep':
            LOGGER.info('Leave instances running')
else:
LOGGER.warning('Unsupported action %s', action)
gce_instances = list_instances_gce(params)
filtered_instances = filter_gce_instances_by_type(gce_instances)
actions_per_type = get_post_behavior_actions(config)
for instance_set_type, action in actions_per_type.items():
apply_action(filtered_instances[instance_set_type], action)
def search_test_id_in_latest(logdir):
from sdcm.remote import LocalCmdRunner
test_id = None
result = LocalCmdRunner().run('cat {0}/latest/test_id'.format(logdir), ignore_status=True)
if not result.exited and result.stdout:
test_id = result.stdout.strip()
LOGGER.info("Found latest test_id: {}".format(test_id))
LOGGER.info("Collect logs for test-run with test-id: {}".format(test_id))
else:
LOGGER.error('test_id not found. Exit code: %s; Error details %s', result.exited, result.stderr)
return test_id
def get_testrun_dir(base_dir, test_id=None):
from sdcm.remote import LocalCmdRunner
if not test_id:
test_id = search_test_id_in_latest(base_dir)
LOGGER.info('Search dir with logs locally for test id: %s', test_id)
search_cmd = "find {base_dir} -name test_id | xargs grep -rl {test_id}".format(**locals())
result = LocalCmdRunner().run(cmd=search_cmd, ignore_status=True)
LOGGER.info("Search result %s", result)
if result.exited == 0 and result.stdout:
found_dirs = result.stdout.strip().split('\n')
LOGGER.info(found_dirs)
return os.path.dirname(found_dirs[0])
LOGGER.info("No any dirs found locally for current test id")
return None
def get_testrun_status(test_id=None, logdir=None):
testrun_dir = get_testrun_dir(logdir, test_id)
status = None
if testrun_dir:
with open(os.path.join(testrun_dir, 'events_log/critical.log')) as f: # pylint: disable=invalid-name
status = f.readlines()
return status
def download_encrypt_keys():
"""
Download certificate files of encryption at-rest from S3 KeyStore
"""
from sdcm.keystore import KeyStore
ks = KeyStore()
if not os.path.exists('./data_dir/encrypt_conf/CA.pem'):
ks.download_file('CA.pem', './data_dir/encrypt_conf/CA.pem')
if not os.path.exists('./data_dir/encrypt_conf/SCYLLADB.pem'):
ks.download_file('SCYLLADB.pem', './data_dir/encrypt_conf/SCYLLADB.pem')
|
agpl-3.0
| -6,150,648,137,884,515,000
| 35.250558
| 210
| 0.595996
| false
| 3.759793
| true
| false
| false
|
MendeleievBros/Mendeleiev-Bros
|
mendeleiev_bros/escena_niveles.py
|
1
|
4374
|
# -*- coding: utf-8 -*-
import pilas
archi = open('datos.txt', 'r')
nivel = archi.readline()
pantalla = archi.readline()
idioma = archi.readline()
archi.close()
if idioma == "ES":
from modulos.ES import *
else:
from modulos.EN import *
class Elemento(pilas.actores.Texto):
def __init__(self, texto='', x=0, y=0, nivel=0):
pilas.actores.Texto.__init__(self, texto=texto, x=x, y=y, magnitud=10,
vertical=False, fuente="data/tipo_tabla.ttf", fijo=True, ancho=0)
self.color = pilas.colores.negro
self.nivel = nivel
class EscenaNiveles(pilas.escena.Base):
"Es la escena de presentación donde se elije el nivel."
def __init__(self):
pilas.escena.Base.__init__(self)
def leertxt(self):
archi = open('datos.txt', 'r')
linea = archi.readline()
archi.close()
return linea
def nivel(self, evento):
        # Go over the list of flags to check whether one of them was clicked
for elemento in self.elementos:
            # Check whether the mouse collides with the flag's area
if elemento.colisiona_con_un_punto(evento.x, evento.y):
if elemento.nivel <= int(self.nivel_guardado):
import escena_juego
pilas.cambiar_escena(escena_juego.Juego(elemento.nivel))
def cuando_vuelves(self):
import escena_menu
pilas.cambiar_escena(escena_menu.EscenaMenu())
def iniciar(self):
pilas.fondos.Fondo("data/guarida.jpg")
pilas.eventos.click_de_mouse.conectar(self.nivel)
self.elementos = []
self.candado = []
self.nivel_guardado = self.leertxt()
self.mostrar_tabla()
self.volver = pilas.actores.Boton(ruta_normal='data/volver.png',
ruta_over='data/volver.png')
self.volver.x = 50
self.volver.y = -140
self.volver.conectar_presionado(self.cuando_vuelves)
def candados(self):
        # show the padlocks for the levels that are not yet available
for elemento in self.elementos:
if elemento.nivel > int(self.nivel_guardado):
candado1 = pilas.actores.Actor("data/candado.png")
candado1.x = elemento.x
candado1.y = elemento.y
self.candado.append(candado1)
return True
def mostrar_tabla(self):
self.trans1 = pilas.actores.Actor("data/tabla.png")
self.elementos.append(Elemento(texto="H", x=-230, y=130, nivel=1))
self.elementos.append(Elemento(texto="Li", x=-230, y=90, nivel=3))
self.elementos.append(Elemento(texto="Na", x=-230, y=45, nivel=11))
self.elementos.append(Elemento(texto="K", x=-230, y=0, nivel=19))
self.elementos.append(Elemento(texto="Be", x=-205, y=90, nivel=4))
self.elementos.append(Elemento(texto="Mg", x=-205, y=45, nivel=12))
self.elementos.append(Elemento(texto="Ca", x=-205, y=0, nivel=20))
self.elementos.append(Elemento(texto="B", x=80, y=90, nivel=5))
self.elementos.append(Elemento(texto="Al", x=80, y=45, nivel=13))
self.elementos.append(Elemento(texto="Ge", x=80, y=0, nivel=21))
self.elementos.append(Elemento(texto="C", x=105, y=90, nivel=6))
self.elementos.append(Elemento(texto="Si", x=105, y=45, nivel=14))
self.elementos.append(Elemento(texto="Ga", x=105, y=0, nivel=22))
self.elementos.append(Elemento(texto="N", x=130, y=90, nivel=7))
self.elementos.append(Elemento(texto="P", x=130, y=45, nivel=15))
self.elementos.append(Elemento(texto="As", x=130, y=0, nivel=23))
self.elementos.append(Elemento(texto="O", x=155, y=90, nivel=8))
self.elementos.append(Elemento(texto="S", x=155, y=45, nivel=16))
self.elementos.append(Elemento(texto="Se", x=155, y=0, nivel=24))
self.elementos.append(Elemento(texto="F", x=180, y=90, nivel=9))
self.elementos.append(Elemento(texto="Cl", x=180, y=45, nivel=17))
self.elementos.append(Elemento(texto="Br", x=180, y=0, nivel=25))
self.elementos.append(Elemento(texto="He", x=210, y=130, nivel=2))
self.elementos.append(Elemento(texto="Ne", x=210, y=90, nivel=10))
self.elementos.append(Elemento(texto="Ar", x=210, y=45, nivel=18))
self.elementos.append(Elemento(texto="Kr", x=210, y=0, nivel=26))
self.candados()
|
gpl-3.0
| -6,599,656,830,202,659,000
| 41.427184
| 78
| 0.61762
| false
| 2.66301
| false
| false
| false
|
eranroz/dnase
|
src/data_provider/dataDownloader.py
|
1
|
18314
|
"""'
Script for download and installation of data and required programs
Some functions requires rsync
@see {transformWig} - another script for transformations
''"""
import argparse
import ftplib
from multiprocessing import Pool
import os
import urllib
import time
from config import DATA_DIR, BIN_DIR, OTHER_DATA, SIGNAL_DIR, WIG_TO_BIG_WIG, BIG_WIG_TO_BED_GRAPH, CHROM_SIZES,\
RAW_DATA_DIR
from data_provider import SeqLoader
SMOOTHING = 20
BED_GRAPH_DIR = os.path.join(DATA_DIR, 'bedGraph')
def setup_environment():
"""
Downloads some required programs from UCSC.
"""
tools = ["fetchChromSizes", "wigToBigWig", "bigWigToBedGraph", "bedGraphToBigWig"]
try:
import urllib.request
urlret = urllib.request.urlretrieve
except ImportError:
import urllib.urlretrieve
urlret = urllib.urlretrieve
for tool in tools:
if not os.path.exists(os.path.join(BIN_DIR, tool)):
urlret("http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/%s" % tool,
os.path.join(BIN_DIR, tool))
def download_dnase_human_data(ignore_files=None):
"""
Connects to genboree Epigenome atlas to download specific cell types
chromatin accessibility experiment results
@param ignore_files: files to ignore for example: '01679.DS17212.wig.gz'
"""
global SMOOTHING
if ignore_files is None:
ignore_files = []
epigenome_atlas = ftplib.FTP(host='ftp.genboree.org')
epigenome_atlas.login()
epigenome_atlas.cwd('EpigenomeAtlas/Current-Release/experiment-sample/Chromatin_Accessibility/')
dirs = epigenome_atlas.nlst()
print('Please select cell type:')
print('-1', 'All')
for i, d in enumerate(dirs):
print(i, d)
cell_type = input('Please enter number: ')
pool_process = Pool()
try:
cell_type = int(cell_type)
if cell_type >= len(dirs):
raise ValueError()
else:
if cell_type == -1:
sel_cell_types = list(dirs)
sel_cell_types = sel_cell_types[1:] # skip the meta dir
sel_cell_types = sel_cell_types[26:]
else:
sel_cell_types = [dirs[cell_type]]
sel_dir = ''
try:
for sel_dir in sel_cell_types:
epigenome_atlas.cwd(sel_dir)
print('cd: ', sel_dir)
wig_files = [fl for fl in epigenome_atlas.nlst() if fl[-6:] == 'wig.gz']
if cell_type > 0:
for i, fl in enumerate(wig_files):
print((i, fl))
selected_wig = input("Which file would you like to download? ")
selected_wigs = [wig_files[int(selected_wig)]]
else:
selected_wigs = wig_files
for selected_wig in selected_wigs:
if any(ig in selected_wig for ig in ignore_files):
print('Ignored:', selected_wig)
continue
if not os.path.exists(os.path.join(RAW_DATA_DIR, selected_wig)):
with open(os.path.join(DATA_DIR, selected_wig), 'wb') as dFile:
print(selected_wig)
epigenome_atlas.retrbinary('RETR %s' % selected_wig, dFile.write)
dFile.close()
print("%s download finished!" % selected_wig)
# create pickled small smoothed file
pool_process.apply_async(serialize_wig_file, (selected_wig,))
else:
print('Skipping - file already downloaded')
if sel_dir != dirs[-1]:
epigenome_atlas.cwd('..')
time.sleep(3) # sleep between directories moves
except KeyboardInterrupt:
print("KeyboardInterrupt: stopping downloading new files. Last dir: ", sel_dir)
epigenome_atlas.close()
pool_process.close()
pool_process.join()
except ValueError:
print("The data you enter couldn't be parsed as index")
def download_ncbi_histone(markers_to_download=None, markers_to_ignore=None,
by_experiments_dir='pub/geo/DATA/roadmapepigenomics/by_experiment/'):
"""
Downloads experiments results from NCBI.
@param markers_to_download: specific experiments to be downloaded. Default: histone modifications+mRNA-Seq and RRBS
@param markers_to_ignore: markers to ignore
@param by_experiments_dir: NCBI directory for downloading experiments
"""
if not markers_to_ignore:
markers_to_ignore = ['DNase']
import subprocess
import time
ncbi_ftp = ftplib.FTP(host='ftp.ncbi.nlm.nih.gov')
ncbi_ftp.login()
ncbi_ftp.cwd('/' + by_experiments_dir)
if markers_to_download is None:
experiments = ncbi_ftp.nlst('./')
local_path = os.path.join(OTHER_DATA, "markers")
if not os.path.exists(local_path):
os.mkdir(local_path)
markers_to_download = [ex for ex in experiments if (ex.startswith('H') or ex in ['mRNA-Seq', 'RRBS']) and not (
os.path.exists(local_path + '/' + ex) and len(os.listdir(local_path + '/' + ex)) > 2)]
enough_data = (ex for ex in markers_to_download if len(list(ncbi_ftp.nlst('./%s' % ex))) > 5)
for ex in enough_data:
print('Synchronizing %s' % ex)
if any(ignore in ex for ignore in markers_to_ignore):
continue
ex_dir = by_experiments_dir + ex
if os.path.exists(local_path + '/' + ex) and len(os.listdir(local_path + '/' + ex)) > 2:
print('Skipping ex')
continue
subprocess.call(
["rsync", "-azuP", "--exclude=*.bed.gz", "--include=*.wig.gz", "ftp.ncbi.nlm.nih.gov::%s" % ex_dir,
local_path])
time.sleep(5)
def download_from_source(source_path, file_format="bigWig"):
"""
Downloads based on a SOURCE file:
* each line in source contains a rsync directory
    * It looks for files.txt (if it exists) to get metadata on the downloaded files
@param file_format: file format to download
@param source_path: a path to a SOURCE file to which data will be downloaded
@return:
"""
import subprocess
import numpy as np
import re
with open(source_path, 'r') as source_file:
sources = list(source_file.readlines())
local_dir = os.path.dirname(source_path)
meta_data_keys = ['file']
meta_data = np.zeros((0, 1), dtype='S100')
meta_file_path = os.path.join(local_dir, 'files.txt')
for source in sources:
source = source.strip()
print('Download {} => {}'.format(source, local_dir))
subprocess.call(
["rsync", "-azuP", "--include=*.{}".format(file_format), "--include=files.txt", "--exclude=*", source,
local_dir])
if not os.path.exists(meta_file_path):
continue
with open(meta_file_path, 'r') as meta_file:
for track in meta_file.readlines():
# skip non relevant files
file_name, file_data = track.split('\t', 1)
if not file_name.endswith('.' + file_format):
continue
file_keys, file_values = zip(*re.findall('(.+?)=(.+?)[;\n$]', file_data))
file_keys = [key.strip() for key in file_keys]
new_meta_keys = [key for key in file_keys if key not in meta_data_keys]
if any(new_meta_keys):
meta_data_tmp = meta_data
meta_data = np.zeros((meta_data.shape[0], meta_data.shape[1] + len(new_meta_keys)), dtype='S100')
meta_data[:, 0: meta_data_tmp.shape[1]] = meta_data_tmp
meta_data_keys += new_meta_keys
file_keys = map(lambda k: meta_data_keys.index(k), file_keys)
new_row = np.zeros(meta_data.shape[1], dtype='S100')
new_row[0] = file_name
for meta_key, meta_value in zip(file_keys, file_values):
new_row[meta_key] = meta_value
meta_data = np.vstack((meta_data, new_row))
os.remove(meta_file_path) # delete the meta file (avoid conflict with other sources)
meta_data = np.vstack((meta_data_keys, meta_data))
np.savetxt(os.path.join(local_dir, 'metadata.csv'), meta_data, delimiter='\t', fmt="%s")
print('Consider to remove incorrect data! use the metadata.csv to find such data...')
def transform_ncbi(wig_directory=SIGNAL_DIR):
"""
    Transforms .wig.gz files in wig_directory to smoothed npz files
@param wig_directory: directory with cell types subdirectories, with wig files
"""
pool_process = Pool()
for cell in os.listdir(wig_directory):
cell_path = os.path.join(wig_directory, cell)
cell_files = os.listdir(cell_path)
for f in cell_files:
if not f.endswith('.wig.gz') or 'filtered-density' in f:
continue
output_file = f.replace('.gz', '').replace('.wig', '.%i.npz' % SMOOTHING)
if output_file in cell_files:
continue
pool_process.apply_async(process_ncbi_file, (os.path.join(cell_path, f),))
pool_process.close()
pool_process.join()
print('Finished transforming all files!')
def process_ncbi_file(wig_file):
"""
    Serialize the wig file into a smoothed npz/pickle file
    @param wig_file: wiggle file to transform
"""
print('Processing %s' % wig_file)
SeqLoader.wig_transform(wig_file, SMOOTHING)
print('end processing %s' % wig_file)
def transform_wig_files(directory=DATA_DIR):
"""
Transforms wig.gz files to npz files and archives to RAW_DATA_DIR
@param directory: directory with wig.gz files to transform
"""
pool_process = Pool()
for f in [f for f in os.listdir(directory) if f.endswith('.wig.gz')]:
pool_process.apply_async(serialize_wig_file, (f, directory))
pool_process.close()
pool_process.join()
def serialize_wig_file(wig_file, directory=DATA_DIR):
"""
serialize wig file to npz file
@param directory: directory in which the wig file placed
@param wig_file: wig file to npz/pickle
"""
SeqLoader.wig_transform(os.path.join(directory, wig_file), SMOOTHING)
print(os.path.join(directory, wig_file), '-->', os.path.join(RAW_DATA_DIR, wig_file))
os.rename(os.path.join(directory, wig_file), os.path.join(RAW_DATA_DIR, wig_file))
def serialize_dir(in_directory=RAW_DATA_DIR, out_directory=SIGNAL_DIR, file_type='bigWig'):
"""
Serialize bigwig file to npz file
@param file_type: file types to serialize
@param out_directory: output directory
@param in_directory: input directory
"""
import tempfile
import subprocess
if file_type == 'wig':
return transform_wig_files()
if file_type != 'bigWig':
raise NotImplementedError
for filename in os.listdir(in_directory):
if not filename.endswith(file_type):
continue
src_file = os.path.join(in_directory, filename)
dest_file = os.path.join(out_directory, filename.replace('.' + file_type, ''))
if os.path.exists(dest_file+'.npz'):
continue
with tempfile.NamedTemporaryFile('w+', encoding='ascii') as tmp_file:
subprocess.call([BIG_WIG_TO_BED_GRAPH, src_file, tmp_file.name])
seq = SeqLoader.load_bg(tmp_file.name)
SeqLoader.save_result_dict(dest_file, seq)
print('Finish')
def wig_to_bed_graph(cur_trans):
"""
Transforms wig file to bed graph file
@param cur_trans: file to transform as 3-tuple (original.wig, temp.bw, result.bg))
"""
import subprocess
print('Transforming')
print('->'.join(cur_trans))
subprocess.call([WIG_TO_BIG_WIG, cur_trans[0], CHROM_SIZES, cur_trans[1]])
subprocess.call([BIG_WIG_TO_BED_GRAPH, cur_trans[1], cur_trans[2]])
os.remove(cur_trans[1])
print('Completed')
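# Example (illustrative): the two subprocess calls above correspond to the
# UCSC command-line tools (the exact binary paths and the chromosome sizes
# file are taken from the module constants and are assumptions here):
#
#   wigToBigWig sample.wig hg19.chrom.sizes sample.bw
#   bigWigToBedGraph sample.bw sample.bg
#
# so wig_to_bed_graph(('sample.wig', 'sample.bw', 'sample.bg')) produces
# sample.bg and deletes the intermediate sample.bw.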
def raw_data_to_bed_graph(wig_directory=RAW_DATA_DIR, bg_directory=BED_GRAPH_DIR):
"""
Transforms raw data wig files to bed graph files
@param wig_directory: directory with wig files
@param bg_directory: directory with bed graph data
"""
pool_process = Pool()
bed_graphs = [f[:-3] for f in os.listdir(bg_directory)]
need_transform = [(os.path.join(wig_directory, f), os.path.join(bg_directory, f[:-7] + '.bw'),
os.path.join(bg_directory, f[:-7] + '.bg')) for f in os.listdir(wig_directory) if
f[:-7] not in bed_graphs]
for trans in need_transform:
pool_process.apply_async(wig_to_bed_graph, (trans,))
pool_process.close()
pool_process.join()
def ucsc_download(src_path, target_path=None, email=None):
"""
Downloads data from UCSC using FTP
@param src_path: path to download to (local)
@param target_path: path to download from (remote)
@param email: email for authentication
"""
if target_path is None:
target_path = input("In which directory would you like to store the genome?")
if email is None:
email = input("Please enter your mail (will be used to enter to hgdownload ftp")
with ftplib.FTP(host='hgdownload.cse.ucsc.edu') as ucsc_ftp:
ucsc_ftp.login(user="anonymous", passwd=email)
ucsc_ftp.cwd(os.path.dirname(target_path))
if not os.path.exists(src_path):
os.makedirs(src_path)
with open(os.path.join(src_path, os.path.basename(target_path)), 'wb') as dFile:
ucsc_ftp.retrbinary('RETR %s' % os.path.basename(target_path), dFile.write)
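# Example (illustrative): downloading the hg19 2bit archive into ./genome,
# mirroring what the 'download_genome' sub-command below does (the e-mail is
# only used as the anonymous FTP password):
#
#   ucsc_download('./genome', 'goldenPath/hg19/bigZips/hg19.2bit',
#                 email='user@example.org')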
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help="")
# downloads genome sequence
parser_download_genome = subparsers.add_parser('download_genome',
help='Downloads genome sequence from hgdownload.cse.ucsc.edu')
    parser_download_genome.add_argument('directory', help="Directory to store retrieved file")
parser_download_genome.add_argument('--genome', help="Genome to download", default='hg19')
parser_download_genome.add_argument('--email', help="Email for authentication to UCSC", default='')
parser_download_genome.set_defaults(
func=lambda args: ucsc_download(args.directory, "goldenPath/%s/bigZips/%s.2bit" % (args.genome, args.genome),
args.email))
# utility function for downloading from multiple FTPs
parser_download_source = subparsers.add_parser('download_sources',
                                                   help='Downloads data from the FTP/rsync sources listed in a file')
parser_download_source.add_argument('source',
help="A file with each line containing FTP source to download data from")
parser_download_source.set_defaults(func=lambda args: download_from_source(args.source))
parser_transform_ncbi = subparsers.add_parser('transform_ncbi',
help='Transforms .wig.gz files in SIGNAL_DIR to pkl files')
parser_transform_ncbi.add_argument('--directory', help="directory with cell types subdirectories, with wig files",
default=SIGNAL_DIR)
parser_transform_ncbi.set_defaults(func=lambda args: transform_ncbi(args.directory))
parser_download_ncbi_markers = subparsers.add_parser('ncbiMarkers',
                                                         help='Downloads NCBI markers to OTHER_DATA/markers')
parser_download_ncbi_markers.add_argument('--markers_to_download',
help="specific experiments to be downloaded. " +
"Default: histone modifications+mRNA-Seq and RRBS",
default=None)
parser_download_ncbi_markers.add_argument('--markers_to_ignore', help="markers to ignore",
default=None)
parser_download_ncbi_markers.add_argument('--by_experiments_dir', help="NCBI directory for downloading experiments",
default="pub/geo/DATA/roadmapepigenomics/by_experiment/")
parser_download_ncbi_markers.set_defaults(
func=lambda args: download_ncbi_histone(args.markers_to_download, args.markers_to_ignore,
args.by_experiments_dir))
raw_data_to_bed_graph_parser = subparsers.add_parser('raw_to_bed',
                                                         help='Transforms raw wig files into bed graph files')
raw_data_to_bed_graph_parser.add_argument('--wig_directory', help="directory with wig files",
default=RAW_DATA_DIR)
raw_data_to_bed_graph_parser.add_argument('--bg_directory', help="directory with bed graph data",
default=BED_GRAPH_DIR)
raw_data_to_bed_graph_parser.set_defaults(func=lambda args: raw_data_to_bed_graph(args.wig_directory,
args.bg_directory))
wig_to_npz_transform = subparsers.add_parser('wig_to_npz',
help='Transforms .wig.gz files in directory to npz files')
wig_to_npz_transform.add_argument('--directory', help="directory with wig.gz files to transform",
default=DATA_DIR)
wig_to_npz_transform.set_defaults(func=lambda args: transform_wig_files(args.directory))
serialize_dir_transform = subparsers.add_parser('serialize_dir',
help='Serializes wig.gz/bigWig files to npz')
serialize_dir_transform.add_argument('--in_directory', help="Input directory", default=RAW_DATA_DIR)
    serialize_dir_transform.add_argument('--out_directory', help="Output directory", default=SIGNAL_DIR)
serialize_dir_transform.set_defaults(func=lambda args: serialize_dir(args.in_directory, args.out_directory))
command_args = parser.parse_args()
command_args.func(command_args)
|
mit
| -2,990,876,278,321,959,400
| 43.024038
| 120
| 0.598395
| false
| 3.711796
| false
| false
| false
|
linkslice/graphite-tools
|
codahale_metrics.py
|
1
|
7854
|
#!/usr/bin/env python
#####################################################
## Parse codahale/yammer/dropwizard JSON metrics ##
## put the tuples into a list, ##
## pickle the list and dump it into the graphite ##
## pickle port ##
#####################################################
import pickle
import socket
import struct
import time
import re
import sys
from base64 import b64encode
from optparse import OptionParser
import urllib2, httplib
import json
socket.setdefaulttimeout(30.0)
def processResponse(data,graphiteRoot,pickleport):
timestamp = time.time()
output = ([])
if options.verbose: print >> sys.stderr, data
d = json.loads(data)
try:
# Step through JSON objects and sub objects and sub objects.
for everyone, two in d.iteritems():
if type(two).__name__=='dict':
for attr, value in two.items():
if type(value).__name__=='dict':
try:
for left, right in value.items():
if not ((type(right).__name__ == "float") or (type(right).__name__ == "int")): continue
# strip unicode stuff
if '.' in everyone:
blah = str("%s.%s_%s_%s" % ( graphiteRoot, everyone, attr.replace(' ','_'), left.replace(' ','_')))
output.append((blah, (timestamp,right)))
else:
blah = str("%s.%s.%s_%s" % ( graphiteRoot, everyone, attr.replace(' ','_'), left.replace(' ','_')))
output.append((blah, (timestamp,right)))
# Some 'left' objects at this level are of type unicode.
# So, obviously attempting to walk them like they were a dict type
# is going to generate some exceptions.
# Ignore them and move to the next one.
except AttributeError as uh:
continue
else:
#if type(value).__name__=="dict": continue
# strip unicode stuff
blah = str("%s.%s.%s" % ( graphiteRoot, everyone, attr.replace(' ','_')))
output.append((blah,(timestamp,value)))
else:
# strip unicode stuff
blah = str("%s.%s" % ( graphiteRoot, everyone.replace(' ','_')))
output.append((blah, (timestamp,two)))
# probably not needed any longer
except KeyError:
print >> sys.stderr, "Critical: Key not found: %s" % resource
sys.exit(1)
finally:
#prepare the package for delivery!!!
package = pickle.dumps(output, 1)
size = struct.pack('!L', len(package))
# if verbose is set write the pickle to a file for
# further testing
if options.verbose:
fh = open('data.p', 'wb')
pickle.dump(output, fh)
fh.close()
s = socket.socket()
s.connect(('localhost', pickleport))
s.sendall(size)
s.sendall(package)
sys.exit(0)
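# Example (illustrative): for a metrics document such as
#   {"jvm": {"memory": {"heap_used": 12.5}}, "uptime": 3600}
# and a graphite root of "servers.app01", the list built above holds tuples in
# the shape Graphite's pickle listener expects:
#   [("servers.app01.jvm.memory_heap_used", (ts, 12.5)),
#    ("servers.app01.uptime", (ts, 3600))]
# where ts is the time.time() value captured at the top of processResponse.
# The payload written to the pickle port is a 4-byte big-endian length header
# (struct '!L') followed by the pickled list.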
class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
def __init__(self, key, cert):
urllib2.HTTPSHandler.__init__(self)
self.key = key
self.cert = cert
def https_open(self, req):
return self.do_open(self.getConnection, req)
def getConnection(self, host, timeout=300):
return httplib.HTTPSConnection(host, key_file=self.key, cert_file=self.cert)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-H', '--host', dest='host',
help='Hostname/IP of the web server')
parser.add_option('-p', '--port', dest='port',
type='int', default=80,
help='Port to connect to on the web server')
parser.add_option('-u', '--url', dest='url',
help='URL to retrieve data from')
parser.add_option('-n', '--username', dest='username',
help='Username for accessing the page')
parser.add_option('-w', '--password', dest='password',
help='Password for accessing the page')
parser.add_option('-s', '--service', dest='service',
help='Service you want to query')
parser.add_option('-r', '--resource', dest='resource',
help='Resource you want to query')
parser.add_option('-q', '--query', dest='query',
help='Object to query')
parser.add_option('-S', '--ssl', dest='usingssl',
action="store_true",
help='Enable SSL for HTTP connection')
parser.add_option('-C', '--client', dest='client',
help='Client cert to use')
parser.add_option('-K', '--key', dest='key',
help='Client key to use')
parser.add_option('-R', '--graphite-root', dest='graphiteRoot',
help='Graphite root to store data in')
parser.add_option('-P', '--pickle-port', dest='pickleport',
type='int', default=2004,
help='Pickle port to submit data to')
parser.add_option('-v', '--verbose', dest='verbose',
action="store_true",
help='enable verbose output')
options, args = parser.parse_args()
if not options.host:
print >> sys.stderr, "Critical: You must specify the host."
sys.exit(1)
if not options.url:
print >> sys.stderr, "You must specify a URL."
sys.exit(1)
else:
url = options.url
headers = {}
if options.username and options.password:
authstring = ':'.join((
options.username, options.password)).encode('base64')
headers = {
"Authorization": "Basic " + authstring.rstrip(),
}
# default to use SSL if the port is 443
    if options.usingssl or options.port == 443:
if not options.key:
from httplib import HTTPSConnection
try:
connection = HTTPSConnection(options.host, options.port)
connection.request("GET", url, None, headers)
except:
print >> sys.stderr, "Unable to make HTTPS connection to https://%s:%s%s" % ( options.host, options.port, url )
sys.exit(1)
else:
import urllib2
from httplib import HTTPSConnection
opener = urllib2.build_opener(HTTPSClientAuthHandler(options.key, options.client))
connectString = "https://%s:%s%s" % (options.host, options.port, options.url)
try:
response = opener.open(connectString)
except:
print >> sys.stderr, "Could not connect to %s" % connectString
sys.exit(2)
else:
from httplib import HTTPConnection
try:
connection = HTTPConnection(options.host, options.port)
connection.request("GET", url, None, headers)
except Exception as e:
print >> sys.stderr, "Unable to make HTTP connection to http://%s:%s%s because: %s" % ( options.host, options.port, url, e )
sys.exit(1)
graphiteRoot = "%s.%s" % ( options.graphiteRoot, options.host )
if options.key:
returnCode = response.getcode()
else:
response = connection.getresponse()
returnCode = response.status
if returnCode == 200:
processResponse(response.read(),graphiteRoot,options.pickleport)
elif returnCode == 401:
print "Invalid username or password."
sys.exit(1)
elif returnCode == 404:
print "404 not found."
sys.exit(1)
else:
print "Web service error %: " % returnCode #, (None if not response.reason else response.reason) )
sys.exit(1)
|
mit
| 7,965,960,671,044,199,000
| 38.467337
| 136
| 0.536542
| false
| 4.32251
| false
| false
| false
|
MOA-2011/enigma2-plugin-extensions-openwebif
|
plugin/controllers/views/mobile/channels.py
|
1
|
7457
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.38012
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/mobile/channels.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class channels(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(channels, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<html>\r
<head>\r
\t<title>OpenWebif</title>\r
\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
\t<meta name="viewport" content="user-scalable=no, width=device-width"/>\r
\t<meta name="apple-mobile-web-app-capable" content="yes" />\r
\t<link rel="stylesheet" type="text/css" href="/css/jquery.mobile-1.0.min.css" media="screen"/>\r
\t<link rel="stylesheet" type="text/css" href="/css/iphone.css" media="screen"/>\r
\t<script src="/js/jquery-1.6.2.min.js"></script>\r
\t<script src="/js/jquery.mobile-1.0.min.js"></script>\r
</head>\r
<body> \r
\t<div data-role="page">\r
\r
\t\t<div id="header">\r
\t\t\t<div class="button" onClick="history.back()">''')
_v = VFFSL(SL,"tstrings",True)['back'] # u"$tstrings['back']" on line 17, col 49
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['back']")) # from line 17, col 49.
write(u'''</div>\r
\t\t\t<h1><a style="color:#FFF;text-decoration:none;" href=\'/mobile\'>OpenWebif</a></h1>
\t\t</div>\r
\r
\t\t<div id="contentContainer">\r
\t\t\t<ul data-role="listview" data-inset="true" data-theme="d">\r
\t\t\t\t<li data-role="list-divider" role="heading" data-theme="b">''')
_v = VFFSL(SL,"tstrings",True)['channels'] # u"$tstrings['channels']" on line 23, col 64
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['channels']")) # from line 23, col 64.
write(u'''</li>\r
''')
for channel in VFFSL(SL,"channels",True): # generated from line 24, col 5
write(u'''\t\t\t\t<li>\r
\t\t\t\t<a href="/mobile/channelinfo?sref=''')
_v = VFFSL(SL,"channel.ref",True) # u'$channel.ref' on line 26, col 39
if _v is not None: write(_filter(_v, rawExpr=u'$channel.ref')) # from line 26, col 39.
write(u'''" style="padding: 3px;">\r
\t\t\t\t<span class="ui-li-heading" style="margin-top: 0px; margin-bottom: 3px;">''')
_v = VFFSL(SL,"channel.name",True) # u'$channel.name' on line 27, col 78
if _v is not None: write(_filter(_v, rawExpr=u'$channel.name')) # from line 27, col 78.
write(u'''</span>\r
''')
if VFN(VFFSL(SL,"channel",True),"has_key",False)('now_title'): # generated from line 28, col 5
write(u'''\t\t\t\t<span class="ui-li-desc" style="margin-bottom: 0px;">''')
_v = VFFSL(SL,"channel.now_title",True) # u'$channel.now_title' on line 29, col 58
if _v is not None: write(_filter(_v, rawExpr=u'$channel.now_title')) # from line 29, col 58.
write(u'''</span>\r
''')
write(u'''\t\t\t\t</a>\r
\t\t\t\t</li>\r
''')
write(u'''\t\t\t</ul>\r
\t\t</div>\r
\r
\t\t<div id="footer">\r
\t\t\t<p>OpenWebif Mobile</p>\r
\t\t\t<a onclick="document.location.href=\'/index?mode=fullpage\';return false;" href="#">''')
_v = VFFSL(SL,"tstrings",True)['show_full_openwebif'] # u"$tstrings['show_full_openwebif']" on line 39, col 86
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['show_full_openwebif']")) # from line 39, col 86.
write(u'''</a>\r
\t\t</div>\r
\t\t\r
\t</div>\r
</body>\r
</html>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_channels= 'respond'
## END CLASS DEFINITION
if not hasattr(channels, '_initCheetahAttributes'):
templateAPIClass = getattr(channels, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(channels)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=channels()).run()
|
gpl-2.0
| 5,791,372,461,891,725,000
| 37.637306
| 247
| 0.617541
| false
| 3.184031
| false
| false
| false
|
commonslabgr/donation-box-pi
|
donation-box/DonationServer.py
|
1
|
16989
|
#!/usr/bin/python
####################################################
# Name: Donation Box WebSockets deamon
#
# Description:
# Provides the WebSockets Server which polls data from the DB, notifies any connected clients (browsers)
# and accepts messages (donations) from clients that then writes to the DB
#
# Author: Dimitris Koukoulakis
#
# License: GNU GPL v3.0
####################################################
from __future__ import print_function
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import serial
import MySQLdb
import time
import threading
import datetime
import decimal
import json
import urllib2
import logging
import subprocess
import os
import random
from time import gmtime, strftime
import sys
sys.path.insert(0,'/home/commonslab/donation-box/resources')
from Adafruit_Thermal import *
import coopboxqr
#Init
json_data=open('/home/commonslab/donation-box/config.json')
config = json.load(json_data)
json_data.close()
logging.basicConfig(filename='DonationServer.log', level=logging.DEBUG, format='%(levelname)s %(asctime)s: %(message)s')
logging.debug('Donation Box Server started')
coin = 0
#See the config.json file for the configuration
curr = config["General"]["currency"]
init_wait_time = config["General"]["Init Wait time (sec)"]
clients = []
dbserver = config["Database"]["server"]
dbuser = config["Database"]["username"]
dbpass = config["Database"]["password"]
dbname = config["Database"]["name"]
#PRINTER
pr_enabled = config["Printer"]["enabled"]
pr_dev = config["Printer"]["dev"]
pr_baudrate = config["Printer"]["baudrate"]
pr_timeout = config["Printer"]["timeout"]
pr_feedlines = config["Printer"]["feedlines"]
pr_heattime = config["Printer"]["heattime"]
#GAME
game_enabled = config["Game"]["enabled"]
game_run = config["Game"]["run"]
#UI
ui_sendsum = config["UI"]["SendSumDonations"]
#NETWORK
net_enabled = config["Network"]["enabled"]
net_url = config["Network"]["URL"]
net_send = config["Network"]["insert"]
net_get = config["Network"]["get"]
net_getparam = config["Network"]["get_param"]
net_boxid = config["Network"]["boxID"]
#wait at start up for mySQL to load
time.sleep(init_wait_time)
#For normal Screen (No Touch) make donations automatic
#ONLY for single project!
auto_donation = False
if pr_enabled:
printer = Adafruit_Thermal(pr_dev, pr_baudrate, timeout=pr_timeout)
def PrintMSFReceipt():
printer.begin(pr_heattime)
printer.setTimes(0,0) #print as fast as possible
printer.feed(1)
printer.printBitmap(msflogo.width, msflogo.height, msflogo.data)
printer.feed(2)
printer.printBitmap(msfty.width, msfty.height, msfty.data)
printer.feed(1)
printer.printBitmap(msfqr.width, msfqr.height, msfqr.data)
printer.feed(1)
printer.doubleHeightOn()
printer.println(' msf.gr')
printer.println(' +30 210 5200500')
printer.feed(pr_feedlines)
def PrintCoopBoxReceipt(amount,uid):
printer.begin(pr_heattime)
printer.setTimes(0,0) #print as fast as possible
printer.doubleHeightOn()
printer.println(' CODE: {0}'.format(uid))
printer.doubleHeightOff()
printer.feed(1)
printer.println('scan the QR code or go to ')
printer.println('http://thecoopbox.commonslab.gr')
printer.println('and register for your perk')
printer.feed(1)
printer.doubleHeightOn()
printer.println(' {0} EUR'.format(amount))
printer.doubleHeightOff()
printer.feed(1)
#if (amount == '0.50'):
# printer.printBitmap(halfeuro.width, halfeuro.height, halfeuro.data)
#elif (amount == '1.00'):
# printer.printBitmap(oneeuro.width, oneeuro.height, oneeuro.data)
#elif (amount == '2.00'):
# printer.printBitmap(twoeuro.width, twoeuro.height, twoeuro.data)
printer.feed(1)
printer.printBitmap(coopboxqr.width, coopboxqr.height, coopboxqr.data)
printer.feed(pr_feedlines)
def Th_print(currency,value,name,email,prname,prid,donationid,uid):
if not pr_enabled:
logging.debug('Thermal printer is disabled')
return
PrintCoopBoxReceipt(value,uid)
#os.system('/home/commonslab/donation-box/PrintCoopReceipt.py -a {0} -i {1}'.format(value,uid))
#THREAD:Start printing receipt
#p = threading.Thread(target=PrintCoopBoxReceipt(value,uid))
#p.daemon = True
#p.start()
#Generate Unique Donation ID for registering it to the DB and print it for user
def GenerateUID(amount):
#Generate random 5 digit number
r = random.randint(10000,99999)
#Get a list of the digits
l = list(str(r))
#Get the sum of those digits
c = int(l[0])+int(l[1])+int(l[2])+int(l[3])+int(l[4])
#Get the modulus of that sum
c = c%10;
a = str(amount)[0]
'''
if (amount == 1):
a = random.randint(0,2)
elif (amount == 2):
a = random.randint(3,5)
elif (amount == 0.5):
a = random.randint(6,9)
'''
uid = str('{0}{1}{2}').format(a,r,c)
return uid
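# Worked example (illustrative): for an amount of '2.00' and a random draw
# r = 12345, the digit sum is 1+2+3+4+5 = 15, the check digit is 15 % 10 = 5
# and the leading digit is str(amount)[0] = '2', so the UID is '2123455'
# (one amount digit, five random digits, one checksum digit).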
#Retrieve Donations from server
def RetrieveDonations(pid):
url = net_url+"/"+net_get+"?"+net_getparam+"="+pid
#url = "http://thecoopbox.commonslab.gr/network_output.php?idproject={0}".format(pid)
response = urllib2.urlopen(url)
data = json.loads(response.read())
new_amount = data[0]['amount']
logging.debug(json.dumps(data))
return new_amount
#Submit Donation data to server
def SendDonationToServer(prid,value,uid):
from time import gmtime, strftime
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#uid = GenerateUID(value)
#data = {"ProjectID":1,"BoxID":1,"DateTime":time.strftime('%Y-%m-%d %H:%M:%S'),"Amount":2}
data = {}
data['ProjectID'] = prid
data['BoxID'] = net_boxid
data['Amount'] = value
data['DonationTime'] = timestamp
data['UID'] = uid
logging.debug(json.dumps(data))
req = urllib2.Request(net_url+'/'+net_send)
req.add_header('Content-Type', 'application/json')
#print "Sending:"
#print json.dumps(data)
logging.debug('Sending: {0}'.format(data))
    response = urllib2.urlopen(req, json.dumps(data))
    response_text = response.read()
    logging.debug('Response from {0}/{1}: {2}'.format(net_url,net_send,response_text))
    if ("successfully" in response_text):
return True
else:
return False
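# Example (illustrative): for project 1, a 2.00 EUR donation and UID '2123455',
# the JSON body POSTed to <net_url>/<net_send> looks roughly like
#   {"ProjectID": 1, "BoxID": <boxID from config>, "Amount": "2.00",
#    "DonationTime": "2015-06-01 12:34:56", "UID": "2123455"}
# and the box treats any response body containing the word "successfully" as
# confirmation that the donation was stored remotely.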
#Check for inserted coins and send any to the websocket clients
def UpdateCoins():
#THREAD: This function runs inside the thread
LastTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
while True:
#Connect to database
dbConn = MySQLdb.connect(dbserver,dbuser,dbpass,dbname) or die ("could not connect to database")
cursor = dbConn.cursor()
try:
cursor.execute('SELECT timeinserted, value, currency FROM coinacceptor WHERE timeinserted > "{0}" ORDER BY timeinserted ASC'.format(LastTime))
#print('SELECT timeinserted, value, currency FROM coinacceptor WHERE timeinserted > "{0}" ORDER BY timeinserted ASC'.format(LastTime))
for (timeinserted, value, currency) in cursor:
LastTime = timeinserted
global coin
coin = value
global curr
curr = currency
#logging.debug('{0}|{1}'.format(coin,curr))
#print('{0}|{1}'.format(coin,curr))
if coin != 0:
#Send value to web socket clients
SendCoins('{0}|{1}'.format(coin,curr))
if auto_donation:
ProcessDonation('PLAY|||0|COOP|1|{0}EUR'.format(value))
cursor.close(); #close the cursor
except MySQLdb.IntegrityError:
logging.error('failed to fetch data')
finally:
cursor.close(); #close the cursor
#Sleep for a while to allow other processes
time.sleep(0.5);
#Check for money that have not been donated yet
def GetCoins():
global dbserver
global dbname
global dbuser
global dbpass
global coin
global curr
#Connect to Database
dbConn = MySQLdb.connect(dbserver,dbuser,dbpass,dbname) or die ("could not connect to database")
cursor = dbConn.cursor()
try:
#See if there are coins inserted that have not been donated
cursor.execute('SELECT currency,value,donationid FROM coinacceptor WHERE donationid < 0')
# Get returned values
for (currency,value,donationid) in cursor:
#TODO: What should happen if one coin is of differenct currency?
curr = currency
coin += value
logging.debug('DonationID: '+repr(donationid)+' Currency: '+repr(curr)+' Value: '+repr(coin))
if coin != 0:
return str('{0}|{1}'.format(coin,curr))
else:
return 0
cursor.close(); #close the cursor
except MySQLdb.IntegrityError:
logging.error('failed to fetch data')
finally:
        cursor.close()  # close just in case it failed
def InsertRegistration(name,email):
from time import gmtime, strftime
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
dbConn = MySQLdb.connect(dbserver,dbuser,dbpass,dbname) or die ("could not connect to database")
logging.debug('Insert registration to DB')
dbConn.set_character_set('utf8')
cursor = dbConn.cursor()
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')
logging.debug('Name:'+name+' Email:'+email)
try:
#Insert registration
cursor.execute('INSERT INTO newsletter (email,name,timestamp) VALUES ("{0}","{1}","{2}")'.format(email,name,timestamp))
dbConn.commit()
cursor.close()
except MySQLdb.IntegrityError:
logging.error('failed to fetch data')
for client in clients:
client.write_message("ERROR")
finally:
        cursor.close()  # close just in case it failed
def InsertDonation(currency,value,name,email,public, prname, prid, uid):
from time import gmtime, strftime
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
dbConn = MySQLdb.connect(dbserver,dbuser,dbpass,dbname) or die ("could not connect to database")
logging.debug('Insert donation to DB')
dbConn.set_character_set('utf8')
cursor = dbConn.cursor()
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')
logging.debug('Name:'+name+' Email:'+email+' public:'+public+' Project Name:'+prname+' ProjectID:'+prid+' Currency:'+currency+' Value:'+value)
if (public == 'false'):
public = 0
else:
public = 1
try:
#Insert donation
logging.debug('INSERT INTO donations (currency,ammount,projectname,email,name,public,projectid,timestamp, uid) VALUES ("{0}",{1},"{2}","{3}","{4}",{5},{6},"{7}","{8}")'.format(currency,value,prname,email,name,public,prid,timestamp,uid))
cursor.execute('INSERT INTO donations (currency,ammount,projectname,email,name,public,projectid,timestamp, uid) VALUES ("{0}",{1},"{2}","{3}","{4}",{5},{6},"{7}","{8}")'.format(currency,value,prname,email,name,public,prid,timestamp,uid))
dbConn.commit()
#Get donation ID
donationid = cursor.lastrowid
#Update coins inserted with donation ID
cursor.execute('UPDATE coinacceptor SET donationid={0} WHERE donationid=-1'.format(donationid))
dbConn.commit()
cursor.close()
except MySQLdb.IntegrityError:
logging.error('failed to fetch data')
for client in clients:
client.write_message("ERROR")
finally:
        cursor.close()  # close just in case it failed
for client in clients:
client.write_message("SUCCESS")
#SendCoins('{0}|{1}'.format(value,currency))
logging.info('Data written successfuly')
return donationid;
def GetSumDonations():
return GetDonations(-99)
#Get amount of donations for a project
def GetDonations(pid):
#Connect to Database
dbConn = MySQLdb.connect(dbserver,dbuser,dbpass,dbname) or die ("could not connect to database")
cursor = dbConn.cursor()
value = 0
try:
if (pid == -99):
cursor.execute('SELECT SUM(Ammount) FROM donations')
else:
cursor.execute('SELECT SUM(Ammount) FROM donations WHERE ProjectID = {0}'.format(pid))
data = cursor.fetchone()
for row in cursor:
if row[0] is not None:
value = float(row[0])
cursor.close(); #close the cursor
logging.debug('Get project total amount donated: %s', value)
return value
except MySQLdb.IntegrityError:
logging.error('failed to fetch data')
finally:
cursor.close()
#Send coins that have not been donated to clients
def SendCoins(msg):
logging.debug('COINS|{0}'.format(msg))
for client in clients:
client.write_message('COINS|{0}'.format(msg))
#Reset global vars
global coin
global curr
coin = 0
curr = "EUR"
def SendSumDonations(msg):
logging.debug('PID|-99|TOTAL|{0}'.format(msg))
for client in clients:
client.write_message('PID|-99|TOTAL|{0}'.format(msg))
#Send donations for a specified project ID to clients
def SendDonations(pid, msg):
if (net_enabled):
msg = RetrieveDonations(pid)
logging.debug('PID|{0}|TOTAL|{1}'.format(pid,msg))
for client in clients:
client.write_message('PID|{0}|TOTAL|{1}'.format(pid,msg))
#Process Registration
def ProcessRegistration(msg):
logging.debug('Process registration: %s', msg)
values = msg.split('|')
name = values[1]
email = values[2]
#Insert Newsletter registration to database
InsertRegistration(name,email)
#Flag UIDStored
def UIDStored(uid, value):
dbConn = MySQLdb.connect(dbserver,dbuser,dbpass,dbname) or die ("could not connect to database")
cursor = dbConn.cursor()
try:
#See if there are coins inserted that have not been donated
logging.debug('UPDATE donations SET uidStored={0} WHERE uid="{1}"'.format(value,uid))
cursor.execute('UPDATE donations SET uidStored={0} WHERE uid="{1}"'.format(value,uid))
dbConn.commit()
cursor.close() #close the cursor
except MySQLdb.IntegrityError:
logging.error('UIDStored: failed to fetch data')
finally:
        cursor.close()  # close just in case it failed
#Process Donation
def ProcessDonation(msg):
logging.debug('Process donation: %s', msg)
values = msg.split('|')
name = values[1]
email = values[2]
public = values[3]
prname = values[4]
#This depends on the Language settings
#projectdetails = values[4].split('?') #contains language info (e.g. 81?lang=el)
#prid = projectdetails[0]
prid = values[5]
#lang = projectdetails[1] #lang support for printer limited to ASCII
dondata = values[6]
l = len(dondata)
donvalue = dondata[0:l-3]
doncurr = dondata[l-3:]
#Switch to Game
if (values[0] == 'PLAY'):
SwitchToGame();
if net_enabled:
uid = GenerateUID(donvalue)
#Insert Donation to Database
donationid = InsertDonation(doncurr,donvalue,name,email,public,prname,prid,uid)
if (SendDonationToServer(prid,donvalue,uid)):
UIDStored(uid, True)
else:
UIDStored(uid, False)
#Print receipt
Th_print(doncurr,donvalue,name,email,prname,prid,donationid,uid)
else:
#Insert Donation to Database
donationid = InsertDonation(doncurr,donvalue,name,email,public,prname,prid,0)
Th_print(doncurr,donvalue,name,email,prname,prid,donationid,0)
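# Example (illustrative): a donation message from the browser looks like
#   'DONATION|Jane Doe|jane@example.org|true|COOP|1|2.00EUR'
# (all values are placeholders). The split above yields name='Jane Doe',
# email='jane@example.org', public='true', prname='COOP', prid='1' and
# dondata='2.00EUR'; slicing off the last three characters gives
# donvalue='2.00' and doncurr='EUR'.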
#Close window playing video loop
def CloseVideo():
logging.debug('Close Video window')
os.system("wmctrl -a 'Donation Box |'")
#Process Messages
def processmsg(msg):
logging.debug('Process message: %s', msg)
values = msg.split('|')
if (values[0] == 'REQPROJECTTOTAL'):
s = GetDonations(values[1])
SendDonations(values[1],s)
elif (values[0] == 'NEWSLETTER'):
ProcessRegistration(msg)
elif ( (values[0] == 'DONATION') or (values[0] == 'PLAY') ):
ProcessDonation(msg)
elif (values[0] == 'VIDEO_CLICK'):
CloseVideo()
#Switch to Game Window
def SwitchToGame():
if game_enabled:
logging.debug('Switch to: ')
logging.debug(game_run)
#For Reaction game
#os.system("wmctrl -a reflex_game")
#For MAME or Pacman game
os.system(game_run)
#HTTP Server Handler
class WSHandler(tornado.websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
logging.info('New connection was opened')
clients.append(self)
#Get inserted coins that have not been donated
s = GetCoins()
# Send value and currency to web socket client
SendCoins(s)
#Get donations
#s = GetDonations(1) #PID=1 if we run the box as a single donation project, otherwise we need the Project ID
#Send Donations to web socket clients
#SendDonations(1,s)
if (ui_sendsum):
s = GetSumDonations()
SendSumDonations(s)
#Process any received messages
def on_message(self, message):
processmsg(message)
def on_close(self):
logging.info('Connection was closed...')
clients.remove(self)
#THREAD:Start looking for newly inserted coins
t = threading.Thread(target=UpdateCoins)
t.daemon = True
t.start()
application = tornado.web.Application([
(r'/ws', WSHandler),
])
if __name__ == "__main__":
#Start the HTTP server and listen at port 8888
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
|
gpl-3.0
| -4,253,610,225,365,085,700
| 31.421756
| 243
| 0.6825
| false
| 3.382242
| true
| false
| false
|
rl-institut/appBBB
|
appBBB/results_evaluation.py
|
1
|
32709
|
from os.path import expanduser
import csv
import pandas as pd
import matplotlib.pyplot as plt
from oemof.core import energy_system as es
from oemof.solph.predefined_objectives import minimize_cost
from oemof.outputlib import to_pandas as tpd
from oemof import db
import helper_BBB as hlsb
def create_es(solver, timesteps, year):
"""
Creates a default energy system to load results into.
"""
simulation = es.Simulation(solver=solver,
timesteps=timesteps,
debug=False,
objective_options={"function": minimize_cost})
# Adding a time index to the energy system
time_index = pd.date_range('1/1/' + year,
periods=len(timesteps),
freq='H')
energysystem = es.EnergySystem(time_idx=time_index,
simulation=simulation)
return energysystem
def color_dict(reg):
"""
Sets colors for entities in plot of electricity sector.
"""
cdict = {
# transformer
"('FixedSrc', '" + reg + "', 'wind_pwr')": 'lightblue',
"('FixedSrc', '" + reg + "', 'pv_pwr')": 'yellow',
"('transformer', '" + reg + "', 'oil')": 'black',
"('transformer', '" + reg + "', 'oil', 'chp')": 'black',
"('transformer', '" + reg + "', 'oil', 'SEchp')": 'black',
"('transformer', '" + reg + "', 'natural_gas')": 'lightgrey',
"('transformer', '" + reg + "', 'natural_gas', 'chp')":
'lightgrey',
"('transformer', '" + reg + "', 'natural_gas', 'SEchp')":
'lightgrey',
"('transformer', '" + reg + "', 'natural_gas_cc')": 'darkgrey',
"('transformer', '" + reg + "', 'natural_gas_cc', 'chp')":
'darkgrey',
"('transformer', '" + reg + "', 'natural_gas_cc', 'SEchp')":
'darkgrey',
"('transformer', '" + reg + "', 'HH', 'bhkw_gas')": 'grey',
"('transformer', '" + reg + "', 'GHD', 'bhkw_gas')": 'grey',
"('transformer', '" + reg + "', 'biomass')": 'lightgreen',
"('transformer', '" + reg + "', 'biomass', 'chp')": 'lightgreen',
"('transformer', '" + reg + "', 'biomass', 'SEchp')":
'lightgreen',
"('transformer', '" + reg + "', 'HH', 'bhkw_bio')": 'green',
"('transformer', '" + reg + "', 'GHD', 'bhkw_bio')": 'green',
"('transformer', '" + reg + "', 'powertoheat')": 'lightsalmon',
"('transformer', '" + reg + "', 'lignite_jw', 'SEchp')": 'brown',
"('transformer', '" + reg + "', 'lignite_sp', 'SEchp')": 'orange',
# demand
"('demand', '" + reg + "', 'elec')": 'red',
"('demand', '" + reg + "', 'elec', 'mob')": 'red',
# shortage / excess
"('bus', '" + reg + "', 'elec')_excess": 'purple',
"('bus', '" + reg + "', 'elec')_shortage": 'blueviolet',
# heat pump
"('transformer', '" + reg + "', 'hp', 'brine', 'ww')": 'blue',
"('transformer', '" + reg + "', 'hp', 'brine', 'heating')":
'blue',
"('transformer', '" + reg + "', 'hp', 'air', 'ww')": 'blue',
"('transformer', '" + reg + "', 'hp', 'air', 'heating')": 'blue',
"('transformer', '" + reg + "', 'hp', 'air', 'ww', 'rod')":
'blue',
"('transformer', '" + reg + "', 'hp', 'air', 'heating', 'rod')":
'blue',
# transport
"transport_('bus', 'UB', 'elec')('bus', 'OS', 'elec')": 'salmon',
"transport_('bus', 'OS', 'elec')('bus', 'UB', 'elec')": 'salmon',
"transport_('bus', 'OS', 'elec')('bus', 'LS', 'elec')":
'chocolate',
"transport_('bus', 'LS', 'elec')('bus', 'OS', 'elec')":
'chocolate',
"transport_('bus', 'OS', 'elec')('bus', 'BE', 'elec')": 'peru',
"transport_('bus', 'BE', 'elec')('bus', 'OS', 'elec')": 'peru',
"transport_('bus', 'LS', 'elec')('bus', 'HF', 'elec')":
'burlywood',
"transport_('bus', 'HF', 'elec')('bus', 'LS', 'elec')":
'burlywood',
"transport_('bus', 'HF', 'elec')('bus', 'PO', 'elec')":
'goldenrod',
"transport_('bus', 'PO', 'elec')('bus', 'HF', 'elec')":
'goldenrod',
"transport_('bus', 'HF', 'elec')('bus', 'BE', 'elec')": 'khaki',
"transport_('bus', 'BE', 'elec')('bus', 'HF', 'elec')": 'khaki',
"transport_('bus', 'PO', 'elec')('bus', 'OS', 'elec')":
'indianred',
"transport_('bus', 'OS', 'elec')('bus', 'PO', 'elec')":
'indianred',
"transport_('bus', 'UB', 'elec')('bus', 'KJ', 'elec')": 'lime',
"transport_('bus', 'UB', 'elec')('bus', 'MV', 'elec')": 'cyan',
"transport_('bus', 'PO', 'elec')('bus', 'MV', 'elec')": 'teal',
"transport_('bus', 'PO', 'elec')('bus', 'ST', 'elec')":
'seagreen',
"transport_('bus', 'HF', 'elec')('bus', 'ST', 'elec')":
'yellowgreen',
"transport_('bus', 'LS', 'elec')('bus', 'SN', 'elec')":
'turquoise',
"transport_('bus', 'BE', 'elec')('bus', 'HF', 'elec')": 'olive',
"transport_('bus', 'BE', 'elec')('bus', 'OS', 'elec')":
'lightseagreen',
"transport_('bus', 'KJ', 'import')('bus', 'UB', 'elec')": 'lime',
"transport_('bus', 'MV', 'import')('bus', 'UB', 'elec')": 'cyan',
"transport_('bus', 'MV', 'import')('bus', 'PO', 'elec')": 'teal',
"transport_('bus', 'ST', 'import')('bus', 'PO', 'elec')":
'seagreen',
"transport_('bus', 'ST', 'import')('bus', 'HF', 'elec')":
'yellowgreen',
"transport_('bus', 'SN', 'import')('bus', 'LS', 'elec')":
'turquoise',
"transport_('bus', 'HF', 'elec')('bus', 'BE', 'elec')": 'olive',
"transport_('bus', 'OS', 'elec')('bus', 'BE', 'elec')":
'lightseagreen'}
return cdict
def color_dict_dh(reg):
"""
Sets colors for entities in plot of district heating.
"""
cdict = {
# transformer
"('transformer', '" + reg + "', 'oil', 'chp')": 'black',
"('transformer', '" + reg + "', 'oil', 'SEchp')": 'black',
"('heat_transformer', '" + reg + "', 'oil')": 'black',
"('transformer', '" + reg + "', 'natural_gas', 'chp')":
'lightgrey',
"('transformer', '" + reg + "', 'natural_gas', 'SEchp')":
'lightgrey',
"('heat_transformer', '" + reg + "', 'natural_gas')":
'lightgrey',
"('transformer', '" + reg + "', 'dh_peak_heating')": 'khaki',
"('transformer', '" + reg + "', 'natural_gas_cc', 'chp')":
'darkgrey',
"('transformer', '" + reg + "', 'natural_gas_cc', 'SEchp')":
'darkgrey',
"('transformer', '" + reg + "', 'biomass', 'chp')": 'lightgreen',
"('transformer', '" + reg + "', 'biomass', 'SEchp')":
'lightgreen',
"('heat_transformer', '" + reg + "', 'biomass')": 'lightgreen',
"('transformer', '" + reg + "', 'lignite_jw', 'SEchp')": 'brown',
"('transformer', '" + reg + "', 'lignite_sp', 'SEchp')": 'orange',
"('transformer', '" + reg + "', 'powertoheat')": 'lightsalmon',
# demand
"('demand', '" + reg + "', 'dh')": 'red',
        # shortage / excess
"('bus', '" + reg + "', 'dh')_excess": 'purple',
"('bus', '" + reg + "', 'dh')_shortage": 'blue'}
return cdict
def stack_plot(energysystem, reg, bus, date_from, date_to):
"""
Creates a stack plot of the specified bus.
"""
# initialize plot
myplot = tpd.DataFramePlot(energy_system=energysystem)
# get dictionary with color of each entity in plot
if bus == 'elec':
cdict = color_dict(reg)
elif bus == 'dh':
cdict = color_dict_dh(reg)
# slice dataframe to prepare for plot function
myplot.slice_unstacked(
bus_uid="('bus', '" + reg + "', '" + bus + "')",
type="input",
date_from=date_from,
date_to=date_to)
myplot.color_from_dict(cdict)
# set plot parameters
fig = plt.figure(figsize=(40, 14))
plt.rc('legend', **{'fontsize': 18})
plt.rcParams.update({'font.size': 18})
plt.style.use('grayscale')
# plot bus
handles, labels = myplot.io_plot(
bus_uid="('bus', '" + reg + "', '" + bus + "')",
cdict=cdict,
line_kwa={'linewidth': 4},
ax=fig.add_subplot(1, 1, 1),
date_from=date_from,
date_to=date_to,
)
myplot.ax.set_ylabel('Power in MW')
myplot.ax.set_xlabel('Date')
myplot.ax.set_title(bus+" bus")
myplot.set_datetime_ticks(tick_distance=24, date_format='%d-%m-%Y')
myplot.outside_legend(handles=handles, labels=labels)
plt.show()
return (fig)
def sum_max_output_of_component(energysystem, from_uid, to_uid):
"""
Returns the sum and the maximum of the flow from entity with 'from_uid'
to entity with 'to_uid'.
"""
results_bus = energysystem.results[[obj for obj in energysystem.entities
if obj.uid == (from_uid)][0]]
results_bus_component = results_bus[[obj for obj in energysystem.entities
if obj.uid == (to_uid)][0]]
return sum(results_bus_component), max(results_bus_component)
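# Example (illustrative): total and peak wind feed-in into the electricity bus
# of region 'UB', using the tuple-string uids used throughout this module:
#
#   total, peak = sum_max_output_of_component(
#       energysystem,
#       "('FixedSrc', 'UB', 'wind_pwr')",
#       "('bus', 'UB', 'elec')")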
def timeseries_of_component(energysystem, from_uid, to_uid):
"""
Returns the flow from entity with 'from_uid' to entity with 'to_uid'.
"""
results_bus = energysystem.results[[obj for obj in energysystem.entities
if obj.uid == (from_uid)][0]]
results_bus_component = results_bus[[obj for obj in energysystem.entities
if obj.uid == (to_uid)][0]]
return results_bus_component
def print_validation_outputs(energysystem, reg, results_dc):
"""
Returns sums and maximums of flows as well as full load hours of
transformers.
"""
# connect to database
conn_oedb = db.connection(section='open_edb')
    # get parameters of transformers from database
(co2_emissions, co2_fix, eta_elec, eta_th, eta_th_chp, eta_el_chp,
eta_chp_flex_el, sigma_chp, beta_chp, opex_var, opex_fix, capex,
c_rate_in, c_rate_out, eta_in, eta_out,
cap_loss, lifetime, wacc) = hlsb.get_parameters(conn_oedb)
# list of possible power plants in region
pp = [
"('FixedSrc', '" + reg + "', 'wind_pwr')",
"('FixedSrc', '" + reg + "', 'pv_pwr')",
"('transformer', '" + reg + "', 'oil')",
"('transformer', '" + reg + "', 'oil', 'chp')",
"('transformer', '" + reg + "', 'oil', 'SEchp')",
"('transformer', '" + reg + "', 'natural_gas')",
"('transformer', '" + reg + "', 'natural_gas', 'chp')",
"('transformer', '" + reg + "', 'natural_gas', 'SEchp')",
"('transformer', '" + reg + "', 'natural_gas_cc')",
"('transformer', '" + reg + "', 'natural_gas_cc', 'chp')",
"('transformer', '" + reg + "', 'natural_gas_cc', 'SEchp')",
"('transformer', '" + reg + "', 'biomass')",
"('transformer', '" + reg + "', 'biomass', 'chp')",
"('transformer', '" + reg + "', 'biomass', 'SEchp')",
"('transformer', '" + reg + "', 'HH', 'bhkw_gas')",
"('transformer', '" + reg + "', 'GHD', 'bhkw_gas')",
"('transformer', '" + reg + "', 'HH', 'bhkw_bio')",
"('transformer', '" + reg + "', 'GHD', 'bhkw_bio')",
"('transformer', '" + reg + "', 'bhkw_bio')",
"('transformer', '" + reg + "', 'bhkw_bio', 'dh')",
"('transformer', '" + reg + "', 'dh_peak_heating')",
"('transformer', '" + reg + "', 'lignite_jw', 'SEchp')",
"('transformer', '" + reg + "', 'lignite_sp', 'SEchp')",
"('transformer', '" + reg + "', 'powertoheat')"]
# list of efficiencies of the above transformers
eta_el = [
1,
1,
eta_elec['oil'],
eta_el_chp['oil'],
eta_chp_flex_el['oil'],
eta_elec['natural_gas'],
eta_el_chp['natural_gas'],
eta_chp_flex_el['natural_gas'],
eta_elec['natural_gas_cc'],
eta_el_chp['natural_gas_cc'],
eta_chp_flex_el['natural_gas_cc'],
eta_elec['biomass'],
eta_el_chp['biomass'],
eta_chp_flex_el['biomass'],
eta_el_chp['bhkw_gas'],
eta_el_chp['bhkw_gas'],
eta_el_chp['bhkw_bio'],
eta_el_chp['bhkw_bio'],
eta_el_chp['bhkw_bio'],
eta_el_chp['bhkw_bio'],
0, # dh_peakheating
eta_chp_flex_el['jaenschwalde'],
eta_chp_flex_el['schwarzepumpe'],
0 # powertoheat
]
# list of CO2 emissions of the above transformers
co2 = [
0,
0,
co2_emissions['oil'],
co2_emissions['oil'],
co2_emissions['oil'],
co2_emissions['natural_gas'],
co2_emissions['natural_gas'],
co2_emissions['natural_gas'],
co2_emissions['natural_gas_cc'],
co2_emissions['natural_gas_cc'],
co2_emissions['natural_gas_cc'],
co2_emissions['biomass'],
co2_emissions['biomass'],
co2_emissions['biomass'],
co2_emissions['bhkw_gas'],
co2_emissions['bhkw_gas'],
co2_emissions['bhkw_bio'],
co2_emissions['bhkw_bio'],
co2_emissions['bhkw_bio'],
co2_emissions['bhkw_bio'],
0, # dh_peakheating
co2_emissions['lignite'],
co2_emissions['lignite'],
0 # powertoheat
]
# get sum and maximum of each flow from transformer to bus as well as
# full load hours of each transformer
ebus = "('bus', '" + reg + "', 'elec')"
dhbus = "('bus', '" + reg + "', 'dh')"
summe_plant_dict = {}
el_energy = list()
dh_energy = list()
for p in pp:
print(p)
# if flow from transformer to electricity bus
try:
summe_plant_dict[p], maximum = sum_max_output_of_component(
energysystem, p, ebus)
print(('sum:' + str(summe_plant_dict[p])))
print(('max:' + str(maximum)))
results_dc['sum ' + reg + str(p)] = summe_plant_dict[p]
results_dc['max ' + reg + str(p)] = maximum
el_energy.append(summe_plant_dict[p])
except:
            print('not available')
results_dc['sum ' + reg + str(p)] = 0
results_dc['max ' + reg + str(p)] = 0
el_energy.append(0)
try:
print(('vlh:' + str(summe_plant_dict[p] / maximum)))
results_dc['vlh ' + reg + str(p)] = summe_plant_dict[p] / maximum
except:
results_dc['vlh ' + reg + str(p)] = 0
print('\n')
# if flow from transformer to district heating bus
try:
summe_plant_dict['dh' + p], maximum = sum_max_output_of_component(
energysystem, p, dhbus)
print(('sum:' + str(summe_plant_dict['dh' + p])))
print(('max:' + str(maximum)))
results_dc['sum '+ reg + str(p) + '_dh'] = \
summe_plant_dict['dh' + p]
results_dc['max '+ reg + str(p) + '_dh'] = maximum
dh_energy.append(summe_plant_dict['dh' + p])
except:
            print('not available')
dh_energy.append(0)
results_dc['sum '+ reg + str(p)+'_dh'] = 0
results_dc['max '+ reg + str(p)+'_dh'] = 0
try:
            print(('vlh:' + str(summe_plant_dict[p] / maximum)))
results_dc['vlh ' + reg + str(p)+'_dh'] = (summe_plant_dict[p] /
maximum)
except:
results_dc['vlh ' + reg + str(p)+'_dh'] = 0
print('\n')
# get sum and maximum of electricity shortage
shortage_bus = "('bus', '" + reg + "', 'elec')_shortage"
summe_plant, maximum = sum_max_output_of_component(
energysystem, shortage_bus, ebus)
print(('el_shortage_sum:' + str(summe_plant)))
print(('el_shortage_max:' + str(maximum)))
results_dc['el_shortage ' + reg] = str(summe_plant)
results_dc['el_shortage_max ' + reg] = maximum
print('\n')
# get sum and maximum of excess in district heating
excess_dh = "('bus', '" + reg + "', 'dh')_excess"
summe_plant, maximum = sum_max_output_of_component(
energysystem, dhbus, excess_dh)
print(('dh_excess_sum:' + str(summe_plant)))
print(('dh_excess_max:' + str(maximum)))
results_dc['dh_excess_sum ' + reg] = summe_plant
results_dc['dh_excess_max ' + reg] = maximum
# get sum and maximum of electricity excess
excess = "('bus', '" + reg + "', 'elec')_excess"
summe_plant, maximum = sum_max_output_of_component(
energysystem, ebus, excess)
print(('el_excess_sum:' + str(summe_plant)))
print(('el_excess_max:' + str(maximum)))
results_dc['el_excess_sum ' + reg] = summe_plant
results_dc['el_excess_max ' + reg] = maximum
# get sum of flows from wind turbines and pv systems to electricity bus
sum_fee = (summe_plant_dict["('FixedSrc', '" + reg + "', 'wind_pwr')"] +
summe_plant_dict["('FixedSrc', '" + reg + "', 'pv_pwr')"])
print(('share excess wind + pv:' + str((summe_plant / sum_fee) * 100)))
# create dataframe with power output of each transformer, electrical
# efficiency and CO2 per MWh
frame = pd.DataFrame(index=pp)
frame['dh_energy'] = dh_energy
frame['energy_sum'] = el_energy
frame['eta_el'] = eta_el
frame['co2'] = co2
return (results_dc, frame)
def print_exports(energysystem, results_dc, year, path):
"""
Get exports from Brandenburg to neighbor regions and imports from neighbor
regions to Brandenburg.
"""
export_from = ["('bus', 'UB', 'elec')",
"('bus', 'UB', 'elec')",
"('bus', 'PO', 'elec')",
"('bus', 'PO', 'elec')",
"('bus', 'HF', 'elec')",
"('bus', 'LS', 'elec')",
"('bus', 'HF', 'elec')",
"('bus', 'OS', 'elec')"]
import_to = export_from
export_to = ["transport_('bus', 'UB', 'elec')('bus', 'KJ', 'elec')",
"transport_('bus', 'UB', 'elec')('bus', 'MV', 'elec')",
"transport_('bus', 'PO', 'elec')('bus', 'MV', 'elec')",
"transport_('bus', 'PO', 'elec')('bus', 'ST', 'elec')",
"transport_('bus', 'HF', 'elec')('bus', 'ST', 'elec')",
"transport_('bus', 'LS', 'elec')('bus', 'SN', 'elec')",
"transport_('bus', 'BE', 'elec')('bus', 'HF', 'elec')",
"transport_('bus', 'BE', 'elec')('bus', 'OS', 'elec')"]
import_from = ["transport_('bus', 'KJ', 'import')('bus', 'UB', 'elec')",
"transport_('bus', 'MV', 'import')('bus', 'UB', 'elec')",
"transport_('bus', 'MV', 'import')('bus', 'PO', 'elec')",
"transport_('bus', 'ST', 'import')('bus', 'PO', 'elec')",
"transport_('bus', 'ST', 'import')('bus', 'HF', 'elec')",
"transport_('bus', 'SN', 'import')('bus', 'LS', 'elec')",
"transport_('bus', 'HF', 'elec')('bus', 'BE', 'elec')",
"transport_('bus', 'OS', 'elec')('bus', 'BE', 'elec')"]
time_index = pd.date_range('1/1/{0}'.format(year), periods=8760, freq='H')
time_no_export = pd.DataFrame(index=time_index)
exports = pd.DataFrame(index=time_index)
imports = pd.DataFrame(index=time_index)
export_total = 0
for i in range(len(export_from)):
print(export_to[i])
# sum of export
summe_ex, maximum = sum_max_output_of_component(
energysystem, export_from[i], export_to[i])
export_total += summe_ex
print('export:')
print(summe_ex)
results_dc['export ' + export_to[i] + ' summe'] = summe_ex
# maximum of export
print('max:')
print(maximum)
results_dc['export ' + export_to[i] + ' maximum'] = maximum
# timeseries
exports[export_to[i]] = timeseries_of_component(
energysystem, export_from[i], export_to[i])
imports[export_to[i]] = timeseries_of_component(
energysystem, import_from[i], import_to[i])
time_no_export[export_to[i]] = (exports[export_to[i]] -
imports[export_to[i]])
# total export
print('export_gesamt:')
print(export_total)
results_dc['export gesamt: '] = export_total
# save import and export timeseries to csv
exports.to_csv(path + 'exports.csv')
imports.to_csv(path + 'imports.csv')
time_no_export.to_csv(path + 'no_export.csv')
return (results_dc, time_no_export)
def print_im_exports(energysystem, results_dc, year, path):
"""
Adds flows between regions in Brandenburg and between Brandenburg and
Berlin to results_dc.
"""
export_from = ["('bus', 'UB', 'elec')",
"('bus', 'PO', 'elec')",
"('bus', 'HF', 'elec')",
"('bus', 'LS', 'elec')",
"('bus', 'OS', 'elec')",
"('bus', 'BE', 'elec')"]
export_to = [
"transport_('bus', 'UB', 'elec')('bus', 'OS', 'elec')",
"transport_('bus', 'OS', 'elec')('bus', 'UB', 'elec')",
"transport_('bus', 'OS', 'elec')('bus', 'LS', 'elec')",
"transport_('bus', 'LS', 'elec')('bus', 'OS', 'elec')",
"transport_('bus', 'OS', 'elec')('bus', 'BE', 'elec')",
"transport_('bus', 'BE', 'elec')('bus', 'OS', 'elec')",
"transport_('bus', 'LS', 'elec')('bus', 'HF', 'elec')",
"transport_('bus', 'HF', 'elec')('bus', 'LS', 'elec')",
"transport_('bus', 'HF', 'elec')('bus', 'PO', 'elec')",
"transport_('bus', 'PO', 'elec')('bus', 'HF', 'elec')",
"transport_('bus', 'HF', 'elec')('bus', 'BE', 'elec')",
"transport_('bus', 'BE', 'elec')('bus', 'HF', 'elec')",
"transport_('bus', 'PO', 'elec')('bus', 'OS', 'elec')",
"transport_('bus', 'OS', 'elec')('bus', 'PO', 'elec')"]
time_index = pd.date_range('1/1/{0}'.format(year), periods=8760, freq='H')
BBB_Kuppelstellen = pd.DataFrame(index=time_index)
export_all = 0
for i in export_from:
print(i)
for k in export_to:
print(k)
try:
summe_ex, maximum = sum_max_output_of_component(
energysystem, i, k)
export_all += summe_ex
print('from '+ i + ' to '+ k)
print(summe_ex)
results_dc['export from ' + i + ' to ' + k] = summe_ex
results_dc['export from ' + i + ' to ' + k + ' maximum'] = \
maximum
BBB_Kuppelstellen['export from ' + i + ' to ' + k] = \
timeseries_of_component(energysystem, i, k)
except:
pass
# total of flows
print('export_in_BBB_gesamt:')
print(export_all)
results_dc['export in BBB gesamt: '] = export_all
# timeseries to csv
BBB_Kuppelstellen.to_csv(path + 'kuppelstellen.csv')
return results_dc
def get_share_ee(energysystem, reg, results_dc):
"""
Get shares of wind and pv on demand fulfillment.
"""
# get feedin timeseries from wind and pv to electricity bus
ebus = "('bus', '" + reg + "', 'elec')"
pv_time = timeseries_of_component(
energysystem, "('FixedSrc', '" + reg + "', 'pv_pwr')", ebus)
wind_time = timeseries_of_component(
energysystem, "('FixedSrc', '" + reg + "', 'wind_pwr')", ebus)
# get electricity demand timeseries
demand_time = timeseries_of_component(
energysystem, ebus, "('demand', '" + reg + "', 'elec')")
# calculate shares
res = pd.DataFrame(index=range(len(demand_time)),
columns=['ee', 'pv', 'wind'])
for i in range(len(demand_time)):
fee = demand_time[i] - pv_time[i] - wind_time[i]
if fee < 0:
res['ee'][i] = demand_time[i]
res['pv'][i] = demand_time[i] * pv_time[i] / (
pv_time[i] + wind_time[i])
res['wind'][i] = demand_time[i] * wind_time[i] / (
pv_time[i] + wind_time[i])
else:
res['ee'][i] = pv_time[i] + wind_time[i]
res['pv'][i] = pv_time[i]
res['wind'][i] = wind_time[i]
ee_share = sum(res['ee']) / sum(demand_time)
pv_share = sum(res['pv']) / sum(demand_time)
wind_share = sum(res['wind']) / sum(demand_time)
# print shares and add to results_dc
print('ee share:')
print(ee_share)
results_dc['ee share ' + reg] = ee_share
print('pv share:')
print(pv_share)
results_dc['pv share ' + reg] = pv_share
print('wind share:')
print(wind_share)
results_dc['wind share ' + reg] = wind_share
return results_dc
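# Worked example (illustrative, values in MW for a single hour): with
# demand = 100, pv = 30 and wind = 90 the residual demand - pv - wind = -20 is
# negative, so the renewable contribution is capped at the demand and split
# proportionally: ee = 100, pv = 100 * 30/120 = 25, wind = 100 * 90/120 = 75.
# With demand = 100, pv = 10 and wind = 20 the residual is positive and the
# hourly contributions are simply ee = 30, pv = 10 and wind = 20.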
def co2(energysystem):
"""
Calculate total CO2 emissions.
"""
# retrieve specific CO2 emissions from database
conn_oedb = db.connection(section='open_edb')
(co2_emissions, co2_fix, eta_elec, eta_th, eta_th_chp, eta_el_chp,
eta_chp_flex_el, sigma_chp, beta_chp, opex_var, opex_fix, capex,
c_rate_in, c_rate_out, eta_in, eta_out,
cap_loss, lifetime, wacc) = hlsb.get_parameters(conn_oedb)
# fossil ressources
global_ressources = ['natural_gas', 'natural_gas_cc', 'lignite',
'oil', 'waste', 'hard_coal']
# create list of global ressource buses BB
list_global_ressource_buses = []
for ressource in global_ressources:
list_global_ressource_buses += ["('bus', 'BB', '" + ressource + "')"]
# create list with entities of global ressource buses
global_ressource_buses_bb = [obj for obj in energysystem.entities
if any(bus in obj.uid for bus in list_global_ressource_buses)]
# get yearly energy
co2 = 0
for bus in global_ressource_buses_bb:
for output in bus.outputs:
summe, maximum = sum_max_output_of_component(
energysystem, bus.uid, output.uid)
co2 += summe * co2_emissions[bus.type]
# biogas
biogas_transformer = [obj for obj in energysystem.entities
if 'bhkw_bio' in obj.uid and 'transformer' in obj.uid]
bb_regions = ['PO', 'UB', 'HF', 'OS', 'LS']
biogas_transformer_bb = [obj for obj in biogas_transformer
if any(region in obj.uid for region in bb_regions)]
# write list to hand over to BB constraint
for transformer in biogas_transformer_bb:
summe, maximum = sum_max_output_of_component(
energysystem, transformer.inputs[0].uid, transformer.uid)
co2 += summe * co2_emissions[transformer.inputs[0].type]
print('Total CO2 emissions in BB:')
print(co2)
return co2
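# Note on units (assumption): co2() multiplies yearly energy sums by the specific
# emission factors from get_parameters(), so the returned value is total tonnes of CO2
# only if co2_emissions is given in t/MWh and the summed flows are in MWh.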
def get_supply_demand_timeseries(energysystem, year, path):
"""
Writes timeseries of all inputs and outputs of the electricity bus of
each region as well as their sums to dataframe and saves to csv.
"""
time_index = pd.date_range('1/1/{0}'.format(year), periods=8760, freq='H')
# create dataframe for timeseries sums of outputs and inputs of electricity
# bus
supply_demand_sum = pd.DataFrame(index=time_index)
for region in energysystem.regions:
reg = region.name
# create dataframe for timeseries of outputs and inputs of electricity
# bus
elec_out = pd.DataFrame(index=time_index)
elec_in = pd.DataFrame(index=time_index)
# get electricity bus entity and its results
elec_bus = [obj for obj in energysystem.entities
if obj.uid == ("('bus', '" + reg + "', 'elec')")][0]
elec_bus_results = energysystem.results[[obj for obj in
energysystem.entities
if obj.uid == ("('bus', '" + reg + "', 'elec')")][0]]
# get outputs of electricity bus
for obj in energysystem.entities:
if 'demand' in obj.uid or 'hp' in obj.uid:
try:
elec_out[obj.uid] = elec_bus_results[[obj][0]]
except:
pass
# get inputs of electricity bus
for obj in energysystem.entities:
if ('transformer' in obj.uid or 'transport' in obj.uid or
'FixedSrc' in obj.uid):
obj_in = energysystem.results[[obj][0]]
try:
elec_in[obj.uid] = obj_in[[elec_bus][0]]
except:
pass
# save to csv
elec_in.to_csv(path + reg + '_all_times_in.csv')
elec_out.to_csv(path + reg + '_all_times_out.csv')
# get residual as well as sum of all inputs and all outputs
supply_demand_sum[reg] = elec_in.sum(axis=1) - elec_out.sum(axis=1)
supply_demand_sum[reg + 'in'] = elec_in.sum(axis=1)
supply_demand_sum[reg + 'out'] = elec_out.sum(axis=1)
# save to csv
supply_demand_sum.to_csv(path + 'supply_minus_demand.csv')
return supply_demand_sum
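# Sign convention in supply_demand_sum: each region column holds inputs minus outputs
# of its electricity bus, so positive values mean surplus supply in that hour and
# negative values mean the region depends on imports.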
if __name__ == "__main__":
# load results
path_to_dump = expanduser("~") + '/.oemof/dumps/'
year = 2010
# create dummy energy system
energysystem = create_es('cbc', [t for t in range(8760)], str(year))
# load dumped energy system
energysystem.restore(path_to_dump)
# weeks for stack plot
date_from = {}
date_to = {}
date_from['spring'] = "2010-03-17 00:00:00"
date_to['spring'] = "2010-03-24 00:00:00"
date_from['summer'] = "2010-06-17 00:00:00"
date_to['summer'] = "2010-06-24 00:00:00"
date_from['autumn'] = "2010-09-17 00:00:00"
date_to['autumn'] = "2010-09-24 00:00:00"
date_from['winter'] = "2010-12-17 00:00:00"
date_to['winter'] = "2010-12-24 00:00:00"
# empty results_dc dictionary to write results into
results_dc = {}
# get all inputs and outputs of electricity bus of each region
get_supply_demand_timeseries(energysystem, year, path_to_dump)
# get exports from Brandenburg to neighbor regions and imports from
# neighbor regions to Brandenburg
print_exports(energysystem, results_dc, year, path_to_dump)
# add flows between regions in Brandenburg and between Brandenburg and
# Berlin to results_dc
print_im_exports(energysystem, results_dc, year, path_to_dump)
# calculates total CO2 emissions
results_dc['co2_all_BB'] = co2(energysystem)
transformer_results_df = pd.DataFrame()
for reg in ('HF', 'LS', 'UB', 'PO', 'BE', 'OS'):
# create stack plots for electricity bus and district heating bus for
# winter week
week = 'winter'
for bus in ('elec', 'dh'):
fig = stack_plot(
energysystem, reg, bus, date_from[week], date_to[week])
fig.savefig(path_to_dump + reg + '_' + bus + '_' + week + '.png')
# add sums and maximums of flows as well as full load hours of
# transformers
# return value frame is a dataframe with power output of each
# transformer, electrical efficiency and CO2 per MWh
results_dc, frame = print_validation_outputs(
energysystem, reg, results_dc)
transformer_results_df = transformer_results_df.append(frame)
# get shares of wind and pv of demand fulfillment
get_share_ee(energysystem, reg, results_dc)
# write to csv
transformer_results_df.to_csv(path_to_dump + 'co2_el_energy.csv')
f = open(path_to_dump + '_results.csv', 'w', newline='')
w = csv.writer(f, delimiter=';')
w.writerow(list(results_dc.keys()))
w.writerow(list(results_dc.values()))
    f.close()
|
gpl-3.0
| -4,810,676,121,799,167,000
| 39.734745
| 79
| 0.504234
| false
| 3.306611
| false
| false
| false
|
team-vigir/vigir_behaviors
|
vigir_flexbe_states/src/vigir_flexbe_states/footstep_plan_relative_state.py
|
1
|
4234
|
#!/usr/bin/env python
import rospy
import math
import actionlib
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient
from vigir_footstep_planning_msgs.msg import *
from std_msgs.msg import String, Header
'''
Created on 02/24/2015
@author: Philipp Schillinger and Spyros Maniatopoulos
'''
class FootstepPlanRelativeState(EventState):
'''
Implements a state where the robot plans a relative motion, e.g. 2m to the left.
Please note that the distance is only approximate, actual distance depends on exact step alignment.
-- direction int One of the class constants to specify a direction.
># distance float Distance to walk, given in meters.
#> plan_header Header The header of the plan to perform the walking.
<= planned Successfully created a plan.
<= failed Failed to create a plan.
'''
DIRECTION_LEFT = 3 # PatternParameters.STRAFE_LEFT
DIRECTION_RIGHT = 4 # PatternParameters.STRAFE_RIGHT
DIRECTION_FORWARD = 1 # PatternParameters.FORWARD
DIRECTION_BACKWARD = 2 # PatternParameters.BACKARD
def __init__(self, direction):
'''
Constructor
'''
super(FootstepPlanRelativeState, self).__init__(outcomes=['planned', 'failed'],
input_keys=['distance'],
output_keys=['plan_header'])
if not rospy.has_param("behavior/step_distance_forward"):
Logger.logerr("Need to specify parameter behavior/step_distance_forward at the parameter server")
return
if not rospy.has_param("behavior/step_distance_sideward"):
Logger.logerr("Need to specify parameter behavior/step_distance_sideward at the parameter server")
return
self._step_distance_forward = rospy.get_param("behavior/step_distance_forward")
self._step_distance_sideward = rospy.get_param("behavior/step_distance_sideward")
self._action_topic = '/vigir/footstep_manager/step_plan_request'
self._client = ProxyActionClient({self._action_topic: StepPlanRequestAction})
self._done = False
self._failed = False
self._direction = direction
def execute(self, userdata):
if self._failed:
userdata.plan_header = None
return 'failed'
if self._done:
return 'planned'
if self._client.has_result(self._action_topic):
result = self._client.get_result(self._action_topic)
if result.status.warning != ErrorStatus.NO_WARNING:
Logger.logwarn('Planning footsteps warning:\n%s' % result.status.warning_msg)
if result.status.error == ErrorStatus.NO_ERROR:
userdata.plan_header = result.step_plan.header
num_steps = len(result.step_plan.steps)
Logger.loginfo('Received plan with %d steps' % num_steps)
self._done = True
return 'planned'
else:
userdata.plan_header = None # as recommended: dont send out incomplete plan
Logger.logerr('Planning footsteps failed:\n%s' % result.status.error_msg)
self._failed = True
return 'failed'
def on_enter(self, userdata):
self._failed = False
self._done = False
# Create footstep planner request
strafing = self._direction == PatternParameters.STRAFE_LEFT or self._direction == PatternParameters.STRAFE_RIGHT
pattern_parameters = PatternParameters()
pattern_parameters.mode = self._direction
pattern_parameters.step_distance_forward = self._step_distance_forward if not strafing else 0.0 # will it ignore?
pattern_parameters.step_distance_sideward = self._step_distance_sideward if strafing else 0.0 # will it ignore?
pattern_parameters.close_step = True
step_distance = pattern_parameters.step_distance_sideward if strafing else pattern_parameters.step_distance_forward
pattern_parameters.steps = int(round(userdata.distance / step_distance))
request = StepPlanRequest()
request.parameter_set_name = String('drc_step_no_collision')
request.header = Header(frame_id = '/world', stamp = rospy.Time.now())
request.planning_mode = StepPlanRequest.PLANNING_MODE_PATTERN
request.pattern_parameters = pattern_parameters
action_goal = StepPlanRequestGoal(plan_request = request)
try:
self._client.send_goal(self._action_topic, action_goal)
except Exception as e:
Logger.logwarn('Was unable to create footstep pattern for wide stance:\n%s' % str(e))
self._failed = True
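# Minimal usage sketch (hypothetical behavior code, not part of this module):
#   OperatableStateMachine.add('Plan_Forward',
#       FootstepPlanRelativeState(direction=FootstepPlanRelativeState.DIRECTION_FORWARD),
#       transitions={'planned': 'Execute_Plan', 'failed': 'failed'},
#       remapping={'distance': 'distance', 'plan_header': 'plan_header'})
# The resulting 'plan_header' userdata is meant to be handed to a footstep execution state.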
|
bsd-3-clause
| -8,746,519,600,787,208,000
| 33.422764
| 117
| 0.730279
| false
| 3.403537
| false
| false
| false
|
IMDProjects/ServiceManager
|
ServiceManager.py
|
1
|
3754
|
#-------------------------------------------------------------------------------
# ServiceManager.py
#
# Purpose: Creates, updates, deletes services in ArcGIS Online
#
#
# Prerequisites/Inputs:
# TokenManager: authentication token for NPS ArcGIS Online
# ServiceConfiguration: service configuration structure
# ServiceSource: service content
#
# XML metadata template in known subfolder (<somewhere>/Templates/Metadata)
# Working Folder/Workspace
#
# Outputs:
# Create: feature service in ArcGIS Online repository
# Manage: updated feature service in ArcGIS Online repository
# Delete: log of deleted service(s)
#
# Created by: NPS Inventory and Monitoring Division Staff
# Update date: 20161019
#
#
#
#-------------------------------------------------------------------------------
import urllib
import urllib2
import arcrest
#import TokenManager
from TokenManager import TokenManager
import ServiceConfiguration
from ServiceConfiguration import ServiceConfiguration
#import ServiceSource
from ServiceSource import ServiceSource
class ServiceManager(object):
'''
INFO
----
Object to manage ArcGIS Online feature services
'''
token = None
admin = None
def __init__(self):
if self.token == None:
tm = TokenManager("Portal", "https://nps.maps.arcgis.com", "IMDGISTeam", "G3010g!c2016", "https://irma.nps.gov")
tm.getToken()
self.token = tm.token
if self.admin == None:
self.admin = tm.admin
def getConfiguration(self, itype, title, description, url=None, tags=None, snippet=None, accessInformation=None, metadata=None):
sc = ServiceConfiguration(itype=itype, title=title
, description=description
, url=url
, tags=tags
, snippet=snippet
, accessInformation=accessInformation
, metadata=metadata)
return sc.itemParams
if __name__=='__main__':
sm = ServiceManager()
serviceToken = sm.token
admin = sm.admin
print serviceToken
content = admin.content
userInfo = content.users.user()
ss = ServiceSource()
# Data Store/ServCat example
ss.sourceFilter = ss.dsscConnection("GRI", "GeospatialDataset")
ss.sourceList = ss.getDSSCSources("http://irmaservices.nps.gov/datastore/v4/rest/AdvancedSearch/Composite?top=2000&format=json")
# ArcGIS Server example
#ss.agsConnection("https://inp2300fcvhafo1", "arcgis_admin", "admin2016...")
#ss.sourceList = ss.getAGSSources(ss.agsServer, "Inventory_Geology")
# Metadata: may work if convert this to an XML object: , metadata="https://irma.nps.gov/DataStore/DownloadFile/544273"
for i in range(1, len(ss.sourceList['serviceName'])):
itemParameters = sm.getConfiguration(itype="Map Service"
, title=ss.sourceList['serviceName'][i]
, description=ss.sourceList['description'][i]
, url=ss.sourceList['serviceURL'][i].replace('http','https')
#, url=urllib.urlencode(ss.sourceList['serviceURL'][i])
, tags="National Park Service (NPS) Geologic Resources Inventory (GRI), Geology"
, snippet="Digital Data, Digital Geologic Map, NPS Geologic Resources Inventory"
, accessInformation="National Park Service (NPS) Geologic Resources Inventory (GRI) program, National Park Service (NPS) Inventory and Monitoring Division")
print ss.sourceList['serviceURL'][i]
#print str(itemParameters)
# This request works although the overwrite and folder params are ignored
item = userInfo.addItem(itemParameters=itemParameters, overwrite=True)
print item.title
|
mit
| 5,012,614,175,353,012,000
| 36.919192
| 176
| 0.644113
| false
| 4.089325
| true
| false
| false
|
tangentlabs/django-fancypages
|
fancypages/contrib/oscar_fancypages/abstract_models.py
|
1
|
2983
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ...models import ContentBlock
class AbstractSingleProductBlock(ContentBlock):
name = _("Single Product")
code = 'single-product'
group = _("Catalogue")
template_name = "fancypages/blocks/productblock.html"
product = models.ForeignKey(
'catalogue.Product', verbose_name=_("Single Product"), null=True)
def __unicode__(self):
if self.product:
return u"Product '{0}'".format(self.product.upc)
return u"Product '{0}'".format(self.id)
class Meta:
abstract = True
class AbstractHandPickedProductsPromotionBlock(ContentBlock):
name = _("Hand Picked Products Promotion")
code = 'promotion-hand-picked-products'
group = _("Catalogue")
template_name = "fancypages/blocks/promotionblock.html"
promotion = models.ForeignKey(
'promotions.HandPickedProductList', null=True,
verbose_name=_("Hand Picked Products Promotion"))
def __unicode__(self):
if self.promotion:
return u"Promotion '{0}'".format(self.promotion.pk)
return u"Promotion '{0}'".format(self.id)
class Meta:
abstract = True
class AbstractAutomaticProductsPromotionBlock(ContentBlock):
name = _("Automatic Products Promotion")
code = 'promotion-ordered-products'
group = _("Catalogue")
template_name = "fancypages/blocks/promotionblock.html"
promotion = models.ForeignKey(
'promotions.AutomaticProductList',
verbose_name=_("Automatic Products Promotion"), null=True)
def __unicode__(self):
if self.promotion:
return u"Promotion '{0}'".format(self.promotion.pk)
return u"Promotion '{0}'".format(self.id)
class Meta:
abstract = True
class AbstractOfferBlock(ContentBlock):
name = _("Offer Products")
code = 'products-range'
group = _("Catalogue")
template_name = "fancypages/blocks/offerblock.html"
offer = models.ForeignKey(
'offer.ConditionalOffer', verbose_name=_("Offer"), null=True)
@property
def products(self):
Product = models.get_model('catalogue', 'Product')
product_range = self.offer.condition.range
if product_range.includes_all_products:
return Product.browsable.filter(is_discountable=True)
return product_range.included_products.filter(is_discountable=True)
def __unicode__(self):
if self.offer:
return u"Offer '{0}'".format(self.offer.pk)
return u"Offer '{0}'".format(self.id)
class Meta:
abstract = True
class AbstractPrimaryNavigationBlock(ContentBlock):
name = _("Primary Navigation")
code = 'primary-navigation'
group = _("Content")
template_name = "fancypages/blocks/primary_navigation_block.html"
def __unicode__(self):
return u'Primary Navigation'
class Meta:
abstract = True
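# Sketch of a concrete subclass (hypothetical app code, not part of this module):
#   class SingleProductBlock(AbstractSingleProductBlock):
#       pass
# Subclasses of an abstract model are concrete by default, so Django creates a table
# for them without the field definitions having to be repeated.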
|
bsd-3-clause
| -2,053,354,243,874,709,800
| 28.83
| 75
| 0.65404
| false
| 3.993307
| false
| false
| false
|
palpen/wage_calculator
|
calculate_hours.py
|
1
|
2217
|
import re
import argparse
def wage_calculator(log_txt_file, month, year, wage):
date_pattern = "\*\*Date\*\*" # pattern identifying the date in the file
hours_full = []
with log_txt_file as f:
for line in log_txt_file:
if re.search(r"{0}".format(date_pattern), line): # go to the relevant line
if re.search(month, line) and re.search(str(year), line): # within line, go to desired month/year
# skips two lines to the line containing the number of hours worked
f.next()
hours_line = f.next()
hours_list_str = re.findall(r'[-+]?\d*\.*\d+', hours_line) # put hours in a list
hours_list = [float(x) for x in hours_list_str]
hours_full += hours_list
sum_hours_date = sum(hours_list)
print line.rstrip()
print "Hours logged: " + str(hours_list)
print "Total hours for the day " + str(sum_hours_date) + "\n"
hours_total = sum(hours_full)
print "-----------"
print "Total hours worked in {0} {1}: **{2}** \n".format(month, year,
hours_total)
print "Hourly wage: **${0}/hr** \n".format(wage)
print "Total wage for {0} {1}: **${2}**".format(month, year,
hours_total * wage)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("file",
help="Text file containing hours logged (e.g. ra_hours.txt)",
type=argparse.FileType('r')
)
parser.add_argument("month",
help="The month for which we want the income",
type=str)
parser.add_argument("year",
help="Enter year",
type=int)
parser.add_argument("wage",
help="Enter hourly wage",
type=float)
args = parser.parse_args()
wage_calculator(args.file, args.month, args.year, args.wage)
if __name__ == '__main__':
main()
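# Example invocation (hypothetical log file and values):
#   python calculate_hours.py ra_hours.txt March 2015 25.50
# prints the hours logged per day in March 2015, the monthly total and the wage at $25.50/hr.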
|
mit
| 1,043,829,032,182,839,900
| 32.089552
| 114
| 0.483085
| false
| 4.097967
| false
| false
| false
|
kovidgoyal/build-calibre
|
scripts/pkgs/pyqt.py
|
1
|
1772
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import os
from .constants import PYTHON, MAKEOPTS, build_dir, PREFIX, isosx, iswindows
from .utils import run, replace_in_file
def main(args):
b = build_dir()
if isosx:
b = os.path.join(b, 'python/Python.framework/Versions/2.7')
elif iswindows:
b = os.path.join(b, 'private', 'python')
lp = os.path.join(PREFIX, 'qt', 'lib')
sip, qmake = 'sip', 'qmake'
if iswindows:
sip += '.exe'
qmake += '.exe'
sp = 'Lib' if iswindows else 'lib/python2.7'
cmd = [PYTHON, 'configure.py', '--confirm-license', '--sip=%s/bin/%s' % (PREFIX, sip), '--qmake=%s/qt/bin/%s' % (PREFIX, qmake),
'--bindir=%s/bin' % b, '--destdir=%s/%s/site-packages' % (b, sp), '--verbose', '--sipdir=%s/share/sip/PyQt5' % b,
'--no-stubs', '-c', '-j5', '--no-designer-plugin', '--no-qml-plugin', '--no-docstrings']
if iswindows:
cmd.append('--spec=win32-msvc2015')
cmd.append('--sip-incdir=%s/private/python/include' % PREFIX)
run(*cmd, library_path=lp)
if iswindows:
# In VisualStudio 15 Update 3 the compiler crashes on the below
# statement
replace_in_file('QtGui/sipQtGuipart2.cpp', 'return new ::QPicture[sipNrElem]', 'return NULL')
run('nmake')
run('nmake install')
else:
run('make ' + MAKEOPTS, library_path=lp)
run('make install')
def post_install_check():
run(PYTHON, '-c', 'import sip, sipconfig; from PyQt5 import QtCore, QtGui, QtWebKit', library_path=os.path.join(PREFIX, 'qt', 'lib'))
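# Build flow in short: configure.py is pointed at the bundled sip and qmake, the
# generated makefiles are built with make/nmake and installed into the private python
# tree, and post_install_check() imports the core PyQt5 modules against PREFIX/qt/lib
# to verify the result.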
|
gpl-3.0
| -6,079,356,304,098,510,000
| 39.272727
| 137
| 0.603273
| false
| 3.119718
| false
| false
| false
|
Bl4ckb0ne/ring-api
|
ring_api/server/flask/api/certificate.py
|
1
|
2365
|
#
# Copyright (C) 2016 Savoir-faire Linux Inc
#
# Authors: Seva Ivanov <seva.ivanov@savoirfairelinux.com>
# Simon Zeni <simon.zeni@savoirfairelinux.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from flask import jsonify, request
from flask_restful import Resource
import numpy as np
class Certificates(Resource):
def __init__(self, dring):
self.dring = dring
def get(self, cert_id=None):
if (cert_id):
return jsonify({
'status': 200,
'details': self.dring.config.get_certificate_details(cert_id)
})
return jsonify({
'status': 200,
'pinned': self.dring.config.get_pinned_certificates()
})
def post(self, cert_id):
data = request.get_json(force=True)
if (not data):
return jsonify({
'status': 404,
'message': 'data not found'
})
if ('action' not in data):
return jsonify({
'status': 400,
'message': 'action not found in request data'
})
result = None
if (data.get('action') == 'pin'):
# temporary
local = True if data.get('local') in ["True", "true"] else False
result = self.dring.config.pin_certificate(cert_id, local)
elif (data.get('action') == 'unpin'):
result = self.dring.config.unpin_certificate(cert_id)
else:
return jsonify({
'status': 400,
'message': 'wrong action type'
})
return jsonify({
'status': 200,
'action': result
})
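# Usage sketch (hypothetical routes and id; the actual paths depend on how this
# Resource is registered with flask_restful's Api):
#   GET  /certificates              -> list of pinned certificates
#   GET  /certificates/<cert_id>    -> details for one certificate
#   POST /certificates/<cert_id>    with body {"action": "pin", "local": "true"}
# POST also accepts {"action": "unpin"}; any other action returns a 400 status payload.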
|
gpl-3.0
| -8,790,291,132,487,526,000
| 29.320513
| 80
| 0.587738
| false
| 4.056604
| false
| false
| false
|
AntonioMtn/NZBMegaSearch
|
werkzeug/contrib/testtools.py
|
1
|
2435
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.testtools
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements extended wrappers for simplified testing.
`TestResponse`
A response wrapper which adds various cached attributes for
simplified assertions on various content types.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from ..utils import cached_property, import_string
from ..wrappers import Response
from warnings import warn
warn(DeprecationWarning('werkzeug.contrib.testtools is deprecated and '
'will be removed with Werkzeug 1.0'))
class ContentAccessors(object):
"""
A mixin class for response objects that provides a couple of useful
accessors for unittesting.
"""
def xml(self):
"""Get an etree if possible."""
if 'xml' not in self.mimetype:
raise AttributeError(
'Not a XML response (Content-Type: %s)'
% self.mimetype)
for module in ['xml.etree.ElementTree', 'ElementTree',
'elementtree.ElementTree']:
etree = import_string(module, silent=True)
if etree is not None:
return etree.XML(self.body)
raise RuntimeError('You must have ElementTree installed '
'to use TestResponse.xml')
xml = cached_property(xml)
def lxml(self):
"""Get an lxml etree if possible."""
if ('html' not in self.mimetype and 'xml' not in self.mimetype):
raise AttributeError('Not an HTML/XML response')
from lxml import etree
try:
from lxml.html import fromstring
except ImportError:
fromstring = etree.HTML
if self.mimetype=='text/html':
return fromstring(self.data)
return etree.XML(self.data)
lxml = cached_property(lxml)
def json(self):
"""Get the result of simplejson.loads if possible."""
if 'json' not in self.mimetype:
raise AttributeError('Not a JSON response')
try:
from simplejson import loads
except ImportError:
from json import loads
return loads(self.data)
json = cached_property(json)
class TestResponse(Response, ContentAccessors):
"""Pass this to `werkzeug.test.Client` for easier unittesting."""
|
gpl-2.0
| -8,992,439,196,236,989,000
| 33.295775
| 76
| 0.614374
| false
| 4.77451
| true
| false
| false
|
phako/Totem
|
src/plugins/iplayer/iplayer.py
|
1
|
10459
|
# -*- coding: utf-8 -*-
import gettext
from gi.repository import GObject, Peas, Totem # pylint: disable-msg=E0611
import iplayer2
import threading
gettext.textdomain ("totem")
D_ = gettext.dgettext
_ = gettext.gettext
class IplayerPlugin (GObject.Object, Peas.Activatable):
__gtype_name__ = 'IplayerPlugin'
object = GObject.property (type = GObject.Object)
def __init__ (self):
GObject.Object.__init__ (self)
self.debug = False
self.totem = None
self.programme_download_lock = threading.Lock ()
self.tv_feed = None
self.tv_tree_store = None
def do_activate (self):
self.totem = self.object
# Build the interface
builder = Totem.plugin_load_interface ("iplayer", "iplayer.ui", True,
self.totem.get_main_window (),
self)
container = builder.get_object ('iplayer_vbox')
self.tv_tree_store = builder.get_object ('iplayer_programme_store')
programme_list = builder.get_object ('iplayer_programme_list')
programme_list.connect ('row-expanded', self.__row_expanded_cb)
programme_list.connect ('row-activated', self.__row_activated_cb)
container.show_all ()
self.tv_feed = iplayer2.Feed ('tv')
# Add the interface to Totem's sidebar
self.totem.add_sidebar_page ("iplayer", _(u"BBC iPlayer"), container)
# Get the channel category listings
self._populate_channel_list (self.tv_feed, self.tv_tree_store)
def do_deactivate (self):
self.totem.remove_sidebar_page ("iplayer")
def _populate_channel_list (self, feed, tree_store):
if self.debug:
print "Populating channel list…"
# Add all the channels as top-level rows in the tree store
channels = feed.channels ()
for channel_id, title in channels.items ():
tree_store.append (None, (title, channel_id, None))
# Add the channels' categories in a thread, since they each require a
# network request
thread = PopulateChannelsThread (self.__populate_channel_list_cb,
feed, tree_store)
thread.start ()
def __populate_channel_list_cb (self, tree_store, parent_path, values):
# Callback from PopulateChannelsThread to add stuff to the tree store
if values == None:
self.totem.action_error (_(u'Error listing channel categories'),
_(u'There was an unknown error getting '\
'the list of television channels '\
'available on BBC iPlayer.'))
return False
parent_iter = tree_store.get_iter (parent_path)
category_iter = tree_store.append (parent_iter, values)
# Append a dummy child row so that the expander's visible; we can
# then queue off the expander to load the programme listing for this
# category
tree_store.append (category_iter, [_(u'Loading…'), None, None])
return False
def __row_expanded_cb (self, tree_view, row_iter, path):
tree_model = tree_view.get_model ()
if self.debug:
print "__row_expanded_cb called."
# We only care about the category level (level 1), and only when
# it has the "Loading..." placeholder child row
if (get_iter_level (tree_model, row_iter) != 1 or
tree_model.iter_n_children (row_iter) != 1):
return
# Populate it with programmes asynchronously
self._populate_programme_list (self.tv_feed, tree_model, row_iter)
def __row_activated_cb (self, tree_view, path, view_column):
tree_store = tree_view.get_model ()
tree_iter = tree_store.get_iter (path)
if tree_iter == None:
return
mrl = tree_store.get_value (tree_iter, 2)
# Only allow programme rows to be activated, not channel or category
# rows
if mrl == None:
return
# Add the programme to the playlist and play it
title = tree_store.get_value (tree_iter, 0)
self.totem.add_to_playlist_and_play (mrl, title, True)
def _populate_programme_list (self, feed, tree_store, category_iter):
if self.debug:
print "Populating programme list…"
category_path = tree_store.get_path (category_iter)
thread = PopulateProgrammesThread (self.programme_download_lock,
self.__populate_programme_list_cb,
feed, (tree_store, category_path))
thread.start ()
def __populate_programme_list_cb (self, tree_store, category_path, values,
remove_placeholder):
# Callback from PopulateProgrammesThread to add stuff to the tree store
if values == None:
# Translators: the "programme feed" is the list of TV shows
# available to watch online
self.totem.action_error (_(u'Error getting programme feed'),
_(u'There was an error getting the list '\
'of programmes for this channel and '\
'category combination.'))
return False
category_iter = tree_store.get_iter (category_path)
if category_iter != None:
tree_store.append (category_iter, values)
# Remove the placeholder row
children = tree_store.iter_children (category_iter)
if remove_placeholder and children != None:
tree_store.remove (children)
return False
def get_iter_level (tree_model, tree_iter):
i = 0
while True:
tree_iter = tree_model.iter_parent (tree_iter)
if tree_iter == None:
break
i += 1
return i
def category_name_to_id (category_name):
return category_name.lower ().replace (' ', '_').replace ('&', 'and')
class PopulateChannelsThread (threading.Thread):
# Class to populate the channel list from the Internet
def __init__ (self, callback, feed, tree_model):
self.callback = callback
self.feed = feed
self.tree_model = tree_model
threading.Thread.__init__ (self)
def run (self):
shown_error = False
tree_iter = self.tree_model.get_iter_first ()
while (tree_iter != None):
channel_id = self.tree_model.get_value (tree_iter, 1)
parent_path = self.tree_model.get_path (tree_iter)
try:
# Add this channel's categories as sub-rows
# We have to pass a path because the model could theoretically
# be modified while the idle function is waiting in the queue,
# invalidating an iter
for name, _count in self.feed.get (channel_id).categories ():
category_id = category_name_to_id (name)
GObject.idle_add (self.callback,
self.tree_model, parent_path,
[name, category_id, None])
except StandardError:
# Only show the error once, rather than for each channel
# (it gets a bit grating)
if not shown_error:
GObject.idle_add (self.callback,
self.tree_model, parent_path, None)
shown_error = True
tree_iter = self.tree_model.iter_next (tree_iter)
class PopulateProgrammesThread (threading.Thread):
# Class to populate the programme list for a channel/category combination
# from the Internet
def __init__ (self, download_lock, callback, feed,
(tree_model, category_path)):
self.download_lock = download_lock
self.callback = callback
self.feed = feed
self.tree_model = tree_model
self.category_path = category_path
threading.Thread.__init__ (self)
def run (self):
self.download_lock.acquire ()
category_iter = self.tree_model.get_iter (self.category_path)
if category_iter == None:
GObject.idle_add (self.callback,
self.tree_model, self.category_path, None, False)
self.download_lock.release ()
return
category_id = self.tree_model.get_value (category_iter, 1)
parent_iter = self.tree_model.iter_parent (category_iter)
channel_id = self.tree_model.get_value (parent_iter, 1)
# Retrieve the programmes and return them
feed = self.feed.get (channel_id).get (category_id)
if feed == None:
GObject.idle_add (self.callback,
self.tree_model, self.category_path, None, False)
self.download_lock.release ()
return
# Get the programmes
try:
programmes = feed.list ()
except StandardError:
GObject.idle_add (self.callback,
self.tree_model, self.category_path, None, False)
self.download_lock.release ()
return
# Add the programmes to the tree store
remove_placeholder = True
for programme in programmes:
programme_item = programme.programme
# Get the media, which gives the stream URI.
# We go for mobile quality, since the higher-quality streams are
# RTMP-only which isn't currently supported by GStreamer or xine
# TODO: Use higher-quality streams once
# http://bugzilla.gnome.org/show_bug.cgi?id=566604 is fixed
media = programme_item.get_media_for ('mobile')
if media == None:
# Not worth displaying an error in the interface for this
print "Programme has no HTTP streams"
continue
GObject.idle_add (self.callback,
self.tree_model, self.category_path,
[programme.get_title (), programme.get_summary (),
media.url],
remove_placeholder)
remove_placeholder = False
self.download_lock.release ()
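# Note on the threading pattern above: the worker threads never touch GTK widgets
# directly; every tree-store update is handed back to the main loop through
# GObject.idle_add(), which keeps all UI calls on the main thread.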
|
gpl-2.0
| 8,621,740,412,142,376,000
| 38.296992
| 80
| 0.568736
| false
| 4.303417
| false
| false
| false
|
NERC-CEH/ecomaps
|
ecomaps/services/analysis.py
|
1
|
11358
|
import datetime
from random import randint
from sqlalchemy.orm import subqueryload, subqueryload_all, aliased, contains_eager, joinedload
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import Alias, or_, asc, desc
from ecomaps.model import Dataset, Analysis, AnalysisCoverageDatasetColumn
from ecomaps.services.general import DatabaseService
from ecomaps.model import AnalysisCoverageDataset
import urllib2
__author__ = 'Phil Jenkins (Tessella)'
class AnalysisService(DatabaseService):
"""Provides operations on Analysis objects"""
def get_analyses_for_user(self, user_id):
"""Gets a list of analyses for the given user
Params:
user_id: Get analyses for the user with this ID
"""
with self.readonly_scope() as session:
return session.query(Analysis) \
.options(subqueryload(Analysis.point_dataset)) \
.options(subqueryload(Analysis.coverage_datasets)) \
.options(subqueryload(Analysis.run_by_user)) \
.filter(or_(Analysis.viewable_by == user_id, Analysis.run_by == user_id),
Analysis.deleted != True) \
.all()
def get_public_analyses(self):
"""Gets all analyses that are classed as 'public' i.e. they
aren't restricted to a particular user account"""
with self.readonly_scope() as session:
return session.query(Analysis) \
.options(subqueryload(Analysis.point_dataset)) \
.options(subqueryload(Analysis.coverage_datasets)) \
.options(subqueryload(Analysis.run_by_user)) \
.filter(Analysis.viewable_by == None,
Analysis.deleted != True) \
.all()
def publish_analysis(self, analysis_id):
"""Publishes the analysis with the supplied ID
Params:
analysis_id: ID of the analysis to publish
"""
with self.transaction_scope() as session:
try:
analysis = session.query(Analysis).filter(Analysis.id == analysis_id,
Analysis.deleted != True).one()
except NoResultFound:
return None
# Now update the "viewable by" field - setting to None
# infers that the analysis is published
analysis.viewable_by = None
analysis.result_dataset.viewable_by_user_id = None
def get_analysis_by_id(self, analysis_id, user_id):
"""Returns a single analysis with the given ID
Params:
analysis_id - ID of the analysis to look for
"""
with self.readonly_scope() as session:
try:
return session.query(Analysis)\
.options(joinedload(Analysis.run_by_user)) \
.filter(Analysis.id == analysis_id,
or_(or_(Analysis.viewable_by == user_id,
Analysis.viewable_by == None),
Analysis.run_by == user_id),
Analysis.deleted != True).one()
except NoResultFound:
return None
def create(self, name, point_dataset_id, coverage_dataset_ids,
user_id, unit_of_time, random_group, model_variable,
data_type, model_id, analysis_description,input_hash,
time_indicies):
"""Creates a new analysis object
Params:
name - Friendly name for the analysis
point_dataset_id - Id of dataset containing point data
coverage_dataset_ids - List of coverage dataset ids, which should be
in the format <id>_<column_name>
user_id - Who is creating this analysis?
unit_of_time - unit of time selected
random_group - additional input into the model
model_variable - the variable that is being modelled
data_type - data type of the variable
model_id - id of the model to be used to generate the results
analysis_description - a short string describing the analysis
input_hash - used to quickly identify a duplicate analysis in terms of inputs
time_indicies - if any columns in coverage datasets need time slicing, the index (i.e. the time slice)
to take will be stored against each relevant column in here
Returns:
ID of newly-inserted analysis
"""
with self.transaction_scope() as session:
analysis = Analysis()
analysis.name = name
analysis.run_by = user_id
analysis.viewable_by = user_id
analysis.point_data_dataset_id = int(point_dataset_id)
analysis.deleted = False
analysis.model_id = model_id
analysis.description = analysis_description
# Hook up the coverage datasets
for id in coverage_dataset_ids:
coverage_ds = AnalysisCoverageDataset()
# The coverage dataset 'ID' is actually a
# composite in the form <id>_<column-name>
ds_id, column_name = id.split('_')
id_as_int = int(ds_id)
coverage_ds.dataset_id = id_as_int
col = AnalysisCoverageDatasetColumn()
# Check to see if we need to record a time index against
# this column, will be used for time-slicing later
if id in time_indicies:
col.time_index = time_indicies[id]
col.column = column_name
coverage_ds.columns.append(col)
analysis.coverage_datasets.append(coverage_ds)
# Parameters that are used in the analysis
analysis.unit_of_time = unit_of_time
analysis.random_group = random_group
analysis.model_variable = model_variable
analysis.data_type = data_type
# Hash of input values for future comparison
analysis.input_hash = input_hash
session.add(analysis)
# Flush and refresh to give us the generated ID for this new analysis
session.flush([analysis])
session.refresh(analysis)
return analysis.id
def get_netcdf_file(self, url):
        ''' Gets the file containing the results data
'''
file_name = url + ".dods"
return urllib2.urlopen(file_name)
def get_analysis_for_result_dataset(self, dataset_id):
""" Gets the analysis ID with the given result dataset ID
@param dataset_id: ID of the (result) dataset contained within the analysis
"""
with self.readonly_scope() as session:
return session.query(Analysis.id).filter(Analysis.result_dataset_id == dataset_id,
Analysis.deleted != True).one()[0]
def sort_and_filter_private_analyses_by_column(self,user_id,column,order, filter_variable):
"""Sorts the private analyses by the column name, and applies a filter on the model variable value selected
Params:
user_id: unique id of the user
column: The name of the column to sort on
order: either "asc" or "desc"
filter_variable: the model_variable value used to filter the analyses
"""
with self.readonly_scope() as session:
query = session.query(Analysis) \
.options(subqueryload(Analysis.point_dataset)) \
.options(subqueryload(Analysis.coverage_datasets)) \
.options(subqueryload(Analysis.run_by_user)) \
.filter(or_(Analysis.viewable_by == user_id, Analysis.run_by == user_id),
Analysis.deleted != True)
if filter_variable:
query = query.filter(Analysis.model_variable == filter_variable)
if order == "asc":
return query.order_by(asc(column)).all()
elif order == "desc":
return query.order_by(desc(column)).all()
else:
return query.all()
def sort_and_filter_public_analyses_by_column(self,column, order, filter_variable):
"""Sorts the public analyses by the column name and applies a filter on the model variable value selected
Params:
column: The name of the column to sort on
order: either "asc" or "desc"
filter_variable: the model_variable value used to filter the analyses
"""
with self.readonly_scope() as session:
query = session.query(Analysis) \
.options(subqueryload(Analysis.point_dataset)) \
.options(subqueryload(Analysis.coverage_datasets)) \
.options(subqueryload(Analysis.run_by_user)) \
.filter(Analysis.viewable_by == None,
Analysis.deleted != True)
if filter_variable:
query = query.filter(Analysis.model_variable == filter_variable)
if order == "asc":
return query.order_by(asc(column)).all()
elif order == "desc":
return query.order_by(desc(column)).all()
else:
return query.all()
def get_public_analyses_with_identical_input(self, input_hash):
""" Gets a list of published analyses matching the input hash
@param: Hash of input values used to determine whether an analysis has been run before
"""
with self.readonly_scope() as session:
try:
# Only pull out public analyses for now
return session.query(Analysis) \
.filter(Analysis.input_hash == input_hash,
Analysis.viewable_by == None,
Analysis.deleted != True) \
.all()[0]
except:
return None
def delete_private_analysis(self, analysis_id):
"""Deletion is only a 'soft' delete i.e. a flag will be set so that the analysis is not viewable by the user.
        This is so that if the user wants to recover the analysis, the deletion can be reversed.
Params
analysis_id = id of the analysis to delete
"""
with self.transaction_scope() as session:
analysis = session.query(Analysis).filter(Analysis.id == analysis_id,
Analysis.deleted != True).one()
analysis.deleted = True
def get_all_model_variables(self):
"""Return all the distinct model variable values across all the analyses
"""
with self.readonly_scope() as session:
try:
return session.query(Analysis.model_variable).distinct()
except NoResultFound:
return None
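# Usage sketch (hypothetical ids; constructing the service may require the usual
# ecomaps database configuration to be in place):
#   service = AnalysisService()
#   mine = service.get_analyses_for_user(user_id=42)
#   service.publish_analysis(analysis_id=7)   # clears viewable_by, making it public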
|
gpl-2.0
| 3,181,348,859,315,533,300
| 39.137809
| 118
| 0.558549
| false
| 4.839369
| false
| false
| false
|
kamimura/py-convert-temperatures
|
converter.py
|
1
|
1936
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
# to add a new temperature scale, two if statements need to be added
def convert_to_celsius(t, source):
if source == "Kelvin":
return t - 273.15
elif source == "Celsius":
return t
elif source == "Fahrenheit":
return (t - 32) * 5 / 9
elif source == "Rankine":
return (t - 491.67) * 5 / 9
elif source == "Delisle":
return 100 - t * 2 / 3
elif source == "Newton":
return t * 100 / 33
elif source == "Reaumur":
return t * 5 / 4
elif source == "Romer":
return (t - 7.5) * 40 / 21
else:
raise Exception("convert_to_celsius: {0}".format(source))
def convert_from_celsius(t, target):
if target == "Kelvin":
return t + 273.15
elif target == "Celsius":
return t
elif target == "Fahrenheit":
return t * 9 / 5 + 32
elif target == "Rankine":
return (t + 273.15) * 9 / 5
elif target == "Delisle":
return (100 - t) * 3 / 2
elif target == "Newton":
return t * 33 / 100
elif target == "Reaumur":
return t * 4 / 5
elif target == "Romer":
return t * 21 / 40 + 7.5
else:
raise Exception("convert_from_celsius: {0}".format(target))
def convert_temperatures(t, source, target):
return convert_from_celsius(convert_to_celsius(t, source), target)
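# Quick sanity checks for the round trip above:
#   convert_temperatures(100, "Celsius", "Fahrenheit") -> 212.0
#   convert_temperatures(0, "Celsius", "Kelvin") -> 273.15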
if __name__ == '__main__':
units = ["Kelvin", "Celsius", "Fahrenheit", "Rankine", "Delisle","Newton",
"Reaumur", "Romer"]
# http://en.wikipedia.org/wiki/Comparison_of_temperature_scales#Comparison_of_temperature_scales
print("Absolute zero")
for target in units:
print("{0}: {1:.2f}".format(
target, convert_temperatures(0, "Kelvin", target)))
print("Ice melts")
for target in units:
print("{0}: {1:.2f}".format(
target, convert_temperatures(32, "Fahrenheit", target)))
|
mit
| 1,126,159,050,346,238,600
| 30.737705
| 100
| 0.558884
| false
| 3.17377
| false
| false
| false
|
dubourg/openturns
|
python/test/t_Gumbel_std.py
|
1
|
5074
|
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
    # Instantiate one distribution object
distribution = Gumbel(2.0, -0.5)
print("Distribution ", repr(distribution))
print("Distribution ", distribution)
# Is this distribution elliptical ?
print("Elliptical = ", distribution.isElliptical())
# Is this distribution continuous ?
print("Continuous = ", distribution.isContinuous())
# Test for realization of distribution
oneRealization = distribution.getRealization()
print("oneRealization=", repr(oneRealization))
# Test for sampling
size = 10000
oneSample = distribution.getSample(size)
print("oneSample first=", repr(oneSample[0]), " last=", repr(oneSample[1]))
print("mean=", repr(oneSample.computeMean()))
print("covariance=", repr(oneSample.computeCovariance()))
# Define a point
point = NumericalPoint(distribution.getDimension(), 1.0)
print("Point= ", repr(point))
# Show PDF and CDF at point
eps = 1e-5
# derivative of PDF with regards its arguments
DDF = distribution.computeDDF(point)
print("ddf =", repr(DDF))
# by the finite difference technique
print("ddf (FD)=", repr(NumericalPoint(1, (distribution.computePDF(
point + NumericalPoint(1, eps)) - distribution.computePDF(point + NumericalPoint(1, -eps))) / (2.0 * eps))))
# PDF value
LPDF = distribution.computeLogPDF(point)
print("log pdf=%.6f" % LPDF)
PDF = distribution.computePDF(point)
print("pdf =%.6f" % PDF)
# by the finite difference technique from CDF
print("pdf (FD)=%.6f" % ((distribution.computeCDF(point + NumericalPoint(1, eps)) -
distribution.computeCDF(point + NumericalPoint(1, -eps))) / (2.0 * eps)))
    # CDF and complementary CDF values
CDF = distribution.computeCDF(point)
print("cdf=%.6f" % CDF)
CCDF = distribution.computeComplementaryCDF(point)
print("ccdf=%.6f" % CCDF)
PDFgr = distribution.computePDFGradient(point)
print("pdf gradient =", repr(PDFgr))
# by the finite difference technique
PDFgrFD = NumericalPoint(2)
PDFgrFD[0] = (Gumbel(distribution.getAlpha() + eps, distribution.getBeta()).computePDF(point)
- Gumbel(distribution.getAlpha() - eps, distribution.getBeta()).computePDF(point)) / (2.0 * eps)
PDFgrFD[1] = (Gumbel(distribution.getAlpha(), distribution.getBeta() + eps).computePDF(point)
- Gumbel(distribution.getAlpha(), distribution.getBeta() - eps).computePDF(point)) / (2.0 * eps)
print("pdf gradient (FD)=", repr(PDFgrFD))
    # derivative of the CDF with regards the parameters of the distribution
CDFgr = distribution.computeCDFGradient(point)
print("cdf gradient =", repr(CDFgr))
# by the finite difference technique
CDFgrFD = NumericalPoint(2)
CDFgrFD[0] = (Gumbel(distribution.getAlpha() + eps, distribution.getBeta()).computeCDF(point)
- Gumbel(distribution.getAlpha() - eps, distribution.getBeta()).computeCDF(point)) / (2.0 * eps)
CDFgrFD[1] = (Gumbel(distribution.getAlpha(), distribution.getBeta() + eps).computeCDF(point)
- Gumbel(distribution.getAlpha(), distribution.getBeta() - eps).computeCDF(point)) / (2.0 * eps)
print("cdf gradient (FD)=", repr(CDFgrFD))
# quantile
quantile = distribution.computeQuantile(0.95)
print("quantile=", repr(quantile))
print("cdf(quantile)=%.6f" % distribution.computeCDF(quantile))
mean = distribution.getMean()
print("mean=", repr(mean))
standardDeviation = distribution.getStandardDeviation()
print("standard deviation=", repr(standardDeviation))
skewness = distribution.getSkewness()
print("skewness=", repr(skewness))
kurtosis = distribution.getKurtosis()
print("kurtosis=", repr(kurtosis))
covariance = distribution.getCovariance()
print("covariance=", repr(covariance))
parameters = distribution.getParametersCollection()
print("parameters=", repr(parameters))
for i in range(6):
print("standard moment n=", i, " value=",
distribution.getStandardMoment(i))
print("Standard representative=", distribution.getStandardRepresentative())
# Specific to this distribution
mu = distribution.getMu()
print("mu=%.6f" % mu)
sigma = distribution.getSigma()
print("sigma=%.6f" % sigma)
newDistribution = Gumbel(mu, sigma, Gumbel.MUSIGMA)
print("alpha from (mu, sigma)=%.6f" % newDistribution.getAlpha())
print("beta from (mu, sigma)=%.6f" % newDistribution.getBeta())
a = distribution.getA()
print("a=%.6f" % a)
b = distribution.getB()
print("b=%.6f" % b)
newDistribution = Gumbel(a, b, Gumbel.AB)
print("alpha from (a, b)=%.6f" % newDistribution.getAlpha())
print("beta from (a, b)=%.6f" % newDistribution.getBeta())
except:
import sys
print("t_Gumbel.py", sys.exc_info()[0], sys.exc_info()[1])
|
gpl-3.0
| -9,043,513,947,748,732,000
| 41.283333
| 116
| 0.665353
| false
| 3.616536
| false
| false
| false
|
3dfxsoftware/cbss-addons
|
account_bank_statement_vauxoo/model/account_journal.py
|
1
|
5195
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# "Nhomar Hernandez <nhomar@vauxoo.com>"
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.netsvc as netsvc
import logging
class account_journal(osv.Model):
_inherit = 'account.journal'
_columns = {
'default_interim_account_id': fields.many2one('account.account',
'Interim Account',
help="""In banks you probably want send account move
lines to a interim account before affect the default
debit and credit account who
will have the booked
balance for this kind of operations, in this field
you configure this account."""),
'default_income_account_id': fields.many2one('account.account',
'Extra Income Account',
help="""In banks you probably want as counter part for extra
banking income money use an specific account in this field
you can canfigure this account"""),
'default_expense_account_id': fields.many2one('account.account',
'Expense Account',
help="""In banks you probable wants send account move lines to an
extra account to be able to record account move lines due to bank
comisions and bank debit notes, in this field you configure this
account."""),
'concept_ids': fields.one2many('account.journal.bs.config', 'bsl_id',
'Concept Lines', required=False),
'moveper_line': fields.boolean('One Move per Line', required=False,
help="""Do you want one move per line or one move per bank
statement,True: One Per Line False:
One Per bank statement"""),
}
class account_journal_bs_config(osv.Model):
_name = 'account.journal.bs.config'
_order = 'sequence asc'
logger = netsvc.Logger()
_columns = {
'sequence': fields.integer('Label'),
'bsl_id': fields.many2one('account.journal', 'Journal',
required=False),
'partner_id': fields.many2one('res.partner', 'Partner',
required=False),
'account_id': fields.many2one('account.account', 'Account',
required=False),
'expresion': fields.char('Text To be Compared', size=128,
required=True, readonly=False),
        'name': fields.char('Concept Label', size=128, required=True,
readonly=False),
}
_defaults = {
'sequence': 10,
}
def _check_expresion(self, cr, user, ids, context=None):
"""
A user defined constraints listed in {_constraints}
@param cr: cursor to database
@param user: id of current user
@param ids: list of record ids on which constraints executes
@return: return True if all constraints satisfied, False otherwise
"""
try:
exp_ = self.browse(cr, user, ids, context=context)[0].expresion
exp = eval(exp_)
self.logger.notifyChannel('Chain. '+str(exp), netsvc.LOG_DEBUG,
'Succefully Validated')
if type(exp) is list:
return True
else:
self.logger.notifyChannel(
'Chain. '+str(exp_), netsvc.LOG_ERROR,
'Fail With You must use a list')
return False
except Exception, var:
self.logger.notifyChannel('Chain. '+str(exp_), netsvc.LOG_ERROR,
'Fail With %s' % var)
return False
_constraints = [
        (_check_expresion, '''Error: The expression is not a list,
            it must look like:
            ["cadenaA","cadenaB","CadenaC"]
            or it is invalid''', ['expresion']),
]
account_journal_bs_config
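# Note: the `expresion` field must eval() to a Python list of strings, for example
#   ["cadenaA", "cadenaB", "CadenaC"]
# otherwise the _check_expresion constraint rejects the record.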
|
gpl-2.0
| 8,968,663,386,985,846,000
| 44.156522
| 119
| 0.536491
| false
| 4.695298
| true
| false
| false
|
Kaftanov/Cchat
|
chat-server/server.py
|
1
|
7250
|
#!/usr/bin/env python3
"""
#############################
Server application || TCP, socket
version python: python3
#############################
"""
import select
import signal
import socket
import sys
import uuid
import datetime
from communication import send, receive
from messages import Messages
from dbworker import DbHandler
from cmdworker import Commands
class Server:
"""
    Attributes contained in the Server class
        MsgWorker: Messages helper used to build server messages
        DbWorker: DbHandler used to store users and their state
        CmdWorker: Commands helper for special server commands (e.g. '/online', '/info')
        uid_link: dict linking a client socket to its session uid
        user_list: list of connected client sockets
        server_password: server password string
        server: listening TCP socket
    functions Server contains
        __init__
            info: initialize the helpers and the server socket
            variable: listen_count, serv_host, serv_port
        sighandler
            info: shutting down server and closing all sockets
            variable: without variable
        generate_uid
            info: return the stored uid for a login or generate a new uuid4
            variable: login
        authenticate_user
            info: check login ('log') or registration ('reg') data,
                returns (True, uid, message) on success
            variable: dict with keys ['login', 'password', 'type']
        broadcast_message
            info: sending message to all sockets in self.user_list,
                optionally skipping one socket
            variable: message, sockt
        run_server_loop
            info: main server loop based on select.select
            variable: without variable
"""
def __init__(self, listen_count=None, serv_host=None, serv_port=None):
if listen_count is None:
listen_count = 5
if serv_host is None:
serv_host = 'localhost'
if serv_port is None:
serv_port = 3490
# set server messages worker
self.MsgWorker = Messages(host=serv_host, port=serv_port, backlog=listen_count)
# set data base worker
self.DbWorker = DbHandler()
# set command worker
self.CmdWorker = Commands()
self.uid_link = {}
self.user_list = []
self.server_password = 'qwerty'
# initialize server socket
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((serv_host, serv_port))
self.server.listen(listen_count)
print(self.MsgWorker.welcome_string())
# set signal handler
signal.signal(signal.SIGINT, self.sighandler)
def sighandler(self, signum, frame):
""" Shutdown the server if typing Ctrl + C """
for sock in self.user_list:
sock.close()
self.server.close()
sys.exit('Shutting down server...')
def generate_uid(self, login):
uid = self.DbWorker.get_uid_by_login(login)
return uid if uid else str(uuid.uuid4())
def authenticate_user(self, data):
try:
login = data['login']
password = data['password']
uid = self.generate_uid(login)
if data['type'] == 'log':
if password == self.DbWorker.get_passwd_by_login(login):
self.DbWorker.update_state(uid=uid, state=1, date='NULL')
else:
return False,
elif data['type'] == 'reg':
user_form = {'uid': uid, 'login': login, 'password': password,
'state': 1, 'left': 'NULL'}
self.DbWorker.add_user(user_form)
else:
return False,
message = self.MsgWorker.print_new_user(login)
return True, uid, message
except KeyError as error:
print(error)
return False,
def broadcast_message(self, message, sockt=None):
""" Broadcast messages for all users"""
if sockt is None:
for sock in self.user_list:
send(sock, message)
else:
for sock in self.user_list:
if sock is not sockt:
send(sock, message)
def run_server_loop(self):
input_socket_list = [self.server]
is_running = True
while is_running:
try:
in_fds, out_fds, err_fds = select.select(input_socket_list,
self.user_list, [])
except select.error as error:
print(error)
break
except socket.error as error:
print(error)
break
for sock in in_fds:
if sock is self.server:
user, user_address = self.server.accept()
data = receive(user)
request = self.authenticate_user(data)
if request[0]:
message = request[2]
self.broadcast_message(message)
self.uid_link[user] = request[1]
input_socket_list.append(user)
self.user_list.append(user)
send(user, 'Success')
else:
send(user, 'Error')
continue
else:
try:
data = receive(sock)
if data:
print(data)
if data in self.CmdWorker.command_list:
send(sock, self.CmdWorker.execute_commands(data))
else:
user = self.DbWorker.get_user(self.uid_link[sock])['login']
head = '%s~%s' % (user, self.MsgWorker.time())
message = data
self.broadcast_message({'head': head, 'message': message}, sock)
else:
time = self.CmdWorker.time()
self.DbWorker.update_state(self.uid_link[sock], 0, time)
sock.close()
input_socket_list.remove(sock)
self.user_list.remove(sock)
message = self.MsgWorker.print_user_left(self.DbWorker.get_user(
self.uid_link[sock])['login'])
self.broadcast_message(message)
except socket.error as error:
print(error)
input_socket_list.remove(sock)
self.user_list.remove(sock)
self.server.close()
if __name__ == "__main__":
Server().run_server_loop()
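# Note: the server multiplexes all client sockets with select.select() in a single
# thread; authenticate_user() handles both 'log' (login) and 'reg' (registration)
# payloads and returns (True, uid, join_message) on success, (False,) otherwise.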
|
gpl-3.0
| -5,659,754,009,381,891,000
| 36.371134
| 96
| 0.495448
| false
| 4.754098
| false
| false
| false
|
kcolletripp/popular
|
search/views.py
|
1
|
1297
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse
from django.views import generic
from .models import Target
from .forms import TargetForm
import utils
# Create your views here.
#class IndexView(generic.ListView):
# template_name = 'search/index.html'
# context_object_name = 'latest_searches'
#
# def get_queryset(self):
# #Return the last 5 published questions. (not including future releases)
# return Target.objects.all().order_by('-target_name')[:5]
#class ResultsView(generic.DetailView):
# model = Target
# template_name = 'search/result.html'
def index(request):
latest_searches = Target.objects.all().order_by('-target_name')[:5]
form = TargetForm()
context = {'latest_searches': latest_searches, 'form':form} #dictionary
return render(request, 'search/index.html', context)
def result(request):
context = {}
if request.method == 'POST':
form = TargetForm(request.POST)
target = form.save()
context['target'] = target #'key':value
context['views'] = utils.get_wiki(target)
else:
#GET, or first time request
form = TargetForm()
context['form'] = form
return render(request, 'search/result.html', context)
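# URL wiring sketch (hypothetical search/urls.py, not part of this file):
#   urlpatterns = [
#       url(r'^$', views.index, name='index'),
#       url(r'^result/$', views.result, name='result'),
#   ]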
|
gpl-3.0
| 1,606,601,490,411,531,800
| 31.425
| 80
| 0.680031
| false
| 3.716332
| false
| false
| false
|
yastrov/battleship
|
battleship/serverfactory.py
|
1
|
1999
|
# coding=utf-8
"""
This factory creates BattleshipProtocol() instances for every
connected user. If the number of connected users reaches max_clients, this factory creates an instance
of ErrorBattleshipProtocol() for new users.
Also, on its own initialization, this class creates an instance of the class Battleship, which is the game's main loop.
See method buildProtocol() in the current class and method initClient() in class Battleship().
"""
from twisted.internet.protocol import ServerFactory
from protocol import BattleshipProtocol
from service import Battleship
from errorprotocol import ErrorBattleshipProtocol
from twisted.python import log
class BattleshipServerFactory(ServerFactory):
"""
Battleship server factory. Process incoming client requests
"""
protocol = BattleshipProtocol
def __init__(self, max_clients, service):
"""
Battleship server factory constructor
"""
log.msg('Battleship server initialized')
# parameters
self.battleship_service = Battleship(max_clients)
self.service = service
def buildProtocol(self, addr):
"""
        This method is called when a new client connects.
        Create a new BattleshipProtocol if clients < max_clients,
        or
        send an error to the client if clients >= max_clients
"""
if len(self.battleship_service.clients) < self.battleship_service.max_clients:
p = self.protocol()
p.factory = self
p = self.battleship_service.initClient(p, addr)
log.msg('class BattleshipServerFactory, method buildProtocol: protocol was built')
return p
else:
"""
            If the count of players is more than self.max_clients then close connections for all new clients
"""
p = ErrorBattleshipProtocol()
p.factory = self
return p
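# Example wiring (a sketch, not part of this module): the factory is normally
# handed to a listening port by whatever startup script builds it, roughly:
#
#   from twisted.internet import reactor
#   factory = BattleshipServerFactory(max_clients=2, service=None)
#   reactor.listenTCP(8007, factory)
#   reactor.run()
#
# The port number and the `service` argument above are placeholders.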
|
apache-2.0
| 6,821,497,901,575,590,000
| 34.381818
| 112
| 0.646823
| false
| 4.840194
| false
| false
| false
|
marshallmcdonnell/journals
|
journals/databases/icat/sns/icat.py
|
1
|
8206
|
#!/usr/bin/env python
#import flask
from __future__ import print_function
import requests
import xmljson
import json
import lxml
import decimal
import pandas
from journals.utilities import process_numbers
#uri = "http://icat.sns.gov:2080/icat-rest-ws/experiment/SNS"
#uri = "http://icat.sns.gov:2080/icat-rest-ws/experiment/SNS/NOM"
#uri = "http://icat.sns.gov:2080/icat-rest-ws/experiment/SNS/NOM/IPTS-"+ipts+"/meta"
class ICAT(object):
def __init__(self, instrument):
self._base_uri = "http://icat.sns.gov:2080/icat-rest-ws"
self._ipts_uri = self._base_uri + "/experiment/SNS/"+instrument
self._run_uri = self._base_uri + "/dataset/SNS/"+instrument
self._data = None
self._los_data = dict()
self._meta_ipts_data = dict()
self._runs = list()
self._ipts_list = list()
self.key_list = ['ipts', 'duration', 'startTime', 'totalCounts', 'protonCharge', 'title']
# Unit Functions
#---------------
def _uri2xml(self,uri):
xml_data = requests.get(uri)
xml_data = lxml.etree.XML(xml_data.content)
return xml_data
def _xml2json(self,xml_data):
return xmljson.badgerfish.data(xml_data)
def _uri2xml2json(self,uri):
xml_data = self._uri2xml(uri)
json_data = self._xml2json(xml_data)
return json_data
def _get_list_of_all_ipts(self):
uri = self._ipts_uri
json_data = self._uri2xml2json(uri)
for x in json_data['proposals']['proposal']:
if isinstance(x['$'], str):
if x['$'].startswith('IPTS'):
self._ipts_list.append(int(x['$'].split('-')[1].split('.')[0]))
def _get_xml_data_tree(self,data):
xml_tree = lxml.etree.tostring(self.data, pretty_print=True)
return xml_tree
def _get_runs_from_ipts(self,data):
return [ element.get('id') for element in data.iter() if element.tag == 'run' ]
def _get_los_for_run(self,run,json_data):
json_metadata = json_data['metadata']
try:
ipts_pulled = json_metadata['proposal']['$'].split('-')[1]
except:
ipts_pulled = None
los_data = dict()
uid = run
meta_dict = self._get_meta_for_run(json_metadata)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
self._update_master_los(los_data)
'''
NOTE: Below, the check for list is specific to IPTSs w/ proposal lists. These are:
Index IPTS
----- ----
88 8814
119 9818
'''
def _get_meta_for_ipts(self,runs,proposal_json):
if type(proposal_json) == list:
ipts_pulled = int(proposal_json[0]['@id'].split('-')[1])
runs_data = process_numbers(proposal_json[0]['runRange']['$'])
for i, proposal in enumerate(proposal_json[1:]):
                runs_data += process_numbers(proposal_json[i+1]['runRange']['$'])
startTime = [(':'.join( proposal_json[0]['createTime']['$'].split(':')[0:3])).split('.')[0]]
for i, proposal in enumerate(proposal_json[1:]):
startTime += [(':'.join( proposal_json[i+1]['createTime']['$'].split(':')[0:3])).split('.')[0]]
else:
ipts_pulled = int(proposal_json['@id'].split('-')[1])
runs_data = process_numbers(proposal_json['runRange']['$'])
startTime = [(':'.join( proposal_json['createTime']['$'].split(':')[0:3])).split('.')[0]]
meta_ipts_data = dict()
meta_ipts_data[ipts_pulled] = {'runs' : runs_data,
'createtime' : startTime}
self._update_master_meta_ipts_data(meta_ipts_data)
def _update_master_meta_ipts_data(self,meta_ipts_data):
self._meta_ipts_data.update(meta_ipts_data)
def _get_los_for_ipts(self,runs,proposal_json):
if type(proposal_json) == list:
ipts_pulled = int(proposal_json[0]['@id'].split('-')[1])
runs_data = proposal_json[0]['runs']['run']
for i, proposal in enumerate(proposal_json[1:]):
runs_data += proposal_json[i+1]['runs']['run']
else:
ipts_pulled = int(proposal_json['@id'].split('-')[1])
runs_data = proposal_json['runs']['run']
los_data = dict()
if len(runs) == 1:
uid = proposal_json['runs']['run']['@id']
x = proposal_json['runs']['run']
meta_dict = self._get_meta_for_run(x)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
else:
for x in runs_data:
uid = x['@id']
meta_dict = self._get_meta_for_run(x)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
self._update_master_los(los_data)
def _update_master_los(self,los_data):
self._los_data.update(los_data)
def _get_meta_for_run(self,metadata):
meta = dict.fromkeys(self.key_list)
print(meta)
for key in self.key_list:
if key in metadata:
if key == 'duration':
meta[key] = str(int(float(metadata[key]['$'])/60.))+'min'
elif key == 'startTime':
meta[key] = (':'.join( metadata[key]['$'].split(':')[0:3])).split('.')[0]
elif key == 'totalCounts':
meta[key] = '{:.2E}'.format(decimal.Decimal(metadata[key]['$']))
elif key == 'protonCharge':
meta[key] = float("{0:.2f}".format(metadata[key]['$'] / 1e12) )
else:
meta[key] = metadata[key]['$']
return meta
# Main Functions
#------------------
def initializeMetaIptsData(self):
ipts_list = self.getListOfIPTS()
self.getIPTSs( ipts_list[-2:], data='meta')
def getMetaIptsData(self):
return self._meta_ipts_data
def applyIptsFilter(self,ipts_list):
self.reset_los()
self.getIPTSs(ipts_list)
def getDataFrame(self):
data = self.get_los()
df = pandas.DataFrame.from_dict(data,orient='index')
df = df.reset_index()
df = df.rename(columns={'index': '#Scan', 'duration': 'time', 'protonCharge': 'PC/pC'})
col_order = ['#Scan', 'ipts', 'time', 'startTime', 'totalCounts', 'PC/pC', 'title']
df = df[col_order]
return df
def getListOfIPTS(self):
if not self._ipts_list:
self._get_list_of_all_ipts()
return sorted(self._ipts_list)
def getIPTSs(self,proposals,**kwargs):
for i, ipts in enumerate(proposals):
self.getIPTS(ipts,**kwargs)
def getIPTS(self,ipts,data='all'):
uri = self._ipts_uri + "/IPTS-"+str(ipts)+"/"+data
xml_data = self._uri2xml(uri)
runs = self._get_runs_from_ipts(xml_data)
json_data = self._xml2json(xml_data)
if data == 'all':
try:
self._get_los_for_ipts(runs,json_data['proposals']['proposal'])
except KeyError:
print(ipts, json_data['proposals'])
if data == 'meta':
self._get_meta_for_ipts(runs,json_data['proposals']['proposal'])
def getRun(self,run):
uri = self._run_uri+'/'+ str(run)+"/metaOnly"
json_data = self._uri2xml2json(uri)
self._get_los_for_run(run, json_data)
def reset_los(self):
self._los_data = dict()
def get_los(self):
return self._los_data
def print_runs(self):
if self._runs is None:
self._get_runs()
for run in self._runs:
print(run)
def print_los(self):
if self._los_data is None:
print(self._los_data, "( No data yet in los dictionary. )")
los_data = self._los_data
print("#Scan IPTS time starttime totalCounts PC/C title")
for run in sorted(los_data.keys()):
print(run, end=' ')
for key in self.key_list:
print(los_data[run][key], end=' ')
print()
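# Minimal usage sketch (instrument name and IPTS slice are illustrative only):
#
#   icat = ICAT('NOM')
#   ipts_list = icat.getListOfIPTS()
#   icat.applyIptsFilter(ipts_list[-3:])  # pull run metadata for recent IPTSs
#   df = icat.getDataFrame()              # pandas table of the fetched runs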
|
mit
| 5,587,422,821,829,324,000
| 33.334728
| 111
| 0.530466
| false
| 3.400746
| false
| false
| false
|
karesansui/karesansui
|
bin/stop_network.py
|
1
|
3039
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import sys
import logging
from optparse import OptionParser
from ksscommand import KssCommand, KssCommandException, KssCommandOptException
import __cmd__
try:
import karesansui
from karesansui import __version__
from karesansui.lib.virt.virt import KaresansuiVirtConnection
from karesansui.lib.utils import load_locale
except ImportError, e:
print >>sys.stderr, "[Error] some packages not found. - %s" % e
sys.exit(1)
_ = load_locale()
usage = '%prog [options]'
def getopts():
optp = OptionParser(usage=usage, version=__version__)
optp.add_option('-n', '--name', dest='name', help=_('Network name'))
return optp.parse_args()
def chkopts(opts):
if not opts.name:
raise KssCommandOptException('ERROR: %s option is required.' % '-n or --name')
class StopNetwork(KssCommand):
def process(self):
(opts, args) = getopts()
chkopts(opts)
self.up_progress(10)
conn = KaresansuiVirtConnection(readonly=False)
try:
active_networks = conn.list_active_network()
inactive_networks = conn.list_inactive_network()
if not (opts.name in active_networks or opts.name in inactive_networks):
raise KssCommandException('Network not found. - net=%s' % (opts.name))
self.up_progress(10)
conn.stop_network(opts.name)
self.up_progress(40)
if opts.name in conn.list_active_network():
raise KssCommandException('Failed to stop network. - net=%s' % (opts.name))
self.logger.info('Stopped network. - net=%s' % (opts.name))
print >>sys.stdout, _('Stopped network. - net=%s') % (opts.name)
return True
finally:
conn.close()
if __name__ == "__main__":
target = StopNetwork()
sys.exit(target.run())
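# Example invocation (the network name is illustrative):
#   ./stop_network.py -n default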
|
mit
| 6,552,379,057,524,915,000
| 33.146067
| 91
| 0.678513
| false
| 3.881226
| false
| false
| false
|
DynamoDS/Coulomb
|
SessionTools/stacktrace_extractor.py
|
1
|
5506
|
from __future__ import print_function
import json
import xml.etree.ElementTree as xmlElementTree
import base64
import os
import sys
from os.path import isfile, join
import gzip
import datetime
import random
import traceback
import sys
import features_JSON
import features_XML
VERSION="StackTraces-2018-08-16" # This is the name of the feature set, update it with any major changes
# Progress counters
processed = 0
skipped = 0
err_count = 0
i = 0
if len(sys.argv) != 2:
print ("Usage: python stacktrace_extractor.py PathToSessions")
path = sys.argv[1]
paths = []
# Walk over the dataset computing a list of all the files to process
print ('Enumerating files')
for root, subdirs, files in os.walk(path):
for ff in files:
i = i + 1
if i % 1000 == 0:
print (i)
path = join(root,ff)
if (not path.endswith('sorted.gz')):
continue
paths.append(path)
# Randomise them in order to avoid collisions + leapfrogging problems with distributed workers
# If processing locally removing this may slightly improve performance
random.shuffle(paths)
print ('Paths to process: ' + str(len(paths)))
for path in paths:
# Report progress
print (str(datetime.datetime.now()) + ": " + path + ": processed: " + str(processed) + ", errs: " + str(err_count) + ", results_exist: " + str(skipped) + ", total: " + str(processed + skipped) )
out_path = path + ".features" + "." + VERSION
# skip files that have been processed already
if os.path.exists(out_path) and os.path.getmtime(out_path) > os.path.getmtime(path):
skipped = skipped + 1
continue
try:
f = gzip.open (path)
fo = open(out_path , 'w')
processed = processed + 1
isJSON = False
# Initialise structures for this day
stackTraces = []
tags = set()
userId = None
version = None
sessionStartMicroTime = 0
sessionEndMicroTime = 0
sessionDate = ''
# Helper function to export data so far
# TODO: De-nest this function
def writeDataToFile():
if (len(stackTraces) == 0): # If no stack traces, skip
if os.path.exists(out_path):
os.remove(out_path)
return
print (json.dumps(
{
"StackTraces" : stackTraces,
"Tags" : list(tags),
"UserID": userId,
"WorkspaceVersion": version,
"SessionDuration": sessionEndMicroTime - sessionStartMicroTime,
"Date": sessionDate
}), file=fo)
# Process each line of the session file
for ln in f:
if ln.startswith("Downloading phase"): # Marker from the download script, ignore
continue
data = json.loads(ln)
# Compute the first day
if sessionStartMicroTime == 0:
sessionStartMicroTime = int(data["MicroTime"])
sessionDate = data["DateTime"].split(" ")[0]
# If a day has rolled over, export the data
if sessionDate != data["DateTime"].split(" ")[0]:
print (path + " has session over multiple days")
# Split the session: write session so far to file, then reset data collection.
writeDataToFile()
stackTraces = []
tags = set()
sessionStartMicroTime = int(data["MicroTime"])
sessionDate = data["DateTime"].split(" ")[0]
sessionEndMicroTime = int(data["MicroTime"])
# Keep track of what we've seen in the data file
tags.add(data["Tag"])
# Data is in base64 to protect against special characters, unpack it
b64decodedData = base64.b64decode(data["Data"])
# Split what to do based on what kind of message this is
            # A stack trace being reported
if data["Tag"] == "StackTrace":
stackTraces.append(b64decodedData)
# A workspace being reported
if data["Tag"] == "Workspace":
if b64decodedData == '': # An empty workspace, ignore
continue
# Select which feature extraction library to use depending on what version on the file format
feature_lib = None
if b64decodedData.startswith("<"):
feature_lib = features_XML
else:
isJSON = True
continue # Skip JSON coded files for now
# feature_lib = features_JSON
# Extract version number (first time only)
if (version == None):
version = feature_lib.getVersion(b64decodedData)
# Extract user ID (first time only)
if userId == None:
userId = data["UserID"]
except Exception as e:
        # If there was a problem, get the stack trace for what happened
exc_type, exc_value, exc_traceback = sys.exc_info()
# Log it
print (e)
print (path)
traceback.print_tb(exc_traceback, file=sys.stdout)
# Remove partial results
fo.flush()
os.remove(out_path)
err_count = err_count + 1
continue
# Flush any further data to the file
writeDataToFile()
|
mit
| 1,103,576,669,027,327,500
| 31.579882
| 198
| 0.562841
| false
| 4.426045
| false
| false
| false
|
nasa/39A
|
spaceapps/locations/forms.py
|
1
|
3715
|
from django.forms import Form, ModelForm, CharField, ChoiceField
from django.forms.models import (
modelformset_factory,
inlineformset_factory,
BaseInlineFormSet,
)
from django.forms.formsets import formset_factory, BaseFormSet
import selectable.forms as selectable
from awards.models import LocalAward, Nomination
from registration.models import Registration
from projects.models import Project
from .lookups import UserLookup
from .models import (
Location,
Sponsor,
Lead,
Resource,
)
class LocalAwardForm(Form):
def __init__(self, projects, *args, **kwargs):
super(LocalAwardForm, self).__init__(*args, **kwargs)
self.fields['project'] = ChoiceField(choices=projects)
choices=(('1', 'First',))
title = CharField(max_length=100)
project = ChoiceField(choices=choices)
class LocationForm(ModelForm):
class Meta:
model = Location
exclude = ('name', 'slug', 'private', 'start', 'end' )
class LeadForm(ModelForm):
class Meta(object):
model = Lead
widgets = {
'lead': selectable.AutoCompleteSelectWidget(
lookup_class=UserLookup)
}
class CheckInForm(ModelForm):
test = 'happy'
class Meta(object):
model = Registration
# fields = ('check_in', )
CheckInFormSet = modelformset_factory(
Registration,
form=CheckInForm,
extra=0,
)
SponsorFormSet = inlineformset_factory(
Location,
Sponsor,
extra=1,
)
LeadFormSet = inlineformset_factory(
Location,
Lead,
form=LeadForm,
extra=1,
)
ResourceFormSet = inlineformset_factory(
Location,
Resource,
extra=1,
)
class LocalAwardBaseFormSet(BaseFormSet):
def __init__(self, projects, *args, **kwargs):
self.projects = projects
super(LocalAwardBaseFormSet, self).__init__(*args, **kwargs)
def _construct_forms(self):
self.forms = []
for i in xrange(self.total_form_count()):
self.forms.append(self._construct_form(i, projects=self.projects))
LocalAwardFormSet = formset_factory(
LocalAwardForm,
formset=LocalAwardBaseFormSet,
extra=1,
)
class AwardBaseFormSet(BaseInlineFormSet):
def __init__(self, *args, **kwargs):
self.projects = kwargs.pop('projects')
super(AwardBaseFormSet, self).__init__(*args, **kwargs)
def _construct_forms(self):
self.forms = []
for i in xrange(self.total_form_count()):
self.forms.append(self._construct_form(i, projects=self.projects))
class AwardForm(ModelForm):
class Meta:
model = LocalAward
def __init__(self, *args, **kwargs):
projects = kwargs.pop('projects')
super(AwardForm, self).__init__(*args, **kwargs)
if projects is not None:
self.fields['project'].queryset = Project.objects.filter(
id__in=projects).distinct()
AwardFormSet = inlineformset_factory(
Location,
LocalAward,
form=AwardForm,
formset=AwardBaseFormSet,
extra=1,
)
class NominationForm(ModelForm):
class Meta:
model = LocalAward
def __init__(self, *args, **kwargs):
projects = kwargs.pop('projects')
super(NominationForm, self).__init__(*args, **kwargs)
if projects is not None:
self.fields['project'].queryset = Project.objects.filter(
id__in=projects).distinct()
NominationFormSet = inlineformset_factory(
Location,
Nomination,
form=NominationForm,
formset=AwardBaseFormSet,
extra=2,
max_num=2,
)
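# Usage sketch for the award formsets (variable names are illustrative): the
# custom base formset expects the eligible projects to be passed in, e.g.
#
#   formset = AwardFormSet(request.POST or None, instance=location,
#                          projects=project_ids)
#   if formset.is_valid():
#       formset.save()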
|
apache-2.0
| -321,959,692,586,215,400
| 25.161972
| 78
| 0.621534
| false
| 3.964781
| false
| false
| false
|
linsalrob/EdwardsLab
|
bin/NSF_bibtex_by_year.py
|
1
|
1668
|
"""
Parse a bibtex file and only print those entries within a certain number of years
"""
import os
import sys
import argparse
import datetime
from pybtex.database import parse_file, BibliographyData
if __name__ == "__main__":
now = datetime.datetime.now()
earlyyear = now.year - 4
parser = argparse.ArgumentParser(description='Parse a bibtex file and create a list of conflicts')
parser.add_argument('-f', help='bibtex file', required=True)
parser.add_argument('-y', help="Earliest year to report conflict (default={})".format(earlyyear), default=earlyyear, type=int)
args = parser.parse_args()
entries = set()
dupentries=False
with open(args.f, 'r') as bin:
for l in bin:
if l.startswith('@'):
l = l.replace('@misc', '')
l = l.replace('@article', '')
l = l.replace('@inproceedings', '')
if l in entries:
sys.stderr.write("Duplicate entry " + l.replace('{', '').replace(',', ''))
dupentries=True
entries.add(l)
if dupentries:
sys.stderr.write("FATAL: The bibtex file has duplicate entries in it. Please remove them before trying to continue\n")
sys.stderr.write("(It is an issue with Google Scholar, but pybtex breaks with duplicate entries. Sorry)\n")
sys.exit(-1)
bib = parse_file(args.f, 'bibtex')
for e in bib.entries:
if 'year' in bib.entries[e].fields:
if int(bib.entries[e].fields['year']) >= args.y:
bib_data = BibliographyData({e : bib.entries[e]})
print(bib_data.to_string('bibtex'))
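# Example invocation (file name and cutoff year are illustrative):
#   python NSF_bibtex_by_year.py -f citations.bib -y 2015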
|
mit
| 4,710,846,875,079,895,000
| 35.26087
| 130
| 0.598321
| false
| 3.861111
| false
| false
| false
|
tzicatl/lfs-shipping-ups
|
shipping_ups/plugin.py
|
1
|
2998
|
from django.contrib.sites.models import get_current_site
from lfs.cart.utils import get_cart
from lfs.customer.utils import get_customer
from lfs.plugins import ShippingMethodPriceCalculator
from ups.client import UPSClient, UPSError
from ups.model import Package, Address
from .models import UPSConfiguration
class UPSPriceCalculator(ShippingMethodPriceCalculator):
#Cache price
_price = None
def _ups_config(self):
site = get_current_site(self.request)
return UPSConfiguration.objects.get(site=site)
def _get_quote(self):
ups_cfg = self._ups_config()
credentials = {
'username': ups_cfg.username,
'password': ups_cfg.password,
'access_license': ups_cfg.access_license,
'shipper_number': ups_cfg.shipper_number,
}
shipper = Address(
name=ups_cfg.shipper_name,
address=ups_cfg.shipper_address,
city=ups_cfg.shipper_city,
state=ups_cfg.shipper_state,
zip=ups_cfg.shipper_zipcode,
country=ups_cfg.shipper_country.code
)
customer = get_customer(self.request)
ship_address = customer.get_selected_shipping_address()
recipient = Address(
name=' '.join([ship_address.firstname or '', ship_address.lastname or '']),
address=' '.join([ship_address.line1 or '', ship_address.line2 or '']),
city=ship_address.city,
state=ship_address.state,
zip=ship_address.zip_code,
country=ship_address.country.code
)
cart = get_cart(self.request)
#weight, length, width, height
product_info = [0, 0, 0, 0]
for line_item in cart.get_items():
product_info[0] += line_item.product.weight * line_item.amount
product_info[1] += line_item.product.length * line_item.amount
product_info[2] += line_item.product.width * line_item.amount
product_info[3] += line_item.product.height * line_item.amount
#import pdb; pdb.set_trace()
quote = 0.0
if all(product_info):
packages = [Package(*product_info)]
ups = UPSClient(credentials)
response = ups.rate(
packages=packages,
packaging_type=ups_cfg.default_packaging_type,
shipper=shipper,
recipient=recipient
)
quote = float(response['info'][0]['cost'])
return quote
def get_price_net(self):
return self.get_price_gross()
def get_price_gross(self):
#XXX No error handler :P
# return self.get_price_net() * ((100 + self.shipping_method.tax.rate) / 100)
try:
if self._price is None:
self._price = self._get_quote()
return self._price
except UPSError:
return 0.0
def get_tax(self):
#How do I calculate taxes?
return 0.0
|
bsd-3-clause
| -7,931,353,110,327,888,000
| 31.956044
| 87
| 0.588726
| false
| 3.82398
| false
| false
| false
|
wanghuok02/iherb
|
iherb/spiders/iherbspider.py
|
1
|
1439
|
import scrapy
import logging
from scrapy.spiders import Spider
from scrapy.selector import Selector
from iherb.items import IherbItem
class IherbSpider(Spider):
name = "iherbspider"
allowed_domains = ["iherb.cn"]
max_page = 5
cur_page = 1
start_urls = [
"http://www.iherb.cn/Supplements?oos=true&disc=false&p=1"
]
def parse(self, response):
brands = response.xpath("//article[div[span[@itemprop='price']]]/a[1]/@title").extract()
desc = response.xpath("//article[div[span[@itemprop='price']]]/a[1]/@title").extract()
urls = response.xpath("//article[div[span[@itemprop='price']]]/a[1]/@href").extract()
prices = response.xpath("//span[@itemprop='price']/@content").extract()
items = []
length = len(brands)
for it in range(length):
item = IherbItem()
item['url'] = urls[it]
item['brand'] = brands[it].split(',')[0]
item['desc'] = brands[it].split(',')[1]
item['price'] = prices[it][1:]
#items.append(item)
yield item
if(self.cur_page <= 431):
self.cur_page += 1
self.logger.info("cur_page*********************************** %s", self.cur_page)
yield scrapy.Request("http://www.iherb.cn/Supplements?oos=true&disc=false&p="+str(self.cur_page), self.parse)
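# Typical invocation via the standard scrapy CLI (output file name is
# illustrative):
#   scrapy crawl iherbspider -o supplements.json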
|
apache-2.0
| -8,967,152,911,173,662,000
| 35.923077
| 122
| 0.542738
| false
| 3.484262
| false
| false
| false
|
emanuele/jstsp2015
|
simulation.py
|
1
|
7420
|
"""Simulation estimating Type I and Type II error of CBT and KTST.
Author: Sandro Vega-Pons, Emanuele Olivetti
"""
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
from kernel_two_sample_test import MMD2u, compute_null_distribution
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold, cross_val_score
# from multiprocessing import cpu_count
from joblib import Parallel, delayed
# Temporarily stop warnings to cope with the too verbose sklearn
# GridSearchCV.score warning:
import warnings
warnings.simplefilter("ignore")
# boundaries for seeds generation during parallel processing:
MAX_INT = np.iinfo(np.uint32(1)).max
MIN_INT = np.iinfo(np.uint32(1)).min
def estimate_pvalue(score_unpermuted, scores_null):
iterations = len(scores_null)
p_value = max(1.0/iterations, (scores_null > score_unpermuted).sum() /
float(iterations))
return p_value
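# Illustrative numbers: with 100 permutation scores of which 7 exceed the
# observed score, estimate_pvalue returns max(1/100, 7/100) = 0.07; if none
# exceed it, the floor 1/iterations (here 0.01) is returned instead of 0.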
def compute_svm_score(K, y, n_folds, scoring='accuracy', random_state=0):
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
random_state=random_state)
clf = SVC(C=1.0, kernel='precomputed')
scores = cross_val_score(clf, K, y, scoring=scoring, cv=cv, n_jobs=1)
score = scores.mean()
return score
def compute_svm_score_nestedCV(K, y, n_folds, scoring='accuracy',
random_state=None,
param_grid=[{'C': np.logspace(-5, 5, 20)}]):
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
random_state=random_state)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
cvclf = SVC(kernel='precomputed')
y_train = y[train]
cvcv = StratifiedKFold(y_train, n_folds=n_folds,
shuffle=True,
random_state=random_state)
clf = GridSearchCV(cvclf, param_grid=param_grid, scoring=scoring,
cv=cvcv, n_jobs=1)
clf.fit(K[:, train][train, :], y_train)
scores[i] = clf.score(K[test, :][:, train], y[test])
return scores.mean()
if __name__ == '__main__':
np.random.seed(0)
print("JSTSP Simulation Experiments.")
nA = 20 # size of class A
nB = 20 # size of class B
d = 5 # number of dimensions
# separation between the two normally-distributed classes:
delta = 0.75
twist = np.ones(d)
print("nA = %s" % nA)
print("nB = %s" % nB)
print("d = %s" % d)
print("delta = %s" % delta)
print("twist = %s" % twist)
muA = np.zeros(d)
muB = np.ones(d) * delta
covA = np.eye(d)
covB = np.eye(d) * twist
seed_data = 0 # random generation of data
rng_data = np.random.RandomState(seed_data)
seed_ktst = 0 # random permutations of KTST
rng_ktst = np.random.RandomState(seed_ktst)
seed_cv = 0 # random splits of cross-validation
rng_cv = np.random.RandomState(seed_cv)
svm_param_grid = [{'C': np.logspace(-5, 5, 20)}]
# svm_param_grid = [{'C': np.logspace(-3, 2, 10)}]
repetitions = 100
print("This experiments will be repeated on %s randomly-sampled datasets."
% repetitions)
scores = np.zeros(repetitions)
p_value_scores = np.zeros(repetitions)
mmd2us = np.zeros(repetitions)
p_value_mmd2us = np.zeros(repetitions)
for r in range(repetitions):
print("")
print("Repetition %s" % r)
A = rng_data.multivariate_normal(muA, covA, size=nA)
B = rng_data.multivariate_normal(muB, covB, size=nB)
X = np.vstack([A, B])
y = np.concatenate([np.zeros(nA), np.ones(nB)])
distances = pairwise_distances(X, metric='euclidean')
sigma2 = np.median(distances) ** 2.0
K = np.exp(- distances * distances / sigma2)
# K = X.dot(X.T)
iterations = 10000
mmd2u_unpermuted = MMD2u(K, nA, nB)
print("mmd2u: %s" % mmd2u_unpermuted)
mmd2us[r] = mmd2u_unpermuted
mmd2us_null = compute_null_distribution(K, nA, nB, iterations,
random_state=rng_ktst)
p_value_mmd2u = estimate_pvalue(mmd2u_unpermuted, mmd2us_null)
print("mmd2u p-value: %s" % p_value_mmd2u)
p_value_mmd2us[r] = p_value_mmd2u
scoring = 'accuracy'
n_folds = 5
iterations = 1
# score_unpermuted = compute_svm_score_nestedCV(K, y, n_folds,
# scoring=scoring,
# random_state=rng_cv)
rngs = [np.random.RandomState(rng_cv.randint(low=MIN_INT, high=MAX_INT)) for i in range(iterations)]
scores_unpermuted = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, y, n_folds, scoring, rngs[i], param_grid=svm_param_grid) for i in range(iterations))
score_unpermuted = np.mean(scores_unpermuted)
print("accuracy: %s" % score_unpermuted)
scores[r] = score_unpermuted
# print("Doing permutations:"),
iterations = 100
scores_null = np.zeros(iterations)
# for i in range(iterations):
# if (i % 10) == 0:
# print(i)
# yi = rng_cv.permutation(y)
# scores_null[i] = compute_svm_score_nestedCV(K, yi, n_folds,
# scoring=scoring,
# random_state=rng_cv)
rngs = [np.random.RandomState(rng_cv.randint(low=MIN_INT, high=MAX_INT)) for i in range(iterations)]
yis = [np.random.permutation(y) for i in range(iterations)]
scores_null = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, yis[i], n_folds, scoring, rngs[i], param_grid=svm_param_grid) for i in range(iterations))
p_value_score = estimate_pvalue(score_unpermuted, scores_null)
p_value_scores[r] = p_value_score
print("%s p-value: %s" % (scoring, p_value_score))
p_value_threshold = 0.05
mmd2u_power = (p_value_mmd2us[:r+1] <= p_value_threshold).mean()
scores_power = (p_value_scores[:r+1] <= p_value_threshold).mean()
print("p_value_threshold: %s" % p_value_threshold)
print("Partial results - MMD2u: %s , %s: %s" %
(mmd2u_power, scoring, scores_power))
print("")
print("FINAL RESULTS:")
p_value_threshold = 0.1
print("p_value_threshold: %s" % p_value_threshold)
mmd2u_power = (p_value_mmd2us <= p_value_threshold).mean()
scores_power = (p_value_scores <= p_value_threshold).mean()
print("MMD2u Power: %s" % mmd2u_power)
print("%s Power: %s" % (scoring, scores_power))
print("")
p_value_threshold = 0.05
print("p_value_threshold: %s" % p_value_threshold)
mmd2u_power = (p_value_mmd2us <= p_value_threshold).mean()
scores_power = (p_value_scores <= p_value_threshold).mean()
print("MMD2u Power: %s" % mmd2u_power)
print("%s Power: %s" % (scoring, scores_power))
print("")
p_value_threshold = 0.01
print("p_value_threshold: %s" % p_value_threshold)
mmd2u_power = (p_value_mmd2us <= p_value_threshold).mean()
scores_power = (p_value_scores <= p_value_threshold).mean()
print("MMD2u Power: %s" % mmd2u_power)
print("%s Power: %s" % (scoring, scores_power))
print("")
|
mit
| -3,438,818,910,070,712,000
| 37.645833
| 171
| 0.592453
| false
| 3.185917
| false
| false
| false
|
azlanismail/prismgames
|
examples/games/car/networkx/readwrite/gexf.py
|
1
|
33475
|
"""
****
GEXF
****
Read and write graphs in GEXF format.
GEXF (Graph Exchange XML Format) is a language for describing complex
network structures, their associated data and dynamics.
This implementation does not support mixed graphs (directed and
undirected edges together).
Format
------
GEXF is an XML format. See http://gexf.net/format/schema.html for the
specification and http://gexf.net/format/basic.html for examples.
"""
# Based on GraphML NetworkX GraphML reader
import itertools
import networkx as nx
from networkx.utils import open_file, make_str
try:
from xml.etree.cElementTree import Element, ElementTree, tostring
except ImportError:
try:
from xml.etree.ElementTree import Element, ElementTree, tostring
except ImportError:
pass
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)'])
__all__ = ['write_gexf', 'read_gexf', 'relabel_gexf_graph', 'generate_gexf']
@open_file(1,mode='wb')
def write_gexf(G, path, encoding='utf-8',prettyprint=True,version='1.1draft'):
"""Write G in GEXF format to path.
"GEXF (Graph Exchange XML Format) is a language for describing
complex networks structures, their associated data and dynamics" [1]_.
Parameters
----------
G : graph
A NetworkX graph
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_gexf(G, "test.gexf")
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
edges together).
The node id attribute is set to be the string of the node label.
If you want to specify an id use set it as node data, e.g.
node['a']['id']=1 to set the id of node 'a' to 1.
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
writer = GEXFWriter(encoding=encoding,prettyprint=prettyprint,
version=version)
writer.add_graph(G)
writer.write(path)
def generate_gexf(G, encoding='utf-8',prettyprint=True,version='1.1draft'):
"""Generate lines of GEXF format representation of G"
"GEXF (Graph Exchange XML Format) is a language for describing
complex networks structures, their associated data and dynamics" [1]_.
Parameters
----------
G : graph
A NetworkX graph
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> linefeed=chr(10) # linefeed=\n
>>> s=linefeed.join(nx.generate_gexf(G)) # doctest: +SKIP
>>> for line in nx.generate_gexf(G): # doctest: +SKIP
... print line
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
edges together).
The node id attribute is set to be the string of the node label.
If you want to specify an id use set it as node data, e.g.
node['a']['id']=1 to set the id of node 'a' to 1.
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
writer = GEXFWriter(encoding=encoding,prettyprint=prettyprint,
version=version)
writer.add_graph(G)
for line in str(writer).splitlines():
yield line
@open_file(0,mode='rb')
def read_gexf(path,node_type=str,relabel=False,version='1.1draft'):
"""Read graph in GEXF format from path.
"GEXF (Graph Exchange XML Format) is a language for describing
complex networks structures, their associated data and dynamics" [1]_.
Parameters
----------
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
node_type: Python type (default: str)
Convert node ids to this type
relabel : bool (default: False)
If True relabel the nodes to use the GEXF node "label" attribute
instead of the node "id" attribute as the NetworkX node label.
Returns
-------
graph: NetworkX graph
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
edges together).
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
reader = GEXFReader(node_type=node_type,version=version)
if relabel:
G=relabel_gexf_graph(reader(path))
else:
G=reader(path)
return G
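# Example (a sketch): read back a file produced by write_gexf, using the GEXF
# "label" attribute as the NetworkX node name:
#   G = nx.read_gexf("test.gexf", relabel=True)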
class GEXF(object):
# global register_namespace
versions={}
d={'NS_GEXF':"http://www.gexf.net/1.1draft",
'NS_VIZ':"http://www.gexf.net/1.1draft/viz",
'NS_XSI':"http://www.w3.org/2001/XMLSchema-instance",
'SCHEMALOCATION':' '.join(['http://www.gexf.net/1.1draft',
'http://www.gexf.net/1.1draft/gexf.xsd'
]),
'VERSION':'1.1'
}
versions['1.1draft']=d
d={'NS_GEXF':"http://www.gexf.net/1.2draft",
'NS_VIZ':"http://www.gexf.net/1.2draft/viz",
'NS_XSI':"http://www.w3.org/2001/XMLSchema-instance",
'SCHEMALOCATION':' '.join(['http://www.gexf.net/1.2draft',
'http://www.gexf.net/1.2draft/gexf.xsd'
]),
'VERSION':'1.2'
}
versions['1.2draft']=d
types=[(int,"integer"),
(float,"float"),
(float,"double"),
(bool,"boolean"),
(list,"string"),
(dict,"string"),
]
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
types.extend([
(str,"liststring"),
(str,"anyURI"),
(str,"string")])
except ValueError: # Python 2.6+
types.extend([
(str,"liststring"),
(str,"anyURI"),
(str,"string"),
(unicode,"liststring"),
(unicode,"anyURI"),
(unicode,"string")])
xml_type = dict(types)
python_type = dict(reversed(a) for a in types)
convert_bool={'true':True,'false':False}
# try:
# register_namespace = ET.register_namespace
# except AttributeError:
# def register_namespace(prefix, uri):
# ET._namespace_map[uri] = prefix
def set_version(self,version):
d=self.versions.get(version)
if d is None:
raise nx.NetworkXError('Unknown GEXF version %s'%version)
self.NS_GEXF = d['NS_GEXF']
self.NS_VIZ = d['NS_VIZ']
self.NS_XSI = d['NS_XSI']
        self.SCHEMALOCATION = d['SCHEMALOCATION']
self.VERSION=d['VERSION']
self.version=version
# register_namespace('viz', d['NS_VIZ'])
class GEXFWriter(GEXF):
# class for writing GEXF format files
# use write_gexf() function
def __init__(self, graph=None, encoding="utf-8",
mode='static',prettyprint=True,
version='1.1draft'):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GEXF writer requires '
'xml.elementtree.ElementTree')
self.prettyprint=prettyprint
self.mode=mode
self.encoding = encoding
self.set_version(version)
self.xml = Element("gexf",
{'xmlns':self.NS_GEXF,
'xmlns:xsi':self.NS_XSI,
'xmlns:viz':self.NS_VIZ,
'xsi:schemaLocation':self.SCHEMALOCATION,
'version':self.VERSION})
# counters for edge and attribute identifiers
self.edge_id=itertools.count()
self.attr_id=itertools.count()
# default attributes are stored in dictionaries
self.attr={}
self.attr['node']={}
self.attr['edge']={}
self.attr['node']['dynamic']={}
self.attr['node']['static']={}
self.attr['edge']['dynamic']={}
self.attr['edge']['static']={}
if graph is not None:
self.add_graph(graph)
def __str__(self):
if self.prettyprint:
self.indent(self.xml)
s=tostring(self.xml).decode(self.encoding)
return s
def add_graph(self, G):
# Add a graph element to the XML
if G.is_directed():
default='directed'
else:
default='undirected'
graph_element = Element("graph",defaultedgetype=default,mode=self.mode)
self.graph_element=graph_element
self.add_nodes(G,graph_element)
self.add_edges(G,graph_element)
self.xml.append(graph_element)
def add_nodes(self, G, graph_element):
nodes_element = Element('nodes')
for node,data in G.nodes_iter(data=True):
node_data=data.copy()
node_id = make_str(node_data.pop('id', node))
kw={'id':node_id}
label = make_str(node_data.pop('label', node))
kw['label']=label
try:
pid=node_data.pop('pid')
kw['pid'] = make_str(pid)
except KeyError:
pass
# add node element with attributes
node_element = Element("node", **kw)
# add node element and attr subelements
default=G.graph.get('node_default',{})
node_data=self.add_parents(node_element, node_data)
if self.version=='1.1':
node_data=self.add_slices(node_element, node_data)
else:
node_data=self.add_spells(node_element, node_data)
node_data=self.add_viz(node_element,node_data)
node_data=self.add_attributes("node", node_element,
node_data, default)
nodes_element.append(node_element)
graph_element.append(nodes_element)
def add_edges(self, G, graph_element):
def edge_key_data(G):
# helper function to unify multigraph and graph edge iterator
if G.is_multigraph():
for u,v,key,data in G.edges_iter(data=True,keys=True):
edge_data=data.copy()
edge_data.update(key=key)
edge_id=edge_data.pop('id',None)
if edge_id is None:
edge_id=next(self.edge_id)
yield u,v,edge_id,edge_data
else:
for u,v,data in G.edges_iter(data=True):
edge_data=data.copy()
edge_id=edge_data.pop('id',None)
if edge_id is None:
edge_id=next(self.edge_id)
yield u,v,edge_id,edge_data
edges_element = Element('edges')
for u,v,key,edge_data in edge_key_data(G):
kw={'id':make_str(key)}
try:
edge_weight=edge_data.pop('weight')
kw['weight']=make_str(edge_weight)
except KeyError:
pass
try:
edge_type=edge_data.pop('type')
kw['type']=make_str(edge_type)
except KeyError:
pass
edge_element = Element("edge",
source=make_str(u),target=make_str(v),
**kw)
default=G.graph.get('edge_default',{})
edge_data=self.add_viz(edge_element,edge_data)
edge_data=self.add_attributes("edge", edge_element,
edge_data, default)
edges_element.append(edge_element)
graph_element.append(edges_element)
def add_attributes(self, node_or_edge, xml_obj, data, default):
# Add attrvalues to node or edge
attvalues=Element('attvalues')
if len(data)==0:
return data
if 'start' in data or 'end' in data:
mode='dynamic'
else:
mode='static'
for k,v in data.items():
# rename generic multigraph key to avoid any name conflict
if k == 'key':
k='networkx_key'
attr_id = self.get_attr_id(make_str(k), self.xml_type[type(v)],
node_or_edge, default, mode)
if type(v)==list:
# dynamic data
for val,start,end in v:
e=Element("attvalue")
e.attrib['for']=attr_id
e.attrib['value']=make_str(val)
e.attrib['start']=make_str(start)
e.attrib['end']=make_str(end)
attvalues.append(e)
else:
# static data
e=Element("attvalue")
e.attrib['for']=attr_id
e.attrib['value']=make_str(v)
attvalues.append(e)
xml_obj.append(attvalues)
return data
def get_attr_id(self, title, attr_type, edge_or_node, default, mode):
# find the id of the attribute or generate a new id
try:
return self.attr[edge_or_node][mode][title]
except KeyError:
# generate new id
new_id=str(next(self.attr_id))
self.attr[edge_or_node][mode][title] = new_id
attr_kwargs = {"id":new_id, "title":title, "type":attr_type}
attribute=Element("attribute",**attr_kwargs)
# add subelement for data default value if present
default_title=default.get(title)
if default_title is not None:
default_element=Element("default")
default_element.text=make_str(default_title)
attribute.append(default_element)
            # now insert it into the XML
attributes_element=None
for a in self.graph_element.findall("attributes"):
# find existing attributes element by class and mode
a_class=a.get('class')
a_mode=a.get('mode','static') # default mode is static
if a_class==edge_or_node and a_mode==mode:
attributes_element=a
if attributes_element is None:
# create new attributes element
attr_kwargs = {"mode":mode,"class":edge_or_node}
attributes_element=Element('attributes', **attr_kwargs)
self.graph_element.insert(0,attributes_element)
attributes_element.append(attribute)
return new_id
def add_viz(self,element,node_data):
viz=node_data.pop('viz',False)
if viz:
color=viz.get('color')
if color is not None:
if self.VERSION=='1.1':
e=Element("{%s}color"%self.NS_VIZ,
r=str(color.get('r')),
g=str(color.get('g')),
b=str(color.get('b')),
)
else:
e=Element("{%s}color"%self.NS_VIZ,
r=str(color.get('r')),
g=str(color.get('g')),
b=str(color.get('b')),
a=str(color.get('a')),
)
element.append(e)
size=viz.get('size')
if size is not None:
e=Element("{%s}size"%self.NS_VIZ,value=str(size))
element.append(e)
thickness=viz.get('thickness')
if thickness is not None:
e=Element("{%s}thickness"%self.NS_VIZ,value=str(thickness))
element.append(e)
shape=viz.get('shape')
if shape is not None:
if shape.startswith('http'):
e=Element("{%s}shape"%self.NS_VIZ,
value='image',uri=str(shape))
else:
e=Element("{%s}shape"%self.NS_VIZ,value=str(shape.get))
element.append(e)
position=viz.get('position')
if position is not None:
e=Element("{%s}position"%self.NS_VIZ,
x=str(position.get('x')),
y=str(position.get('y')),
z=str(position.get('z')),
)
element.append(e)
return node_data
def add_parents(self,node_element,node_data):
parents=node_data.pop('parents',False)
if parents:
parents_element=Element('parents')
for p in parents:
e=Element('parent')
e.attrib['for']=str(p)
parents_element.append(e)
node_element.append(parents_element)
return node_data
def add_slices(self,node_element,node_data):
slices=node_data.pop('slices',False)
if slices:
slices_element=Element('slices')
for start,end in slices:
e=Element('slice',start=str(start),end=str(end))
slices_element.append(e)
node_element.append(slices_element)
return node_data
def add_spells(self,node_element,node_data):
spells=node_data.pop('spells',False)
if spells:
spells_element=Element('spells')
for start,end in spells:
e=Element('spell',start=str(start),end=str(end))
spells_element.append(e)
node_element.append(spells_element)
return node_data
def write(self, fh):
# Serialize graph G in GEXF to the open fh
if self.prettyprint:
self.indent(self.xml)
document = ElementTree(self.xml)
header='<?xml version="1.0" encoding="%s"?>'%self.encoding
fh.write(header.encode(self.encoding))
document.write(fh, encoding=self.encoding)
def indent(self, elem, level=0):
# in-place prettyprint formatter
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class GEXFReader(GEXF):
# Class to read GEXF format files
# use read_gexf() function
def __init__(self, node_type=None,version='1.1draft'):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GEXF reader requires '
'xml.elementtree.ElementTree')
self.node_type=node_type
# assume simple graph and test for multigraph on read
self.simple_graph=True
self.set_version(version)
def __call__(self, stream):
self.xml = ElementTree(file=stream)
g=self.xml.find("{%s}graph" % self.NS_GEXF)
if g is not None:
return self.make_graph(g)
# try all the versions
for version in self.versions:
self.set_version(version)
g=self.xml.find("{%s}graph" % self.NS_GEXF)
if g is not None:
return self.make_graph(g)
raise nx.NetworkXError("No <graph> element in GEXF file")
def make_graph(self, graph_xml):
# mode is "static" or "dynamic"
graph_mode = graph_xml.get("mode", "")
self.dynamic=(graph_mode=='dynamic')
# start with empty DiGraph or MultiDiGraph
edgedefault = graph_xml.get("defaultedgetype", None)
if edgedefault=='directed':
G=nx.MultiDiGraph()
else:
G=nx.MultiGraph()
# graph attributes
graph_start=graph_xml.get('start')
if graph_start is not None:
G.graph['start']=graph_start
graph_end=graph_xml.get('end')
if graph_end is not None:
G.graph['end']=graph_end
# node and edge attributes
attributes_elements=graph_xml.findall("{%s}attributes"%self.NS_GEXF)
# dictionaries to hold attributes and attribute defaults
node_attr={}
node_default={}
edge_attr={}
edge_default={}
for a in attributes_elements:
attr_class = a.get("class")
if attr_class=='node':
na,nd = self.find_gexf_attributes(a)
node_attr.update(na)
node_default.update(nd)
G.graph['node_default']=node_default
elif attr_class=='edge':
ea,ed = self.find_gexf_attributes(a)
edge_attr.update(ea)
edge_default.update(ed)
G.graph['edge_default']=edge_default
else:
                raise nx.NetworkXError("Unknown attribute class: %s"%attr_class)
# Hack to handle Gephi0.7beta bug
# add weight attribute
ea={'weight':{'type': 'double', 'mode': 'static', 'title': 'weight'}}
ed={}
edge_attr.update(ea)
edge_default.update(ed)
G.graph['edge_default']=edge_default
# add nodes
nodes_element=graph_xml.find("{%s}nodes" % self.NS_GEXF)
if nodes_element is not None:
for node_xml in nodes_element.findall("{%s}node" % self.NS_GEXF):
self.add_node(G, node_xml, node_attr)
# add edges
edges_element=graph_xml.find("{%s}edges" % self.NS_GEXF)
if edges_element is not None:
for edge_xml in edges_element.findall("{%s}edge" % self.NS_GEXF):
self.add_edge(G, edge_xml, edge_attr)
# switch to Graph or DiGraph if no parallel edges were found.
if self.simple_graph:
if G.is_directed():
G=nx.DiGraph(G)
else:
G=nx.Graph(G)
return G
def add_node(self, G, node_xml, node_attr, node_pid=None):
# add a single node with attributes to the graph
# get attributes and subattributues for node
data = self.decode_attr_elements(node_attr, node_xml)
data = self.add_parents(data, node_xml) # add any parents
if self.version=='1.1':
data = self.add_slices(data, node_xml) # add slices
else:
data = self.add_spells(data, node_xml) # add spells
data = self.add_viz(data, node_xml) # add viz
data = self.add_start_end(data, node_xml) # add start/end
# find the node id and cast it to the appropriate type
node_id = node_xml.get("id")
if self.node_type is not None:
node_id=self.node_type(node_id)
# every node should have a label
node_label = node_xml.get("label")
data['label']=node_label
# parent node id
node_pid = node_xml.get("pid", node_pid)
if node_pid is not None:
data['pid']=node_pid
# check for subnodes, recursive
subnodes=node_xml.find("{%s}nodes" % self.NS_GEXF)
if subnodes is not None:
for node_xml in subnodes.findall("{%s}node" % self.NS_GEXF):
self.add_node(G, node_xml, node_attr, node_pid=node_id)
G.add_node(node_id, data)
def add_start_end(self, data, xml):
# start and end times
node_start = xml.get("start")
if node_start is not None:
data['start']=node_start
node_end = xml.get("end")
if node_end is not None:
data['end']=node_end
return data
def add_viz(self, data, node_xml):
# add viz element for node
viz={}
color=node_xml.find("{%s}color"%self.NS_VIZ)
if color is not None:
if self.VERSION=='1.1':
viz['color']={'r':int(color.get('r')),
'g':int(color.get('g')),
'b':int(color.get('b'))}
else:
viz['color']={'r':int(color.get('r')),
'g':int(color.get('g')),
'b':int(color.get('b')),
'a':float(color.get('a')),
}
size=node_xml.find("{%s}size"%self.NS_VIZ)
if size is not None:
viz['size']=float(size.get('value'))
thickness=node_xml.find("{%s}thickness"%self.NS_VIZ)
if thickness is not None:
viz['thickness']=float(thickness.get('value'))
shape=node_xml.find("{%s}shape"%self.NS_VIZ)
if shape is not None:
viz['shape']=shape.get('shape')
if viz['shape']=='image':
viz['shape']=shape.get('uri')
position=node_xml.find("{%s}position"%self.NS_VIZ)
if position is not None:
viz['position']={'x':float(position.get('x',0)),
'y':float(position.get('y',0)),
'z':float(position.get('z',0))}
if len(viz)>0:
data['viz']=viz
return data
def add_parents(self, data, node_xml):
parents_element=node_xml.find("{%s}parents"%self.NS_GEXF)
if parents_element is not None:
data['parents']=[]
for p in parents_element.findall("{%s}parent"%self.NS_GEXF):
parent=p.get('for')
data['parents'].append(parent)
return data
def add_slices(self, data, node_xml):
slices_element=node_xml.find("{%s}slices"%self.NS_GEXF)
if slices_element is not None:
data['slices']=[]
for s in slices_element.findall("{%s}slice"%self.NS_GEXF):
start=s.get('start')
end=s.get('end')
data['slices'].append((start,end))
return data
def add_spells(self, data, node_xml):
spells_element=node_xml.find("{%s}spells"%self.NS_GEXF)
if spells_element is not None:
data['spells']=[]
for s in spells_element.findall("{%s}spell"%self.NS_GEXF):
start=s.get('start')
end=s.get('end')
data['spells'].append((start,end))
return data
def add_edge(self, G, edge_element, edge_attr):
# add an edge to the graph
# raise error if we find mixed directed and undirected edges
edge_direction = edge_element.get("type")
if G.is_directed() and edge_direction=='undirected':
raise nx.NetworkXError(\
"Undirected edge found in directed graph.")
if (not G.is_directed()) and edge_direction=='directed':
raise nx.NetworkXError(\
"Directed edge found in undirected graph.")
# Get source and target and recast type if required
source = edge_element.get("source")
target = edge_element.get("target")
if self.node_type is not None:
source=self.node_type(source)
target=self.node_type(target)
data = self.decode_attr_elements(edge_attr, edge_element)
data = self.add_start_end(data,edge_element)
# GEXF stores edge ids as an attribute
# NetworkX uses them as keys in multigraphs
# if networkx_key is not specified as an attribute
edge_id = edge_element.get("id")
if edge_id is not None:
data["id"] = edge_id
# check if there is a 'multigraph_key' and use that as edge_id
multigraph_key = data.pop('networkx_key',None)
if multigraph_key is not None:
edge_id=multigraph_key
weight = edge_element.get('weight')
if weight is not None:
data['weight']=float(weight)
edge_label = edge_element.get("label")
if edge_label is not None:
data['label']=edge_label
if G.has_edge(source,target):
# seen this edge before - this is a multigraph
self.simple_graph=False
G.add_edge(source, target, key=edge_id, **data)
if edge_direction=='mutual':
G.add_edge(target, source, key=edge_id, **data)
def decode_attr_elements(self, gexf_keys, obj_xml):
# Use the key information to decode the attr XML
attr = {}
# look for outer "<attvalues>" element
attr_element=obj_xml.find("{%s}attvalues" % self.NS_GEXF)
if attr_element is not None:
# loop over <attvalue> elements
for a in attr_element.findall("{%s}attvalue" % self.NS_GEXF):
key = a.get('for') # for is required
try: # should be in our gexf_keys dictionary
title=gexf_keys[key]['title']
except KeyError:
raise nx.NetworkXError("No attribute defined for=%s"%key)
atype=gexf_keys[key]['type']
value=a.get('value')
if atype=='boolean':
value=self.convert_bool[value]
else:
value=self.python_type[atype](value)
if gexf_keys[key]['mode']=='dynamic':
# for dynamic graphs use list of three-tuples
# [(value1,start1,end1), (value2,start2,end2), etc]
start=a.get('start')
end=a.get('end')
if title in attr:
attr[title].append((value,start,end))
else:
attr[title]=[(value,start,end)]
else:
# for static graphs just assign the value
attr[title] = value
return attr
def find_gexf_attributes(self, attributes_element):
# Extract all the attributes and defaults
attrs = {}
defaults = {}
mode=attributes_element.get('mode')
for k in attributes_element.findall("{%s}attribute" % self.NS_GEXF):
attr_id = k.get("id")
title=k.get('title')
atype=k.get('type')
attrs[attr_id]={'title':title,'type':atype,'mode':mode}
# check for the "default" subelement of key element and add
default=k.find("{%s}default" % self.NS_GEXF)
if default is not None:
if atype=='boolean':
value=self.convert_bool[default.text]
else:
value=self.python_type[atype](default.text)
defaults[title]=value
return attrs,defaults
def relabel_gexf_graph(G):
"""Relabel graph using "label" node keyword for node label.
Parameters
----------
G : graph
A NetworkX graph read from GEXF data
Returns
-------
H : graph
      A NetworkX graph with relabeled nodes
Notes
-----
This function relabels the nodes in a NetworkX graph with the
"label" attribute. It also handles relabeling the specific GEXF
node attributes "parents", and "pid".
"""
# build mapping of node labels, do some error checking
try:
mapping=[(u,G.node[u]['label']) for u in G]
except KeyError:
raise nx.NetworkXError('Failed to relabel nodes: '
'missing node labels found. '
'Use relabel=False.')
x,y=zip(*mapping)
if len(set(y))!=len(G):
raise nx.NetworkXError('Failed to relabel nodes: '
'duplicate node labels found. '
'Use relabel=False.')
mapping=dict(mapping)
H=nx.relabel_nodes(G,mapping)
# relabel attributes
for n in G:
m=mapping[n]
H.node[m]['id']=n
if 'pid' in H.node[m]:
H.node[m]['pid']=mapping[G.node[n]['pid']]
if 'parents' in H.node[m]:
H.node[m]['parents']=[mapping[p] for p in G.node[n]['parents']]
return H
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import xml.etree.cElementTree
except:
raise SkipTest("xml.etree.cElementTree not available")
# fixture for nose tests
def teardown_module(module):
import os
try:
os.unlink('test.gexf')
except:
pass
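# Round-trip sketch (mirrors the doctests above; the file name is illustrative):
#   G = nx.path_graph(4)
#   nx.write_gexf(G, "test.gexf")
#   H = nx.read_gexf("test.gexf", node_type=int)
#   assert sorted(G.edges()) == sorted(H.edges())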
|
gpl-2.0
| 3,307,386,771,986,776,000
| 34.705044
| 79
| 0.522121
| false
| 3.967643
| false
| false
| false
|
odoo-argentina/account
|
l10n_ar_account/models/account_invoice.py
|
1
|
12294
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api, _
from openerp.exceptions import UserError
import re
import logging
_logger = logging.getLogger(__name__)
class AccountInvoice(models.Model):
_inherit = "account.invoice"
currency_rate = fields.Float(
string='Currency Rate',
copy=False,
digits=(16, 4),
# TODO make it editable, we have to change move create method
readonly=True,
)
document_letter_id = fields.Many2one(
related='document_type_id.document_letter_id',
)
afip_responsible_type_id = fields.Many2one(
'afip.responsible.type',
string='AFIP Responsible Type',
readonly=True,
copy=False,
)
invoice_number = fields.Integer(
compute='_get_invoice_number',
string="Invoice Number",
)
point_of_sale_number = fields.Integer(
compute='_get_invoice_number',
string="Point Of Sale",
)
vat_base_amount = fields.Monetary(
compute="_get_argentina_amounts",
string='VAT Base Amount'
)
vat_exempt_base_amount = fields.Monetary(
compute="_get_argentina_amounts",
string='VAT Exempt Base Amount'
)
    # TODO delete, we should not need these anymore because we modified
    # compute all so that it creates these taxes
    # zero-rated VAT base (we have to add it because odoo does not create
    # lines for taxes with zero value)
# vat_zero_base_amount = fields.Monetary(
# compute="_get_argentina_amounts",
# string='VAT Zero Base Amount'
# )
    # VAT untaxed amount (we have to add it because odoo does not create
    # lines for taxes with zero value)
vat_untaxed_base_amount = fields.Monetary(
compute="_get_argentina_amounts",
string='VAT Untaxed Base Amount'
)
vat_amount = fields.Monetary(
compute="_get_argentina_amounts",
string='VAT Amount'
)
other_taxes_amount = fields.Monetary(
compute="_get_argentina_amounts",
string='Other Taxes Amount'
)
vat_tax_ids = fields.One2many(
compute="_get_argentina_amounts",
comodel_name='account.invoice.tax',
string='VAT Taxes'
)
not_vat_tax_ids = fields.One2many(
compute="_get_argentina_amounts",
comodel_name='account.invoice.tax',
string='Not VAT Taxes'
)
afip_incoterm_id = fields.Many2one(
'afip.incoterm',
'Incoterm',
readonly=True,
states={'draft': [('readonly', False)]}
)
point_of_sale_type = fields.Selection(
related='journal_id.point_of_sale_type',
readonly=True,
)
    # we add these fields in this module but they are actually used by the
    # electronic invoice (FE) module; we understand they could be needed for
    # other types as well, so for now we only make them required if the point
    # of sale is of the electronic type
afip_concept = fields.Selection(
compute='_get_concept',
# store=True,
selection=[('1', 'Producto / Exportación definitiva de bienes'),
('2', 'Servicios'),
('3', 'Productos y Servicios'),
('4', '4-Otros (exportación)'),
],
string="AFIP concept",
)
afip_service_start = fields.Date(
string='Service Start Date'
)
afip_service_end = fields.Date(
string='Service End Date'
)
@api.one
def _get_argentina_amounts(self):
"""
"""
# vat values
        # we exclude exempt VAT and untaxed (not taxed) amounts
        without_tax_id = self.tax_line_ids.filtered(lambda r: not r.tax_id)
        if without_tax_id:
            raise UserError(_(
                "Some Invoice Tax Lines don't have a tax_id associated, "
                "please correct them or try to refresh the invoice"))
vat_taxes = self.tax_line_ids.filtered(
lambda r: (
r.tax_id.tax_group_id.type == 'tax' and
r.tax_id.tax_group_id.tax == 'vat' and
r.tax_id.tax_group_id.afip_code not in [1, 2]))
vat_amount = sum(vat_taxes.mapped('amount'))
self.vat_tax_ids = vat_taxes
self.vat_amount = vat_amount
self.vat_base_amount = sum(vat_taxes.mapped('base_amount'))
# vat exempt values
# exempt taxes are the ones with code 2
vat_exempt_taxes = self.tax_line_ids.filtered(
lambda r: (
r.tax_id.tax_group_id.type == 'tax' and
r.tax_id.tax_group_id.tax == 'vat' and
r.tax_id.tax_group_id.afip_code == 2))
self.vat_exempt_base_amount = sum(
vat_exempt_taxes.mapped('base_amount'))
        # vat_untaxed_base_amount values (untaxed, "no gravado")
        # vat untaxed taxes are the ones with code 1
vat_untaxed_taxes = self.tax_line_ids.filtered(
lambda r: (
r.tax_id.tax_group_id.type == 'tax' and
r.tax_id.tax_group_id.tax == 'vat' and
r.tax_id.tax_group_id.afip_code == 1))
self.vat_untaxed_base_amount = sum(
vat_untaxed_taxes.mapped('base_amount'))
# other taxes values
not_vat_taxes = self.tax_line_ids - vat_taxes
other_taxes_amount = sum(not_vat_taxes.mapped('amount'))
self.not_vat_tax_ids = not_vat_taxes
self.other_taxes_amount = other_taxes_amount
@api.one
@api.depends('document_number', 'number')
    def _get_invoice_number(self):
        """ Computes the point of sale number and the invoice number from the
        document number. It is mainly used by the vat ledger citi module.
        """
        # TODO improve this and store point of sale and invoice number
        # separately; that would also make loading purchase vouchers
        # ("comprobantes de compra") easier
str_number = self.document_number or self.number or False
if str_number and self.state not in [
'draft', 'proforma', 'proforma2', 'cancel']:
if self.document_type_id.code in [33, 99, 331, 332]:
point_of_sale = '0'
# leave only numbers and convert to integer
invoice_number = str_number
            # import dispatches (despachos de importacion)
elif self.document_type_id.code == 66:
point_of_sale = '0'
invoice_number = '0'
elif "-" in str_number:
splited_number = str_number.split('-')
invoice_number = splited_number.pop()
point_of_sale = splited_number.pop()
elif "-" not in str_number and len(str_number) == 12:
point_of_sale = str_number[:4]
invoice_number = str_number[-8:]
else:
raise UserError(_(
'Could not get invoice number and point of sale for '
'invoice id %i') % (self.id))
self.invoice_number = int(
re.sub("[^0-9]", "", invoice_number))
self.point_of_sale_number = int(
re.sub("[^0-9]", "", point_of_sale))
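    # Worked example (illustrative only, not part of the original module):
    # a document_number such as "0001-00000123" is split on "-" into a point
    # of sale number of 1 and an invoice number of 123, while a 12-character
    # value without a dash such as "000100000123" is sliced into "0001" and
    # "00000123" before both parts are converted to integers.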
@api.one
@api.depends(
'invoice_line_ids',
'invoice_line_ids.product_id',
'invoice_line_ids.product_id.type',
'localization',
)
def _get_concept(self):
afip_concept = False
if self.point_of_sale_type in ['online', 'electronic']:
            # exports
invoice_lines = self.invoice_line_ids
product_types = set(
[x.product_id.type for x in invoice_lines if x.product_id])
consumible = set(['consu', 'product'])
service = set(['service'])
mixed = set(['consu', 'service', 'product'])
# default value "product"
afip_concept = '1'
if product_types.issubset(mixed):
afip_concept = '3'
if product_types.issubset(service):
afip_concept = '2'
if product_types.issubset(consumible):
afip_concept = '1'
if self.document_type_id.code in [19, 20, 21]:
                # TODO verify this: since for exports concept 3 does not exist
                # and 4 (others) does, we consider that a mixed one would be
                # "others"
if afip_concept == '3':
afip_concept = '4'
self.afip_concept = afip_concept
@api.multi
def get_localization_invoice_vals(self):
self.ensure_one()
if self.localization == 'argentina':
commercial_partner = self.partner_id.commercial_partner_id
currency_rate = self.currency_id.compute(
1., self.company_id.currency_id)
return {
'afip_responsible_type_id': (
commercial_partner.afip_responsible_type_id.id),
'currency_rate': currency_rate,
}
else:
return super(
AccountInvoice, self).get_localization_invoice_vals()
@api.multi
def _get_available_journal_document_types(self):
"""
        This function searches for the available document types regarding:
* Journal
* Partner
* Company
* Documents configuration
        If needed, we can make this function inheritable and customizable per
        localization
"""
self.ensure_one()
if self.localization != 'argentina':
return super(
AccountInvoice, self)._get_available_journal_document_types()
invoice_type = self.type
journal_document_types = journal_document_type = self.env[
'account.journal.document.type']
if invoice_type in [
'out_invoice', 'in_invoice', 'out_refund', 'in_refund']:
if self.use_documents:
letters = self.journal_id.get_journal_letter(
counterpart_partner=self.commercial_partner_id)
domain = [
('journal_id', '=', self.journal_id.id),
'|',
('document_type_id.document_letter_id', 'in', letters.ids),
('document_type_id.document_letter_id', '=', False),
]
            # If internal_type is in the context we try to search for a
            # specific document type, e.g. the one used on debit notes
internal_type = self._context.get('internal_type', False)
if internal_type:
journal_document_type = journal_document_type.search(
domain + [
('document_type_id.internal_type',
'=', internal_type)], limit=1)
# For domain, we search all documents
journal_document_types = journal_document_types.search(domain)
            # If no specific document type is found, we choose another one
if not journal_document_type and journal_document_types:
journal_document_type = journal_document_types[0]
if invoice_type == 'in_invoice':
other_document_types = (
self.commercial_partner_id.other_document_type_ids)
domain = [
('journal_id', '=', self.journal_id.id),
('document_type_id',
'in', other_document_types.ids),
]
other_journal_document_types = self.env[
'account.journal.document.type'].search(domain)
journal_document_types += other_journal_document_types
                # if we have some document type specific to the partner, we choose it
if other_journal_document_types:
journal_document_type = other_journal_document_types[0]
return {
'available_journal_document_types': journal_document_types,
'journal_document_type': journal_document_type,
}
|
agpl-3.0
| 2,440,302,413,177,547,000
| 37.898734
| 79
| 0.549219
| false
| 3.860553
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/servicebus/azure-servicebus/samples/async_samples/schedule_messages_and_cancellation_async.py
|
1
|
2329
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Example to show scheduling messages to and cancelling messages from a Service Bus Queue asynchronously.
"""
# pylint: disable=C0111
import os
import asyncio
import datetime
from azure.servicebus.aio import ServiceBusClient
from azure.servicebus import ServiceBusMessage
CONNECTION_STR = os.environ['SERVICE_BUS_CONNECTION_STR']
QUEUE_NAME = os.environ["SERVICE_BUS_QUEUE_NAME"]
async def schedule_single_message(sender):
message = ServiceBusMessage("ServiceBusMessage to be scheduled")
scheduled_time_utc = datetime.datetime.utcnow() + datetime.timedelta(seconds=30)
sequence_number = await sender.schedule_messages(message, scheduled_time_utc)
return sequence_number
async def schedule_multiple_messages(sender):
messages_to_schedule = []
for _ in range(10):
messages_to_schedule.append(ServiceBusMessage("Message to be scheduled"))
scheduled_time_utc = datetime.datetime.utcnow() + datetime.timedelta(seconds=30)
sequence_numbers = await sender.schedule_messages(messages_to_schedule, scheduled_time_utc)
return sequence_numbers
async def main():
servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR, logging_enable=True)
async with servicebus_client:
sender = servicebus_client.get_queue_sender(queue_name=QUEUE_NAME)
async with sender:
sequence_number = await schedule_single_message(sender)
print("Single message is scheduled and sequence number is {}".format(sequence_number))
sequence_numbers = await schedule_multiple_messages(sender)
print("Multiple messages are scheduled and sequence numbers are {}".format(sequence_numbers))
await sender.cancel_scheduled_messages(sequence_number)
await sender.cancel_scheduled_messages(sequence_numbers)
print("All scheduled messages are cancelled.")
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
mit
| -1,515,303,370,977,743,400
| 40.589286
| 109
| 0.682267
| false
| 4.496139
| false
| false
| false
|
aspilotros/YouTube_views_forecasting
|
CountingLines3-noplot.py
|
1
|
3206
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 15 17:54:35 2017
@author: ale
"""
#sys.stdout.write(frmt_date)
# Count the number of lines in each output file every 5 minutes
import time
import datetime as dt
import matplotlib.pyplot as plt
import plotly as py
import plotly.graph_objs as go
import numpy as np
root1 = input('insert the complete path that hosts the output folder')
root2 = input('output folder name')
name = root1 + '/' + root2 + '/key.done'
name1 = root1 + '/' + root2 + '/key.disabled'
name2 = root1 + '/' + root2 + '/key.invalidrequest'
name3 = root1 + '/' + root2 + '/key.nostatyet'
name4 = root1 + '/' + root2 + '/key.notfound'
name5 = root1 + '/' + root2 + '/key.private'
name6 = root1 + '/' + root2 + '/key.quotalimit'
j=0
counts=[]
counts1=[]
counts2=[]
counts3=[]
counts4=[]
counts5=[]
counts6=[]
while True:
handle = open(name, 'r')
handle1 = open(name1, 'r')
handle2 = open(name2, 'r')
handle3 = open(name3, 'r')
handle4 = open(name4, 'r')
handle5 = open(name5, 'r')
handle6 = open(name6, 'r')
counts.append(0)
counts1.append(0)
counts2.append(0)
counts3.append(0)
counts4.append(0)
counts5.append(0)
counts6.append(0)
for line in handle:
counts[j]=counts[j]+1
for line1 in handle1:
counts1[j]=counts1[j]+1
for line2 in handle2:
counts2[j]=counts2[j]+1
for line3 in handle3:
counts3[j]=counts3[j]+1
for line4 in handle4:
counts4[j]=counts4[j]+1
for line5 in handle5:
counts5[j]=counts5[j]+1
for line6 in handle6:
counts6[j]=counts6[j]+1
total=counts[j]+counts1[j]+counts2[j]+counts3[j]+counts4[j]+counts5[j]+counts6[j]
epoch_now = time.time()
frmt_date = dt.datetime.utcfromtimestamp(epoch_now)
frmt_date=frmt_date+dt.timedelta(hours=2)
frmt_date = frmt_date.strftime("%Y/%m/%d %H:%M")
#plt.plot(epoch_now,counts, 'r--', counts1, 'b--', counts2, 'g--', counts3, 'rs', counts4, 'bs', counts5, 'gs', counts6, 'r^')
#plt.show()
# Create traces
print (
    'line in file = ', counts[j], 'time = ', frmt_date, ' out of total =', total, '\n',
'done ',counts[j],' disabled ',counts1[j],' invalidreq',counts2[j],' notstatyet ',counts3[j],' notfound ',counts4[j],' private ',counts5[j],' quotalimit ',counts6[j])
    # plotting every 12 cycles, i.e. every 12*300 sec = every hour
'''
if j % 12 == 11:
trace0 = go.Scatter(
x = np.arange(j),
y = counts,
mode = 'markers',
name = 'done'
)
trace1 = go.Scatter(
x = np.arange(j),
y = counts1+counts2+counts3+counts4+counts5,
mode = 'markers',
name = 'key not available'
)
trace2 = go.Scatter(
x = np.arange(j),
y = counts6,
mode = 'lines',
name = 'quotalimit'
)
data = [trace0, trace1, trace2]
py.offline.plot({
"data": data,
"layout": go.Layout(title="Crawler Stats")
})
'''
j=j+1
time.sleep(300)
|
mit
| -4,667,322,212,081,878,000
| 22.925373
| 178
| 0.554273
| false
| 3.044634
| false
| false
| false
|
rdireen/nearside
|
nearside/cylindrical/structures.py
|
1
|
3017
|
# Copyright (C) 2015 Randy Direen <nearside@direentech.com>
#
# This file is part of NearSide.
#
# NearSide is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NearSide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NearSide. If not, see <http://www.gnu.org/licenses/>
"""***************************************************************************
Holds all the structures for cylindrical measurements
Randy Direen
3/06/2015
A description
***************************************************************************"""
#--------------------------Place in each *.py file----------------------------
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from six.moves import range #use range instead of xrange
#-----------------------------------------------------------------------------
#---------------------------------------------------------------------Built-ins
import json
from os.path import dirname
#--------------------------------------------------------------------3rd Party
import numpy as np
import spherepy as sp
#------------------------------------------------------------------------Custom
import nearside.probe as pb
#==============================================================================
# Global Declarations
#==============================================================================
err_msg = {}
#=============================================================================
# Objects
#=============================================================================
#-=-=-=-=-=-=-=-=-=-=-= COEFFICIENT REPRESENTATIONS =-=-=-=-=-=-=-=-=-=-=-=-=-
# The coefficients represent the device or environment that has been measured.
# These coefficients can be transformed back to field values.
class CylindricalScalarCoeffs(object):
pass
class CylindricalVectorCoeffs(object):
pass
#-=-=-=-=-=-=-=-=-=-=-= MEASURED ON UNIFORM GRID =-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# These objects use the algorithms that require data to be equally spaced in
# the theta direction and the phi direction.
class CylindricalMeasurementScalarUniform(object):
pass
class CylindricalMeasurementTransverseUniform(object):
pass
#-=-=-=-=-=-=-=-=-=-=-= MEASURED ON NON UNIFORM GRID =-=-=-=-=-=-=-=-=-=-=-=-=
# These objects use the algorithms that DO NOT require data to be equally
# spaced in the theta direction and the phi direction.
class CylindricalMeasurementScalarNonUniform(object):
pass
class CylindricalMeasurementTransverseNonUniform(object):
pass
|
gpl-3.0
| -3,671,326,859,326,461,000
| 33.295455
| 79
| 0.526019
| false
| 4.87399
| false
| false
| false
|
dataversioncontrol/dvc
|
dvc/state.py
|
1
|
16303
|
"""Manages state database used for checksum caching."""
from __future__ import unicode_literals
import os
import sqlite3
import dvc.logger as logger
from dvc.config import Config
from dvc.utils import file_md5, remove, current_timestamp
from dvc.exceptions import DvcException
from dvc.utils.fs import get_mtime_and_size, get_inode
class StateVersionTooNewError(DvcException):
"""Thrown when dvc version is older than the state database version."""
def __init__(self, dvc_version, expected, actual):
super(StateVersionTooNewError, self).__init__(
"you are using an old version '{dvc_version}' of dvc that is "
"using state file version '{expected}' which is not compatible "
"with the state file version '{actual}' that is used in this "
"repo. Please upgrade right now!".format(
dvc_version=dvc_version, expected=expected, actual=actual
)
)
def _file_metadata_changed(actual_mtime, mtime, actual_size, size):
return actual_mtime != mtime or actual_size != size
class State(object): # pylint: disable=too-many-instance-attributes
"""Class for the state database.
Args:
repo (dvc.repo.Repo): repo instance that this state belongs to.
config (configobj.ConfigObj): config for the state.
Raises:
StateVersionTooNewError: thrown when dvc version is older than the
state database version.
"""
VERSION = 3
STATE_FILE = "state"
STATE_TABLE = "state"
STATE_TABLE_LAYOUT = (
"inode INTEGER PRIMARY KEY, "
"mtime TEXT NOT NULL, "
"size TEXT NOT NULL, "
"md5 TEXT NOT NULL, "
"timestamp TEXT NOT NULL"
)
STATE_INFO_TABLE = "state_info"
STATE_INFO_TABLE_LAYOUT = "count INTEGER"
STATE_INFO_ROW = 1
LINK_STATE_TABLE = "link_state"
LINK_STATE_TABLE_LAYOUT = (
"path TEXT PRIMARY KEY, "
"inode INTEGER NOT NULL, "
"mtime TEXT NOT NULL"
)
STATE_ROW_LIMIT = 100000000
STATE_ROW_CLEANUP_QUOTA = 50
MAX_INT = 2 ** 63 - 1
MAX_UINT = 2 ** 64 - 2
def __init__(self, repo, config):
self.repo = repo
self.dvc_dir = repo.dvc_dir
self.root_dir = repo.root_dir
self.row_limit = 100
self.row_cleanup_quota = 50
state_config = config.get(Config.SECTION_STATE, {})
self.row_limit = state_config.get(
Config.SECTION_STATE_ROW_LIMIT, self.STATE_ROW_LIMIT
)
self.row_cleanup_quota = state_config.get(
Config.SECTION_STATE_ROW_CLEANUP_QUOTA,
self.STATE_ROW_CLEANUP_QUOTA,
)
if not self.dvc_dir:
self.state_file = None
return
self.state_file = os.path.join(self.dvc_dir, self.STATE_FILE)
# https://www.sqlite.org/tempfiles.html
self.temp_files = [
self.state_file + "-journal",
self.state_file + "-wal",
]
self.database = None
self.cursor = None
self.inserts = 0
def __enter__(self):
self.load()
def __exit__(self, typ, value, tbck):
self.dump()
def _collect(self, path):
if os.path.isdir(path):
return self.repo.cache.local.collect_dir_cache(path)
return (file_md5(path)[0], None)
def changed(self, path, md5):
"""Check if file/directory has the expected md5.
Args:
path (str): path to the file/directory to check.
md5 (str): expected md5.
Returns:
bool: True if path has the expected md5, False otherwise.
"""
actual = self.update(path)
msg = "File '{}', md5 '{}', actual '{}'"
logger.debug(msg.format(path, md5, actual))
if not md5 or not actual:
return True
return actual.split(".")[0] != md5.split(".")[0]
def _execute(self, cmd):
logger.debug(cmd)
return self.cursor.execute(cmd)
def _fetchall(self):
ret = self.cursor.fetchall()
logger.debug("fetched: {}".format(ret))
return ret
def _to_sqlite(self, num):
assert num >= 0
assert num < self.MAX_UINT
        # NOTE: sqlite stores uint as signed ints, so maximum uint is 2^63-1
# see http://jakegoulding.com/blog/2011/02/06/sqlite-64-bit-integers/
if num > self.MAX_INT:
ret = -(num - self.MAX_INT)
else:
ret = num
assert self._from_sqlite(ret) == num
return ret
def _from_sqlite(self, num):
assert abs(num) <= self.MAX_INT
if num < 0:
return abs(num) + self.MAX_INT
assert num < self.MAX_UINT
assert num >= 0
return num
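    # Worked example of the signed/unsigned mapping above (illustrative only):
    # an inode of 2**63 + 5 exceeds MAX_INT, so _to_sqlite stores it as
    # -(2**63 + 5 - MAX_INT) == -6, and _from_sqlite recovers
    # abs(-6) + MAX_INT == 2**63 + 5; values at or below MAX_INT are stored
    # unchanged.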
def _prepare_db(self, empty=False):
from dvc import VERSION
if not empty:
cmd = "PRAGMA user_version;"
self._execute(cmd)
ret = self._fetchall()
assert len(ret) == 1
assert len(ret[0]) == 1
assert isinstance(ret[0][0], int)
version = ret[0][0]
if version > self.VERSION:
raise StateVersionTooNewError(VERSION, self.VERSION, version)
elif version < self.VERSION:
msg = (
"State file version '{}' is too old. "
"Reformatting to the current version '{}'."
)
logger.warning(msg.format(version, self.VERSION))
cmd = "DROP TABLE IF EXISTS {};"
self._execute(cmd.format(self.STATE_TABLE))
self._execute(cmd.format(self.STATE_INFO_TABLE))
self._execute(cmd.format(self.LINK_STATE_TABLE))
# Check that the state file is indeed a database
cmd = "CREATE TABLE IF NOT EXISTS {} ({})"
self._execute(cmd.format(self.STATE_TABLE, self.STATE_TABLE_LAYOUT))
self._execute(
cmd.format(self.STATE_INFO_TABLE, self.STATE_INFO_TABLE_LAYOUT)
)
self._execute(
cmd.format(self.LINK_STATE_TABLE, self.LINK_STATE_TABLE_LAYOUT)
)
cmd = (
"INSERT OR IGNORE INTO {} (count) SELECT 0 "
"WHERE NOT EXISTS (SELECT * FROM {})"
)
self._execute(cmd.format(self.STATE_INFO_TABLE, self.STATE_INFO_TABLE))
cmd = "PRAGMA user_version = {};"
self._execute(cmd.format(self.VERSION))
def load(self):
"""Loads state database."""
retries = 1
while True:
assert self.database is None
assert self.cursor is None
assert self.inserts == 0
empty = not os.path.exists(self.state_file)
self.database = sqlite3.connect(self.state_file)
self.cursor = self.database.cursor()
# Try loading once to check that the file is indeed a database
# and reformat it if it is not.
try:
self._prepare_db(empty=empty)
return
except sqlite3.DatabaseError:
self.cursor.close()
self.database.close()
self.database = None
self.cursor = None
self.inserts = 0
if retries > 0:
os.unlink(self.state_file)
retries -= 1
else:
raise
def _vacuum(self):
# NOTE: see https://bugs.python.org/issue28518
self.database.isolation_level = None
self._execute("VACUUM")
self.database.isolation_level = ""
def dump(self):
"""Saves state database."""
assert self.database is not None
cmd = "SELECT count from {} WHERE rowid={}"
self._execute(cmd.format(self.STATE_INFO_TABLE, self.STATE_INFO_ROW))
ret = self._fetchall()
assert len(ret) == 1
assert len(ret[0]) == 1
count = self._from_sqlite(ret[0][0]) + self.inserts
if count > self.row_limit:
msg = "cleaning up state, this might take a while."
logger.warning(msg)
delete = count - self.row_limit
delete += int(self.row_limit * (self.row_cleanup_quota / 100.0))
cmd = (
"DELETE FROM {} WHERE timestamp IN ("
"SELECT timestamp FROM {} ORDER BY timestamp ASC LIMIT {});"
)
self._execute(
cmd.format(self.STATE_TABLE, self.STATE_TABLE, delete)
)
self._vacuum()
cmd = "SELECT COUNT(*) FROM {}"
self._execute(cmd.format(self.STATE_TABLE))
ret = self._fetchall()
assert len(ret) == 1
assert len(ret[0]) == 1
count = ret[0][0]
cmd = "UPDATE {} SET count = {} WHERE rowid = {}"
self._execute(
cmd.format(
self.STATE_INFO_TABLE,
self._to_sqlite(count),
self.STATE_INFO_ROW,
)
)
self._update_cache_directory_state()
self.database.commit()
self.cursor.close()
self.database.close()
self.database = None
self.cursor = None
self.inserts = 0
def _do_update(self, path, known_checksum=None):
"""
Make sure the stored info for the given path is up to date.
"""
if not os.path.exists(path):
return None, None
actual_mtime, actual_size = get_mtime_and_size(path)
actual_inode = get_inode(path)
existing_record = self.get_state_record_for_inode(actual_inode)
if existing_record:
md5, info = self._update_existing_state_record(
path,
actual_inode,
actual_mtime,
actual_size,
existing_record,
known_checksum,
)
else:
md5, info = self._insert_new_state_record(
path, actual_inode, actual_mtime, actual_size, known_checksum
)
return md5, info
def _update_existing_state_record(
self,
path,
actual_inode,
actual_mtime,
actual_size,
existing_record,
known_checksum=None,
):
mtime, size, md5, _ = existing_record
if _file_metadata_changed(actual_mtime, mtime, actual_size, size):
md5, info = self._update_state_for_path_changed(
path, actual_inode, actual_mtime, actual_size, known_checksum
)
else:
info = None
self._update_state_record_timestamp_for_inode(actual_inode)
return md5, info
def _update_state_record_timestamp_for_inode(self, actual_inode):
cmd = 'UPDATE {} SET timestamp = "{}" WHERE inode = {}'
self._execute(
cmd.format(
self.STATE_TABLE,
current_timestamp(),
self._to_sqlite(actual_inode),
)
)
def _update_state_for_path_changed(
self,
path,
actual_inode,
actual_mtime,
actual_size,
known_checksum=None,
):
if known_checksum:
md5, info = known_checksum, None
else:
md5, info = self._collect(path)
cmd = (
"UPDATE {} SET "
'mtime = "{}", size = "{}", '
'md5 = "{}", timestamp = "{}" '
"WHERE inode = {}"
)
self._execute(
cmd.format(
self.STATE_TABLE,
actual_mtime,
actual_size,
md5,
current_timestamp(),
self._to_sqlite(actual_inode),
)
)
return md5, info
def _insert_new_state_record(
self, path, actual_inode, actual_mtime, actual_size, known_checksum
):
if known_checksum:
md5, info = known_checksum, None
else:
md5, info = self._collect(path)
cmd = (
"INSERT INTO {}(inode, mtime, size, md5, timestamp) "
'VALUES ({}, "{}", "{}", "{}", "{}")'
)
self._execute(
cmd.format(
self.STATE_TABLE,
self._to_sqlite(actual_inode),
actual_mtime,
actual_size,
md5,
current_timestamp(),
)
)
self.inserts += 1
return md5, info
def get_state_record_for_inode(self, inode):
cmd = "SELECT mtime, size, md5, timestamp from {} " "WHERE inode={}"
cmd = cmd.format(self.STATE_TABLE, self._to_sqlite(inode))
self._execute(cmd)
results = self._fetchall()
if results:
            # uniqueness constraint on inode
assert len(results) == 1
return results[0]
return None
def update(self, path, known_checksum=None):
"""Gets the checksum for the specified path. Checksum will be
retrieved from the state database if available, otherwise it will be
computed and cached in the state database for the further use.
Args:
path (str): path to get the checksum for.
Returns:
str: checksum for the specified path.
"""
return self._do_update(path, known_checksum)[0]
def update_info(self, path):
"""Gets the checksum and the directory info (if applicable) for the
specified path.
Args:
path (str): path to get the checksum and the directory info for.
Returns:
tuple: checksum for the specified path along with a directory info
(list of {relative_path: checksum} entries for each file in the
directory) if applicable, otherwise None.
"""
md5, info = self._do_update(path)
if not info:
info = self.repo.cache.local.load_dir_cache(md5)
return (md5, info)
def update_link(self, path):
"""Adds the specified path to the list of links created by dvc. This
list is later used on `dvc checkout` to cleanup old links.
Args:
path (str): path to add to the list of links.
"""
if not os.path.exists(path):
return
mtime, _ = get_mtime_and_size(path)
inode = get_inode(path)
relpath = os.path.relpath(path, self.root_dir)
cmd = (
"REPLACE INTO {}(path, inode, mtime) "
'VALUES ("{}", {}, "{}")'.format(
self.LINK_STATE_TABLE, relpath, self._to_sqlite(inode), mtime
)
)
self._execute(cmd)
def remove_unused_links(self, used):
"""Removes all saved links except the ones that are used.
Args:
used (list): list of used links that should not be removed.
"""
unused = []
self._execute("SELECT * FROM {}".format(self.LINK_STATE_TABLE))
for row in self.cursor:
relpath, inode, mtime = row
inode = self._from_sqlite(inode)
path = os.path.join(self.root_dir, relpath)
if path in used:
continue
if not os.path.exists(path):
continue
actual_inode = get_inode(path)
actual_mtime, _ = get_mtime_and_size(path)
if inode == actual_inode and mtime == actual_mtime:
logger.debug("Removing '{}' as unused link.".format(path))
remove(path)
unused.append(relpath)
for relpath in unused:
cmd = 'DELETE FROM {} WHERE path = "{}"'
self._execute(cmd.format(self.LINK_STATE_TABLE, relpath))
def _update_cache_directory_state(self):
cache_path = self.repo.cache.local.cache_dir
mtime, size = get_mtime_and_size(cache_path)
inode = get_inode(cache_path)
cmd = (
"INSERT OR REPLACE INTO {}(inode, size, mtime, timestamp, md5) "
'VALUES ({}, "{}", "{}", "{}", "")'.format(
self.STATE_TABLE,
self._to_sqlite(inode),
size,
mtime,
current_timestamp(),
)
)
self._execute(cmd)
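# A minimal usage sketch (illustrative, not part of the original module):
# State is normally reached through an existing dvc.repo.Repo instance, which
# constructs it with its own config; the path "data.csv" below is hypothetical.
#
#     from dvc.repo import Repo
#     repo = Repo(".")
#     with repo.state:
#         checksum = repo.state.update("data.csv")  # cached md5 for the path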
|
apache-2.0
| -3,852,605,697,348,844,000
| 30.412331
| 79
| 0.532601
| false
| 4.094174
| true
| false
| false
|
sony/nnabla
|
python/src/nnabla/backward_function/image_augmentation.py
|
1
|
1336
|
# Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla.functions as F
def image_augmentation_backward(inputs, shape=None, pad=(0, 0), min_scale=1.0, max_scale=1.0, angle=0.0, aspect_ratio=1.0, distortion=0.0, flip_lr=False, flip_ud=False, brightness=0.0, brightness_each=False, contrast=1.0, contrast_center=0.0, contrast_each=False, noise=0.0, seed=-1):
"""
Args:
      inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
dy = inputs[0]
x0 = inputs[1]
raise NotImplementedError(
"image_augmentation_backward is not implemented.")
|
apache-2.0
| 1,924,461,627,481,144,800
| 42.096774
| 284
| 0.729042
| false
| 3.731844
| false
| false
| false
|
RowsberryPi/rowsberrypi
|
pyrow/statshow.py
|
1
|
2273
|
#!/usr/bin/env python
#Copyright (c) 2011, Sam Gambrell
#Licensed under the Simplified BSD License.
#This is an example file to show how to make use of pyrow
#Have the rowing machine on and plugged into the computer before starting the program
#The program will display any changes to the machine status, stroke state, or workout state
#NOTE: This code has not been thoroughly tested and may not function as advertised.
#Please report any findings to the author so that they may be addressed in a stable release.
from . import pyrow, find
import time
import logging
if __name__ == '__main__':
#Connecting to erg
ergs = list(find())
if len(ergs) == 0:
exit("No ergs found.")
erg = pyrow(ergs[0])
logging.info("Connected to erg.")
#Create a dictionary of the different status states
state = ['Error', 'Ready', 'Idle', 'Have ID', 'N/A', 'In Use',
'Pause', 'Finished', 'Manual', 'Offline']
stroke = ['Wait for min speed', 'Wait for acceleration', 'Drive', 'Dwelling', 'Recovery']
workout = ['Waiting begin', 'Workout row', 'Countdown pause', 'Interval rest',
               'Work time interval', 'Work distance interval', 'Rest end time', 'Rest end distance',
'Time end rest', 'Distance end rest', 'Workout end', 'Workout terminate',
'Workout logged', 'Workout rearm']
command = ['CSAFE_GETSTATUS_CMD', 'CSAFE_PM_GET_STROKESTATE', 'CSAFE_PM_GET_WORKOUTSTATE']
#prime status number
cstate = -1
cstroke = -1
cworkout = -1
erg.set_workout(distance=2000, split=100, pace=120)
#Inf loop
while 1:
results = erg.send(command)
if cstate != (results['CSAFE_GETSTATUS_CMD'][0] & 0xF):
cstate = results['CSAFE_GETSTATUS_CMD'][0] & 0xF
logging.debug("State %s: %s", str(cstate), state[cstate])
if cstroke != results['CSAFE_PM_GET_STROKESTATE'][0]:
cstroke = results['CSAFE_PM_GET_STROKESTATE'][0]
logging.debug("Stroke %s: %s", str(cstroke), stroke[cstroke])
if cworkout != results['CSAFE_PM_GET_WORKOUTSTATE'][0]:
cworkout = results['CSAFE_PM_GET_WORKOUTSTATE'][0]
logging.debug("Workout %s: %s", str(cworkout), workout[cworkout])
time.sleep(1)
|
bsd-2-clause
| -7,115,628,451,162,838,000
| 38.877193
| 100
| 0.636604
| false
| 3.342647
| false
| false
| false
|
tensorflow/datasets
|
tensorflow_datasets/text/lm1b.py
|
1
|
3711
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Language Model 1 Billion dataset."""
import os
from absl import logging
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """\
@article{DBLP:journals/corr/ChelbaMSGBK13,
author = {Ciprian Chelba and
Tomas Mikolov and
Mike Schuster and
Qi Ge and
Thorsten Brants and
Phillipp Koehn},
title = {One Billion Word Benchmark for Measuring Progress in Statistical Language
Modeling},
journal = {CoRR},
volume = {abs/1312.3005},
year = {2013},
url = {http://arxiv.org/abs/1312.3005},
archivePrefix = {arXiv},
eprint = {1312.3005},
timestamp = {Mon, 13 Aug 2018 16:46:16 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/ChelbaMSGBK13},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_DESCRIPTION = """\
A benchmark corpus to be used for measuring progress in statistical language \
modeling. This has almost one billion words in the training data.
"""
_DOWNLOAD_URL = ("http://www.statmt.org/lm-benchmark/"
"1-billion-word-language-modeling-benchmark-r13output.tar.gz")
_TOP_LEVEL_DIR = "1-billion-word-language-modeling-benchmark-r13output"
_TRAIN_FILE_FORMAT = os.path.join(_TOP_LEVEL_DIR,
"training-monolingual.tokenized.shuffled",
"news.en-*")
_HELDOUT_FILE_FORMAT = os.path.join(_TOP_LEVEL_DIR,
"heldout-monolingual.tokenized.shuffled",
"news.en.heldout-*")
def _train_data_filenames(tmp_dir):
return tf.io.gfile.glob(os.path.join(tmp_dir, _TRAIN_FILE_FORMAT))
def _test_data_filenames(tmp_dir):
return tf.io.gfile.glob(os.path.join(tmp_dir, _HELDOUT_FILE_FORMAT))
class Lm1b(tfds.core.GeneratorBasedBuilder):
"""1 Billion Word Language Model Benchmark dataset."""
VERSION = tfds.core.Version("1.1.0")
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"text": tfds.features.Text(),
}),
supervised_keys=("text", "text"),
homepage="http://www.statmt.org/lm-benchmark/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
lm1b_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
train_files = _train_data_filenames(lm1b_path)
test_files = _test_data_filenames(lm1b_path)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN, gen_kwargs={"files": train_files}),
tfds.core.SplitGenerator(
name=tfds.Split.TEST, gen_kwargs={"files": test_files}),
]
def _generate_examples(self, files):
for filepath in files:
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath) as f:
for idx, line in enumerate(f):
yield "%s_%d" % (os.path.basename(filepath), idx), {
"text": line.strip(),
}
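# A minimal usage sketch (illustrative, not part of the original module),
# assuming the TFDS environment can download and prepare the corpus:
#
#     import tensorflow_datasets as tfds
#     ds = tfds.load("lm1b", split="train", shuffle_files=True)
#     for example in ds.take(1):
#         print(example["text"])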
|
apache-2.0
| 4,412,130,310,659,180,000
| 34.009434
| 88
| 0.642684
| false
| 3.452093
| false
| false
| false
|
sadig/DC2
|
components/dc2-admincenter/dc2/admincenter/lib/auth/kerberos.py
|
1
|
2306
|
# -*- coding: utf-8 -*-
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <sh@sourcecode.de>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys
import os
try:
import web
except ImportError as e:
print(e)
print('you did not install web.py')
print(e)
sys.exit(1)
try:
import krbV
except ImportError as e:
print(e)
print('you don\'t have python-krbV installed')
print(e)
sys.exit(1)
try:
from dc2.lib.auth.kerberos.authentication import run
from dc2.lib.auth.kerberos.authentication import krb5_format_principal_name
from dc2.lib.auth.kerberos.authentication import get_ccache_name
except ImportError as e:
print(e)
print("You didn't install dc2.lib")
print(e)
sys.exit(1)
from exceptions import KerberosAuthError
ENCODING = 'UTF-8'
def do_kinit(username=None, password=None):
if username is None or password is None:
raise ValueError('Username and Password can\'t be None')
if username == '' or password == '':
raise ValueError('Username and Password can\'t be empty strings')
realm = krbV.default_context().default_realm.decode(ENCODING)
principal = krb5_format_principal_name(username, realm)
ccache_name = get_ccache_name()
(stdout, stderr, returncode) = run(
['/usr/bin/kinit', principal],
env={'KRB5CCNAME': ccache_name},
stdin=password, raiseonerr=False)
os.environ['KRB5CCNAME'] = ccache_name
web.ctx.session.krb5ccname = ccache_name
if returncode != 0:
raise KerberosAuthError(principal=principal, message=unicode(stderr))
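# A minimal usage sketch (illustrative, not part of the original module):
# do_kinit() expects an active web.py session (web.ctx.session) and a
# reachable KDC; the credentials below are hypothetical.
#
#     try:
#         do_kinit(username='jdoe', password='secret')
#     except (ValueError, KerberosAuthError) as error:
#         print('Kerberos login failed: %s' % error)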
|
gpl-2.0
| -8,005,599,435,126,051,000
| 32.405797
| 79
| 0.708894
| false
| 3.595944
| false
| false
| false
|
tdhooper/starstoloves
|
starstoloves/lib/search/tests/test_result.py
|
1
|
2284
|
import pytest
from starstoloves.lib.track.lastfm_track import LastfmTrack
from ..result import LastfmResultParser
pytestmark = pytest.mark.django_db
@pytest.fixture()
def result_parser(request):
return LastfmResultParser()
class TestResultParser():
many_results = {
'trackmatches': {
'track': [
{
'name': 'trackA',
'artist': 'artistA',
'url': 'urlA',
'listeners': '222',
},{
'name': 'trackB',
'artist': 'artistB',
'url': 'urlB',
'listeners': '888',
},
]
}
}
single_result = {
'trackmatches': {
'track': {
'name': 'trackA',
'artist': 'artistA',
'url': 'urlA',
'listeners': '222',
}
}
}
no_results = {
'trackmatches': "\n"
}
def test_parse_returns_lastfm_tracks(self, result_parser):
tracks = result_parser.parse(self.single_result)
assert isinstance(tracks[0], LastfmTrack)
def test_parse_extracts_track_details(self, result_parser):
tracks = result_parser.parse(self.many_results)
assert [track.track_name for track in tracks] == ['trackA', 'trackB']
assert [track.artist_name for track in tracks] == ['artistA', 'artistB']
assert [track.url for track in tracks] == ['urlA', 'urlB']
assert [track.listeners for track in tracks] == [222, 888]
def test_parse_extracts_track_details_when_there_is_only_one(self, result_parser):
tracks = result_parser.parse(self.single_result)
assert [track.track_name for track in tracks] == ['trackA']
assert [track.artist_name for track in tracks] == ['artistA']
assert [track.url for track in tracks] == ['urlA']
assert [track.listeners for track in tracks] == [222]
def test_parse_returns_none_when_there_are_no_tracks(self, result_parser):
assert result_parser.parse(self.no_results) is None
def test_parse_returns_none_when_given_an_error(self, result_parser):
assert result_parser.parse(TypeError) is None
|
gpl-2.0
| 778,271,204,807,938,200
| 28.662338
| 86
| 0.549037
| false
| 3.904274
| true
| false
| false
|
AragurDEV/yowsup
|
yowsup/layers/protocol_profiles/protocolentities/iq_picture_get_result.py
|
1
|
1785
|
from .iq_picture import PictureIqProtocolEntity
from yowsup.structs import ProtocolTreeNode
class ResultGetPictureIqProtocolEntity(PictureIqProtocolEntity):
'''
<iq type="result" from="{{jid}}" id="{{id}}">
<picture type="image | preview" id="{{another_id}}">
{{Binary bytes of the picture.}}
</picture>
</iq>
'''
def __init__(self, jid, pictureData, pictureId, preview = True, _id = None):
super(ResultGetPictureIqProtocolEntity, self).__init__(jid, _id, "result")
self.setResultPictureProps(pictureData, pictureId, preview)
def setResultPictureProps(self, pictureData, pictureId, preview = True):
self.preview = preview
self.pictureData = pictureData
self.pictureId = pictureId
def isPreview(self):
return self.preview
def getPictureData(self):
return self.pictureData.encode('latin-1')
def getPictureId(self):
return self.pictureId
def writeToFile(self, path):
with open(path, "wb") as outFile:
outFile.write(self.getPictureData())
def toProtocolTreeNode(self):
node = super(ResultGetPictureIqProtocolEntity, self).toProtocolTreeNode()
pictureNode = ProtocolTreeNode({"type": "preview" if self.isPreview() else "image" }, data = self.getPictureData())
node.addChild(pictureNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = PictureIqProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = ResultGetPictureIqProtocolEntity
pictureNode = node.getChild("picture")
entity.setResultPictureProps(pictureNode.getData(), pictureNode.getAttributeValue("id"), pictureNode.getAttributeValue("type") == "preview")
return entity
|
gpl-3.0
| -5,251,179,448,151,242,000
| 38.666667
| 148
| 0.677871
| false
| 4.170561
| false
| false
| false
|
Instanssi/Instanssi.org
|
Instanssi/kompomaatti/admin.py
|
1
|
1416
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from imagekit.admin import AdminThumbnail
from Instanssi.kompomaatti.models import Compo, Entry, Event, Vote, VoteCodeRequest, Profile, Competition, \
CompetitionParticipation, TicketVoteCode, VoteGroup, EntryCollection
class TicketVoteCodeAdmin(admin.ModelAdmin):
list_display = [
'associated_to',
'event',
'ticket',
'time',
]
class VoteCodeRequestAdmin(admin.ModelAdmin):
list_display = [
'user',
'event',
'status',
'text',
]
class EntryAdmin(admin.ModelAdmin):
list_display = [
'name',
'compo',
'user',
'creator',
'entryfile',
'disqualified',
'admin_thumbnail',
]
admin_thumbnail = AdminThumbnail(image_field='imagefile_thumbnail')
class EntryCollectionAdmin(admin.ModelAdmin):
list_display = [
'compo',
'file',
'updated_at',
]
admin.site.register(Compo)
admin.site.register(Entry, EntryAdmin)
admin.site.register(Event)
admin.site.register(Vote)
admin.site.register(VoteGroup)
admin.site.register(TicketVoteCode, TicketVoteCodeAdmin)
admin.site.register(VoteCodeRequest, VoteCodeRequestAdmin)
admin.site.register(EntryCollection, EntryCollectionAdmin)
admin.site.register(Profile)
admin.site.register(Competition)
admin.site.register(CompetitionParticipation)
|
mit
| -8,645,906,533,803,368,000
| 23.413793
| 108
| 0.680085
| false
| 3.575758
| false
| false
| false
|
Workday/OpenFrame
|
tools/perf/benchmarks/start_with_url2.py
|
1
|
1913
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from benchmarks import startup2
import page_sets
from telemetry import benchmark
# TODO(gabadie): Replace start_with_url.* with start_with_url2.* after confirming
# that both benchmarks produce the same results.
# Disable accessing protected member for startup2._StartupPerfBenchmark. It
# needs to be protected to not be listed in the list of benchmarks to run, even
# though its purpose is only to factorise common code between startup
# benchmarks.
# pylint: disable=protected-access
@benchmark.Enabled('has tabs')
@benchmark.Enabled('android')
@benchmark.Disabled('chromeos', 'linux', 'mac', 'win')
class StartWithUrlColdTBM(startup2._StartupPerfBenchmark):
"""Measures time to start Chrome cold with startup URLs."""
page_set = page_sets.StartupPagesPageSetTBM
options = {'pageset_repeat': 5}
def SetExtraBrowserOptions(self, options):
options.clear_sytem_cache_for_browser_and_profile_on_start = True
super(StartWithUrlColdTBM, self).SetExtraBrowserOptions(options)
@classmethod
def Name(cls):
return 'start_with_url2.cold.startup_pages'
@benchmark.Enabled('has tabs')
@benchmark.Enabled('android')
@benchmark.Disabled('chromeos', 'linux', 'mac', 'win')
class StartWithUrlWarmTBM(startup2._StartupPerfBenchmark):
  """Measures time to start Chrome warm with startup URLs."""
page_set = page_sets.StartupPagesPageSetTBM
options = {'pageset_repeat': 11}
@classmethod
def Name(cls):
return 'start_with_url2.warm.startup_pages'
@classmethod
def ValueCanBeAddedPredicate(cls, value, is_first_result):
del value # unused
    # Ignore the first result because the first invocation is actually cold since
# we are loading the profile for the first time.
return not is_first_result
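# A usage note (illustrative, not part of the original file): benchmarks
# defined here are normally invoked through Chromium's telemetry runner,
# along the lines of
#     tools/perf/run_benchmark start_with_url2.warm.startup_pages
# (exact flags and bot configuration depend on the checkout).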
|
bsd-3-clause
| 5,598,138,598,909,910,000
| 33.160714
| 80
| 0.755358
| false
| 3.700193
| false
| false
| false
|