Dataset columns: code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int32, 2 to 1.05M)
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from collections import Counter
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from imblearn.ensemble import EasyEnsemble
from sklearn.externals import joblib
import os
class naive:
def __init__(self):
pass
def fit(self, X, Y):
pass
def predict(self, X):
xSize = len(X)  # one prediction per feature-vector row
Y = np.zeros((xSize, 1))
for i in range(xSize):
entry = X[i]
countObservedBiasCorrected = entry[4]
countExpectedPusm = entry[5]
if (countObservedBiasCorrected < 0.5 * countExpectedPusm):
Y[i][0] = 2 #UNTRUSTED
elif (countObservedBiasCorrected > 1.5 * countExpectedPusm):
Y[i][0] = 0 #REPEAT
else:
Y[i][0] = 1 #TRUSTED
return Y
class statistical:
def __init__(self):
pass
def fit(self, X, Y):
pass
def predict(self, X):
xSize = len(X)  # one prediction per feature-vector row
Y = np.zeros((xSize, 1))
for i in range(xSize):
entry = X[i]
zscore = entry[0]
if (zscore < -2):
Y[i][0] = 2 #UNTRUSTED
elif (zscore > 2):
Y[i][0] = 0 #REPEAT
else:
Y[i][0] = 1 #TRUSTED
pass
return Y
class classifier:
def __init__(self):
self.features = []
self.classes = []
#self.models = [GaussianNB(), DecisionTreeClassifier(), DecisionTreeClassifier(class_weight = 'balanced'), RandomForestClassifier(), RandomForestClassifier(class_weight = 'balanced'), LogisticRegression(), LogisticRegression(class_weight = 'balanced')]#, AdaBoostClassifier(), AdaBoostClassifier(DecisionTreeClassifier(class_weight = 'balanced'))]
#self.modelnames = ['GaussianNB', 'DecisionTreeClassifier', 'DecisionTreeClassifier(balanced)', 'RandomForestClassifier', 'RandomForestClassifier(balanced)', 'LogisticRegression', 'LogisticRegression(balanced)']#, 'AdaBoostClassifier', 'AdaBoostClassifier(balanced)']
self.models = [naive(), statistical(), GaussianNB(), DecisionTreeClassifier(class_weight = 'balanced'), RandomForestClassifier(class_weight = 'balanced'), LogisticRegression(class_weight = 'balanced')]#, AdaBoostClassifier(), AdaBoostClassifier(DecisionTreeClassifier(class_weight = 'balanced'))]
self.modelnames = ['naive', 'statistical', 'GaussianNB', 'DecisionTreeClassifier(balanced)', 'RandomForestClassifier(balanced)', 'LogisticRegression(balanced)']#, 'AdaBoostClassifier', 'AdaBoostClassifier(balanced)']
self.best_model = naive()
def add(self, num1, num2):
print "miau"
return num1 + num2
def read_data(self, input_file):
print("Reading data...")
df = pd.read_csv(input_file, header = 0, delimiter= ';')
X = df[self.features].values
Y = df['type'].values
return (X, Y)
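# Note: the CSV is expected to be ';'-separated, with one column per feature
# name passed to set_features() plus a 'type' column holding the class label.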
#takes std::vector<std::string>
def set_features(self, features):
self.features = features
#takes std::vector<int>
def set_classes(self, classes):
self.classes = classes
def balance_dataset(self, X, Y):
X_new = X
Y_new = Y
overSampler = RandomOverSampler()
underSampler = RandomUnderSampler()
#sm = EasyEnsemble()
#X_refit, Y_refit = sm.fit_sample(X, Y)
#print('Resampled dataset shape {}'.format(Counter(Y_refit[0])))
#X, Y = X_refit[0], Y_refit[0]
classCounts = Counter(Y)
print('Original training dataset shape {}'.format(classCounts))
avg = 0
minCount = classCounts[self.classes[0]]
maxCount = classCounts[self.classes[0]]
for i in self.classes:
avg = avg + classCounts[i]
if classCounts[i] < minCount:
minCount = classCounts[i]
if classCounts[i] > maxCount:
maxCount = classCounts[i]
avg = avg // len(classCounts)
print("Rounded-down average class count in training dataset: " + str(avg))
print("minCount: " + str(minCount))
print("maxCount: " + str(maxCount))
rate = avg / float(maxCount)
print("rate: " + str(rate))
underSampler = RandomUnderSampler(ratio = rate)
X_new, Y_new = underSampler.fit_sample(X_new, Y_new)
classCounts = Counter(Y_new)
print('Class counts after undersampling {}'.format(classCounts))
#avg = 0
#minCount = classCounts[0]
#maxCount = classCounts[0]
#for i in range(len(classCounts)):
# avg = avg + classCounts[i]
# if classCounts[i] < minCount:
# minCount = classCounts[i]
# if classCounts[i] > maxCount:
# maxCount = classCounts[i]
#avg = avg // len(classCounts)
#rate = minCount / float(avg)
#print("rate: " + str(rate))
#overSampler = RandomOverSampler(ratio = rate)
#print("I am here1")
X_new, Y_new = overSampler.fit_sample(X_new, Y_new)
#print("I am here2")
return X_new, Y_new
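# Rough worked example of the statistics computed above, using hypothetical
# class counts Counter({0: 900, 1: 300, 2: 60}):
#   avg = (900 + 300 + 60) // 3 = 420, minCount = 60, maxCount = 900,
#   rate = 420 / 900.0 ~= 0.467
# The exact resampled counts then depend on how the installed
# imbalanced-learn version interprets the float 'ratio' argument.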
def choose_best_classifier(self, X_train, X_test, y_train, y_test, bestF, useSampling):
if useSampling == True:
print("Trying to choose the best classifier with presampling the training dataset...")
X_train, y_train = self.balance_dataset(X_train, y_train)
else:
print("Trying to choose the best classifier without presampling the training dataset...")
expected = y_test
best_fscore = bestF
best_num = 0
for i in range(len(self.models)):
# Fit the model
print("Fitting model: " + self.modelnames[i] + "...")
self.models[i].fit(X_train, y_train)
# Evaluate the model
print("Predicting class labels with model: " + self.modelnames[i] + "...")
predicted = self.models[i].predict(X_test)
# print some metrics
print(metrics.confusion_matrix(expected, predicted))
print(metrics.classification_report(expected, predicted))
fscore = metrics.f1_score(expected, predicted, average = 'macro')
print("The average F-Score for model " + self.modelnames[i] + " with equal class weights is: " + str(fscore))
if fscore > best_fscore:
self.best_model = self.models[i]
best_fscore = fscore
best_num = i
print("And the winner is: " + self.modelnames[best_num])
return best_fscore
#takes std::string
# TODO: Mix oversampling and undersampling in order to get (original average count) many entries per class
def set_csv_file(self, csv_path):
print(csv_path)
(X, Y) = self.read_data(csv_path)
print('Original dataset shape {}'.format(Counter(Y)))
# Split into training and testing data
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.34)
bestF = 0
# Do not prebalance the dataset before training, use sample weights
bestF = self.choose_best_classifier(X_train, X_test, y_train, y_test, bestF, False)
#bestFOld = bestF
# Try to prebalance the dataset before training
#bestF = self.choose_best_classifier(X_train, X_test, y_train, y_test, bestF, True)
#if bestF > bestFOld:
# print("Resampling was good.")
#else:
# print("Resampling was useless or even harmful.")
#takes std::string
def store_classifier(self, filename):
joblib.dump(self.best_model, filename, compress=9)
print("Stored classifier.")
#takes std::string
def load_classifier(self, filename):
self.best_model = joblib.load(filename)
print("Loaded classifier.")
#takes std::vector<double> of size len(_features), returns std::vector<double> of size len(_classes)
def proba(self, feature_vector):
matrix = []
matrix.append(feature_vector)
probs = self.best_model.predict_proba(matrix)[0]
res = [float(i) for i in probs]
return res
#takes std::vector<double>, returns a single int
def classify(self, feature_vector):
matrix = []
matrix.append(feature_vector)
return self.best_model.predict(matrix)[0]
repo_name: algomaus/PAEC | path: src/python/blackbox_kmer.py | language: Python | license: gpl-3.0 | size: 8,184
"""
Provides a base class for pipe-based jobs.
"""
# Copyright 2013 Paul Griffiths
# Email: mail@paulgriffiths.net
#
# All rights reserved.
from math import pi
from jobcalc.helper import ptoc, draw_dim_line
from jobcalc.flange import Flange
from jobcalc.component import DrawnComponent
class Pipe(DrawnComponent):
"""
Pipe class to be used to automatically create a drawing.
Public methods:
__init__()
"""
def __init__(self, casingod, casingid, liningod, liningid, flange):
"""
Initializes a Pipe instance.
Subclasses should call this at the beginning of their own
constructor function.
Arguments:
casingod -- outside diameter of casing, in mm
casingid -- inside diameter of casing, in mm
liningod -- outside diameter of lining, in mm
liningid -- inside diameter of lining, in mm
flange -- name of flange
"""
# Call superclass constructor
DrawnComponent.__init__(self)
# Properties common to all pipe components
self.flange = Flange(flange)
# Bend diameters and radii common to all pipe components
self.diameters = {"co": casingod,
"ci": casingid,
"lo": liningod,
"li": liningid,
"fi": self.flange.hole_diameter,
"fo": self.flange.flange_diameter}
self.p_rad = {"co": casingod / 2.0,
"ci": casingid / 2.0,
"lo": liningod / 2.0,
"li": liningid / 2.0,
"fi": self.flange.hole_diameter / 2.0,
"fo": self.flange.flange_diameter / 2.0}
# Colors common to all pipe components
comp_col = {"co": (0.8, 0.8, 0.8), "ci": (0.9, 0.9, 0.9),
"lo": (0.6, 0.6, 0.6), "li": (1.0, 1.0, 1.0)}
self.colors = {"comp": comp_col}
def set_scale(self, ctx, page_w, page_h):
"""
Automatically sets a scale factor for the drawing.
This will normally be called after the set_scale()
function of a subclass has set the scale factor.
'page_h' and 'page_w' should already have been up-scaled
by the time this happens.
Arguments:
ctx -- a Pycairo context
page_w, page_h -- width and height of the drawing area
"""
# Upscale lines
# Note that self.dim_line_length is used to calculate
# the lengths of radius dimension lines. These lengths
# can differ for individual subclasses, and so
# self.dim_line_length should be set in the subclass's
# own set_scale() function, but the dimension is common
# to all Pipe subclasses, and so we upscale it here.
self.dim_line_length /= self.scale # pylint: disable=E1101
# Call parent function
DrawnComponent.set_scale(self, ctx, page_w, page_h)
def draw_component(self, ctx, page_w, page_h):
"""
Draws a core pipe component. Mainly calls supporting functions.
This function is called by the 'component' base class. Subclasses
should override this and call it at the beginning of their own
draw_component() function, before providing further specific
drawing functionality.
Arguments:
ctx -- a Pycairo context
page_w, page_h -- width and height of drawing area
"""
# Draw the filled outer casing without an outline. Leaving
# the overall outline until last, and drawing the other
# segments with just side outlines, avoids drawing overlapping
# lines across the bend face.
self.draw_pipe_comp(ctx, "co", fill=True)
self.draw_pipe_comp(ctx, "ci", fill=True, edges=True)
self.draw_pipe_comp(ctx, "lo", fill=True, edges=True)
self.draw_pipe_comp(ctx, "li", fill=True, edges=True)
self.draw_pipe_comp(ctx, "co", outline=True)
# Draw the other components
self.draw_bend_profile(ctx)
self.draw_rad_dims(ctx)
self.draw_flanges(ctx)
def draw_pipe_comp(self, ctx, comp, fill=False,
outline=False, edges=False):
"""
Draws a segmented component of a bend.
Arguments:
ctx -- a Pycairo context
comp -- type of component, "co", "ci", "lo" or "li"
fill -- fills the component with color if True
outline -- draws an outline around the entire component if True
edges -- draws lines only along the segment edges if True
"""
ctx.save()
pts_out = self.pc_pts["out"][comp] # pylint: disable=E1101
pts_in = self.pc_pts["in"][comp] # pylint: disable=E1101
if fill or outline:
pts = pts_out + pts_in
for point in pts:
if point is pts[0]:
ctx.move_to(*point.t())
else:
ctx.line_to(*point.t())
ctx.close_path()
if fill:
if comp == "co" and self.hatching:
ctx.set_source(self.chatch)
elif comp == "lo" and self.hatching:
ctx.set_source(self.lhatch)
else:
ctx.set_source_rgb(*self.colors["comp"][comp])
if outline:
ctx.fill_preserve()
else:
ctx.fill()
if outline:
ctx.set_source_rgb(*self.drawing_line_color)
ctx.stroke()
if edges:
ctx.set_source_rgb(*self.drawing_line_color)
for point in pts_out:
if point is pts_out[0]:
ctx.move_to(*point.t())
else:
ctx.line_to(*point.t())
ctx.stroke()
for point in pts_in:
if point is pts_in[0]:
ctx.move_to(*point.t())
else:
ctx.line_to(*point.t())
ctx.stroke()
ctx.restore()
def draw_center_line(self, ctx):
"""
Draws a dashed line along the center of the pipe.
Note that this line will be segmented, not curved, for pipe bends.
Arguments:
ctx -- a Pycairo context
"""
ctx.save()
ctx.set_dash(self.dash_style)
for point in self.pc_pts["ctr"]: # pylint: disable=E1101
if point == self.pc_pts["ctr"][0]: # pylint: disable=E1101
ctx.move_to(*point.t())
else:
ctx.line_to(*point.t())
ctx.stroke()
ctx.restore()
def draw_rad_dims(self, ctx):
"""
Draw the radius dimensions of the bend.
Arguments:
ctx -- a Pycairo context
"""
# The angle at which the radius dimension lines are drawn
# depends on the bend angle for pipe bends, but is always
# vertical for pipe straights. Since this will be called
# from a subclass, check whether we have a pipe bend by
# verifying whether the "bend_arc" instance attribute is set.
if hasattr(self, "bend_arc"):
b_arc = self.bend_arc # pylint: disable=E1101
else:
b_arc = 0
pts = {}
ctx.save()
for scale, comp in zip(range(4, 0, -1), ["co", "ci", "lo", "li"]):
# pylint: disable=E1101
dll = self.dim_line_length * scale
for i in ["out", "in"]:
point = self.pc_pts[i][comp][-1 if i == "out" else 0]
pts[i] = ptoc(b_arc + pi / 2, dll, point)
ctx.move_to(*point.t())
ctx.line_to(*pts[i].t())
# pylint: enable=E1101
ctx.stroke()
draw_dim_line(ctx, pts["out"], pts["in"],
self.diameters[comp], self.scale, 0)
ctx.restore()
def draw_bend_profile(self, ctx):
"""
Draws a half profile view of the pipe at the bottom.
Arguments:
ctx -- a Pycairo context
"""
ctx.save()
ctx.translate(self.pc_pts["ctr"][0].x, # pylint: disable=E1101
self.pc_pts["ctr"][0].y) # pylint: disable=E1101
for rad in [self.p_rad[k] for k in ["li", "lo", "ci", "co"]]:
ctx.arc(0, 0, rad, 0, pi)
ctx.stroke()
ctx.restore()
def draw_flanges(self, ctx):
"""
Draws the flanges.
Arguments:
ctx -- a Pycairo context
"""
# The angle at which the upper flange is drawn
# depends on the bend angle for pipe bends, but is always
# horizontal for pipe straights. Since this will be called
# from a subclass, check whether we have a pipe bend by
# verifying whether the "bend_arc" instance attribute is set.
# pylint: disable=E1101
if hasattr(self, "bend_arc"):
b_arc = self.bend_arc
else:
b_arc = 0
self.flange.draw(ctx, cfp=self.pc_pts["ctr"][0], angle=0,
profile=True, dash_style=self.dash_style)
self.flange.draw(ctx, cfp=self.pc_pts["ctr"][-1], angle=b_arc + pi,
profile=False)
# pylint: enable=E1101
repo_name: paulgriffiths/jobcalc | path: jclib/pipe.py | language: Python | license: gpl-3.0 | size: 9,416
# flake8: noqa
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sim', '0003_section_turn'),
]
operations = [
migrations.RemoveField(
model_name='sectionturndates',
name='section',
),
migrations.DeleteModel(
name='SectionTurnDates',
),
]
repo_name: thraxil/countryx | path: countryx/sim/migrations/0004_auto_20150428_0639.py | language: Python | license: gpl-3.0 | size: 448
from django.db import models
import hashlib
from django.contrib.auth.models import AbstractUser
from notification.utils import logout_notify
class CustomUser(AbstractUser):
token = models.CharField(
max_length=512,
null=True
)
def get_monitor_tokens(self):
return [monitor.android_token for monitor in self.monitors.all()]
class PatientUser(CustomUser):
pass
class Monitor(CustomUser):
android_token = models.CharField(
max_length=512,
null=True,
blank=True
)
monitor_user = models.ForeignKey(
'umiss_auth.CustomUser',
related_name='monitors',
on_delete=models.SET_NULL,
null=True,
)
is_logged = models.CharField(max_length=20, default='false')
__original_token = None
def __init__(self, *args, **kwargs):
# patient = PatientUser.objects.filter(token=kwargs['token'])
# print(args, kwargs, patient)
super(CustomUser, self).__init__(*args, **kwargs)
self.__original_token = self.android_token
def save(self, force_insert=False, force_update=False, *args, **kwargs):
"""Adding a monitor to a patient if the token can be same"""
def logout_user():
print('log')
print(self.__original_token, self.android_token)
if self.android_token != self.__original_token:
logout_notify(self.__original_token)
logout_user()
super(CustomUser, self).save(force_insert, force_update, *args, **kwargs)
qs_patient = PatientUser.objects.filter(token=self.token)
if len(qs_patient):
patient = qs_patient[0]
patient.monitors.add(self)
patient.save()
else:
try:
self.monitor_user.monitors.remove(self)
except AttributeError:
pass
self.__original_token = self.android_token  # track the last saved android token for change detection
def get_patients_tokens(self):
return [patient.token for patient in PatientUser.objects.all()]
repo_name: CadeiraCuidadora/UMISS-backend | path: umiss_project/umiss_auth/models.py | language: Python | license: gpl-3.0 | size: 2,130
#!/home/srbrown/anaconda3/bin/python3
import requests
import json
import textwrap
import os
import sys
import logging
from PIL import Image, ImageDraw, ImageFont
from pathlib import Path
from configparser import ConfigParser
# set the home path
home = str(Path.home())
# setup logging
logging.basicConfig(filename=home + '/.apod/apod.log',
format='%(asctime)s %(message)s',
level=logging.INFO)
# Read from config file
parser = ConfigParser()
configFilePath = home + '/.apod/apod.settings'
parser.read(configFilePath)
scrn_width = int(parser.get('apod_config', 'Screen_Size').split(',')[0])
scrn_length = int(parser.get('apod_config', 'Screen_Size').split(',')[1])
num_chars = int(parser.get('apod_config', 'Num_Characters'))
api_key = parser.get('apod_config', 'API_Key')
title_font_size = int(parser.get('apod_config', 'T_Font_Size'))
body_font_size = int(parser.get('apod_config', 'B_Font_Size'))
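# The settings file is expected to look roughly like this (values are
# illustrative; only the section and key names are taken from the code above):
# [apod_config]
# Screen_Size = 1920,1080
# Num_Characters = 100
# API_Key = <your api.nasa.gov key>
# T_Font_Size = 36
# B_Font_Size = 18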
url = 'https://api.nasa.gov/planetary/apod?api_key=' + api_key
if len(sys.argv) > 1:
date = str(sys.argv[1])
data = json.loads(requests.get(url + '&date=' + date).text)
if len(data) > 2:
logging.info('Command: ' + url + '&date=' + date)
else:
logging.warning('Command failure: ' + url + '&date=' + date)
sys.exit()
else:
data = json.loads(requests.get(url).text)
if len(data) > 2:
logging.info('Command: ' + url)
else:
logging.warning('Command failure: %s', url)
sys.exit()
if data['media_type'] == 'image':
image_url = requests.get(data['hdurl'], stream=True)
if image_url.status_code == 404:
logging.warning('404 Image not found: %s', data['hdurl'])
sys.exit()
elif image_url.status_code != 200:
logging.warning(str(image_url.status_code) +
' Image retrieval error: %s', data['hdurl'])
sys.exit()
else:
logging.info(str(image_url.status_code) +
' Image retrieval successful: %s', data['hdurl'])
image = Image.open(image_url.raw).resize((scrn_width, scrn_length),
resample=0)
draw = ImageDraw.Draw(image)
exp_font = ImageFont.truetype("arial.ttf", body_font_size, encoding="unic")
title_font = ImageFont.truetype("arial.ttf",
title_font_size, encoding="unic")
lines = textwrap.wrap(data['explanation'], width=num_chars)
y_text = (scrn_length / 6) * 5
draw.text((20, y_text - 30), data['title'], fill='white', font=title_font)
for line in lines:
width, height = exp_font.getsize(line)
draw.text((20, y_text), line, font=exp_font, fill='white')
y_text += height
image.save(home + "/.apod/apod.png", 'PNG')
os.system("/usr/bin/gsettings set org.gnome.desktop.background picture-uri file://" +
home + "/.apod/apod.png")
logging.info('Desktop Background set to %s', data['title'])
else:
logging.warning('Image not available')
repo_name: bennsarrow/apod | path: apod.py | language: Python | license: gpl-3.0 | size: 3,078
#!/usr/bin/env python
"""
In this question your task is again to run the clustering algorithm from lecture, but on a MUCH bigger graph. So big,
in fact, that the distances (i.e., edge costs) are only defined implicitly, rather than being provided as an explicit
list.
The data set is below.
clustering_big.txt
The format is:
[# of nodes] [# of bits for each node's label]
[first bit of node 1] ... [last bit of node 1]
[first bit of node 2] ... [last bit of node 2]
...
For example, the third line of the file "0 1 1 0 0 1 1 0 0 1 0 1 1 1 1 1 1 0 1 0 1 1 0 1" denotes the 24 bits
associated with node #2.
The distance between two nodes u and v in this problem is defined as the Hamming distance--- the number of differing
bits --- between the two nodes' labels. For example, the Hamming distance between the 24-bit label of node #2 above
and the label "0 1 0 0 0 1 0 0 0 1 0 1 1 1 1 1 1 0 1 0 0 1 0 1" is 3 (since they differ in the 3rd, 7th, and 21st bits).
The question is: what is the largest value of k such that there is a k-clustering with spacing at least 3? That is,
how many clusters are needed to ensure that no pair of nodes with all but 2 bits in common gets split into different
clusters?
NOTE: The graph implicitly defined by the data file is so big that you probably can't write it out explicitly, let
alone sort the edges by cost. So you will have to be a little creative to complete this part of the question. For
example, is there some way you can identify the smallest distances without explicitly looking at every pair of nodes?
"""
__author__ = 'Vinayak'
from fileIO import readAsList,writeSingleToFile
from itertools import combinations
import re
class UnionFind(object):
"""Implementation of Union Find Data Structure"""
def __init__(self, nodeList):
self._nodeLeaderList=dict()
self._leaderSets = dict()
self._nodeList=nodeList
self._nodeCount=len(nodeList)
for node in nodeList:
self._leaderSets.update({node:{node}})
self._nodeLeaderList.update({node:node})
def find(self, node):
"""Return leader of given node"""
return self._nodeLeaderList[node]
def union(self, node1, node2):
"""Union of tree containing node1 and tree containing node2"""
leader1 = self.find(node1)
leader2 = self.find(node2)
if leader1==leader2:
return
tree1 = self._leaderSets[leader1]
tree2 = self._leaderSets[leader2]
if len(tree1)<len(tree2):
for node in tree1:
self._nodeLeaderList[node]=leader2
self._leaderSets[leader2]=self._leaderSets[leader2].union(self._leaderSets[leader1])
del self._leaderSets[leader1]
else:
for node in tree2:
self._nodeLeaderList[node]=leader1
self._leaderSets[leader1]=self._leaderSets[leader1].union(self._leaderSets[leader2])
del self._leaderSets[leader2]
def uniqueLeaders(self):
return len(self._leaderSets)
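# Minimal usage sketch of the structure above (arbitrary node labels):
#   uf = UnionFind([1, 2, 3])
#   uf.union(1, 2)
#   uf.find(1) == uf.find(2)   # True
#   uf.uniqueLeaders()         # 2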
def calculateMasks(bitCount, maxCombinations):
"""Create swap masks in the form tuples with numbers indicating which bits to swap."""
masks = []
for i in range(1,maxCombinations):
masks=masks+[ c for c in combinations(range(bitCount), i) ]
return masks
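# For example, calculateMasks(4, 3) yields every 1-bit and 2-bit mask over a
# 4-bit label:
#   [(0,), (1,), (2,), (3,), (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]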
def swapByMask(nodeLabel, mask):
"""Swap bits in label according to a mask"""
modNodeLabel = [ c for c in nodeLabel ]
for bitNum in mask:
if modNodeLabel[bitNum] == '0':
modNodeLabel[bitNum] = '1'
elif modNodeLabel[bitNum] == '1':
modNodeLabel[bitNum] = '0'
return "".join(modNodeLabel)
def generateIntegerLabels(nodeList):
"""Returns Dictionary of labels for nodes"""
d = dict()
for node in nodeList:
d.update({node:int(node,2)})
return d
def maskClustering(nodeList, maxSpacing, bitCount):
"""Return the number of clusters formed when maxSpacing is achieved"""
swapMasks = calculateMasks(bitCount,maxSpacing)
labels = generateIntegerLabels(nodeList)
UFObj = UnionFind([value for key, value in labels.items() ])
for node in nodeList:
currentLabel=labels[node]
for mask in swapMasks:
modNodeLabel = swapByMask(node,mask)
if modNodeLabel in labels:
UFObj.union(labels[modNodeLabel],currentLabel)
return UFObj.uniqueLeaders()
if __name__=='__main__':
inputList = readAsList("_fe8d0202cd20a808db6a4d5d06be62f4_clustering_big.txt")
nodeCount, bitCount = [int(value) for value in inputList.pop(0).split(' ')]
inputList = [re.sub('\s+','',string) for string in inputList]
writeSingleToFile("Problem2b.txt",maskClustering(inputList,3,bitCount))
repo_name: vinjai/Algorithms_Coursera_Stanford2 | path: scripts/Problem2b.py | language: Python | license: gpl-3.0 | size: 4,760
__author__ = 'Scott Ficarro'
__version__ = '1.0'
import wx
import wx.grid as grid
try:
from agw import pybusyinfo as PBI
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.pybusyinfo as PBI
#import BlaisPepCalcSlim_aui as BlaisPepCalc
import DatabaseOperations as db
#import QueryBuilder
import os
import mzStudio as BlaisBrowser
import re
import mz_workbench.mz_core as mz_core
from collections import defaultdict
import wx.lib.agw.aui as aui
import copy
from multiplierz.mgf import standard_title_parse
from autocomplete import AutocompleteTextCtrl, list_completer
class dbGrid(wx.grid.Grid):
def __init__(self, parent, rows):
self.parent = parent
wx.grid.Grid.__init__(self, parent, -1, pos=(0,40), size =(1200, 550))#
self.CreateGrid(rows,len(self.parent.cols))
for i, col in enumerate(self.parent.cols):
self.SetColLabelValue(i, col)
self.SetColSize(i, len(col)*10)
for i, member in enumerate(self.parent.rows):
if i % 1000 == 0:
print i
for k, column in enumerate(self.parent.cols):
self.SetCellValue(i, k, str(member[column]))
#self.AutoSize()
#size = self.GetClientSize()
#self.SetSize(size)
#self.ForceRefresh()
#self.Refresh()
#a = 1
class dbFrame(wx.Panel):
def __init__(self, parent, id, bpc):
#PARENT = AUI FRAME
#busy = PBI.PyBusyInfo("Building grid...", parent=None, title="Processing...")
wx.Yield()
#self.parent = parent
wx.Panel.__init__(self,parent,id=id, name='mzResult', pos = (50,50)) #, size =(1250,400)
self.ordering = "desc"
self.reverse = False
self.currentPage = parent.ctrl.GetPage(parent.ctrl.GetSelection())
self.currentFile = self.currentPage.msdb.files[self.currentPage.msdb.Display_ID[self.currentPage.msdb.active_file]]
self.ActiveFileNumber = self.currentPage.msdb.active_file
self.fileName = self.currentPage.msdb.Display_ID[self.ActiveFileNumber]
self.parent = parent #THIS IS AUI FRAME OBJECT
self.parentFileName = self.currentFile['FileAbs']
self.bpc = bpc
#------------------------GET DATA FOR GRID
self.rows, self.cols = db.pull_data_dict(self.currentFile["database"], 'select * from "peptides"')
#-----------------------BUTTONS AND TEXT CONTROLS
#self.query = wx.TextCtrl(self, -1, "select * from peptides;", pos=(60,20)) #, size=(1120,20)
autoTerms = self.currentFile["mzSheetcols"] + ['SELECT', 'FROM', 'peptides', 'WHERE', 'DISTINCT']
self.query = AutocompleteTextCtrl(self, completer = list_completer(autoTerms))
self.query.SetValue('select * from "peptides"')
self.btn = wx.Button(self, -1, "Submit", pos = (40, 20), size= (60,23))
#self.builder = wx.Button(self, -1, "B", pos = (20, 20), size= (20,20))
#-----------------------CREATE GRID
self.grid = dbGrid(self, len(self.rows))
self.current_cell = wx.TextCtrl(self, -1, self.grid.GetCellValue(0,0), pos=(0,0))#, size=(1120,20)
#----------------------EVENTS IN DB FRAME
self.grid.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.OnSelect)
self.grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.OnLabelClick)
self.Bind(wx.EVT_BUTTON, self.OnClick, self.btn)
self.grid.EnableEditing(False) #Turn off cell editing so cells cannot be overwritten.
#self.Bind(wx.EVT_BUTTON, self.OnBuilder, self.builder)
#------------------------------------------------------------Peptigram not yet ready for release.
#self.peptigram = wx.Button(self, -1, "Peptogram", pos = (0, 20), size= (75,23))
#self.Bind(wx.EVT_BUTTON, self.OnPeptigram, self.peptigram)
#------------------------------------------------------------Hide functionality for now.
self.query.Bind(wx.EVT_KEY_UP, self.OnQKeyUp)
#------------------------FRAME SIZERS
self.SizeFrame()
self.currentFileNamesSet = set()
self.curdir = os.path.dirname(self.currentFile["FileAbs"]).lower()
self.currentFileNamesSet.add(self.currentFile["Filename"].lower())
#self.topSizer = topSizer
#self.gridSizer = gridSizer
if "File" in self.currentFile["mzSheetcols"]:
if self.currentFile["SearchType"]=="Mascot":
for row in self.currentFile["rows"]:
if self.currentFile["vendor"]=='Thermo':
currentFileName = self.curdir + '\\' + row["Spectrum Description"].split(".")[0] + '.raw'
elif self.currentFile["vendor"]=='ABI':
currentFileName = self.curdir + '\\' + os.path.basename(row["File"])[:-8]
self.currentFileNamesSet.add(currentFileName)
self.Check_diff = False
if len(self.currentFileNamesSet) > 1:
self.Check_diff = True
self.sub_bank = BlaisBrowser.MS_Data_Manager(self.parent.ctrl.GetPage(self.parent.ctrl.GetSelection()))
#self.sub_bank.addFile(self.currentFile["FileAbs"])
#-----------------------------Manually add the existing file to the "sub-bank"
display_key = self.sub_bank.getFileNum()
self.currentFile['FileAbs']= self.currentFile['FileAbs'].lower()
self.sub_bank.Display_ID[display_key]=self.currentFile["FileAbs"].lower()
self.sub_bank.files[self.currentFile["FileAbs"].lower()]=self.currentFile
self.sub_bank.files[self.currentFile["FileAbs"].lower()]["mzSheetcols"] = self.currentFile["mzSheetcols"]
self.sub_bank.files[self.currentFile["FileAbs"].lower()]["rows"] = self.currentFile["rows"]
self.sub_bank.files[self.currentFile["FileAbs"].lower()]["ID_Dict"] = self.currentFile["ID_Dict"]
self.sub_bank.files
#------------------------------Are there additional files to load?
if "File" in self.currentFile["mzSheetcols"]:
file_set = set()
if self.currentFile['settings']['multiFileOption'] == 'LOAD ALL':
for row in self.currentFile["rows"]:
file_set.add((self.curdir + '\\' + re.compile('(\S+?.raw)').match(os.path.basename(row['File'])).groups()[0]).lower())
for name in list(file_set):
if name not in [x.lower() for x in self.sub_bank.files.keys()]:
print "Loading additional file!"
#Current file is not loaded, need to load
#currentName=self.curdir + '\\' + re.compile('(\S+?.raw)').match(name).groups()[0] #Gets the rawfilename from the file column
print name
self.sub_bank.addFile(name)
#Need to update sheet information; copy direct from currentObject
self.sub_bank.files[name]["mzSheetcols"] = self.currentFile["mzSheetcols"]
self.sub_bank.files[name]["rows"] = self.currentFile["rows"]
self.sub_bank.files[name]["fixedmod"] = self.currentFile["fixedmod"]
self.sub_bank.files[name]["database"] = self.currentFile["database"]
self.sub_bank.files[name]["SearchType"] = self.currentFile["SearchType"]
#print currentGridFilename
self.sub_bank.files[name]["ID_Dict"] = self.currentPage.build_ID_dict(self.currentFile["rows"], self.currentFile["mzSheetcols"], os.path.basename(name))
#print "DUMPING"
#self.currentPage.dump_ID_Dict(self.sub_bank.files[name]["ID_Dict"])
#self.sub_bank.files[currentGridFileName]["ID_Dict"] = self.currentFileObject["ID_Dict"]
self.dump_bank()
self.current_col = 0
self.current_row = 0
self.grid.SetRowAttr(0, self.get_active())
#self.grid.SetColAttr(0, self.get_active())
self.grid.Refresh()
#del busy
self.parent._mgr.Update()
#----------------TRANSLATIONS TO ALLOW SHORTCUTS IN QUERY BOX
self.qtrans = {'[var]':'"Variable Modifications"', '[scr]':'"Peptide Score"',
'[lk]':'like "%%"', '[pdesc]':'"Protein Description"',
'[set1]':'"Accession Number", "Protein Description", "Peptide Sequence", "Variable Modifications", "Experimental mz", "Charge", "Predicted mr", "Delta", "Peptide Score", "Spectrum Description", "Scan", "GeneName"',
'~vm':'"Variable Modifications"', '~lk':'like "%%"', '~pepd':'order by "Peptide Score" desc', '~gn':'"GeneName"', '~var':'"Variable Modifications"',
'~xc':'"Cross-Correlation"', '~sc':'"Peptide Score"', '~ex':'"Expect"', '~ac':'"Accession"', '~de':'"Protein Description"', '~seq':'"Peptide Sequence"'}
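# For example, typing '~sc' in the query box is expanded in place by
# OnQKeyUp below to '"Peptide Score"', and '[set1]' expands to the longer
# column list defined above.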
def OnClose(self, event):
#if self.aui_pane.name != event.pane.name:
# print "%s got event but isn't being closed." % self.aui_pane.name
# event.Skip()
# return
self.currentFile["xlsSource"]=''
self.currentFile['SearchType'] = None
self.currentFile["database"] = None
self.currentFile["rows"], self.currentFile["mzSheetcols"] = [], []
self.currentFile['header']={}
self.currentFile['fixedmod']=""
self.currentFile['varmod']=""
self.currentFile['ID_Dict']={}
self.currentFile["mascot_ID"] = {}
self.currentFile["SILAC"]={"mode":False, "peaks":(), "method":None}
self.currentFile["datLink"] = False
self.currentFile["viewMascot"] = False
self.currentFile['ID']=False
self.currentFile['label_dict']={}
currentPage = self.parent.ctrl.GetPage(self.parent.ctrl.GetSelection())
currentPage.Window.UpdateDrawing()
self.parent.parentFrame.ObjectOrganizer.removeObject(self)
print "Db frame close!"
def SizeFrame(self):
topSizer = wx.BoxSizer(wx.VERTICAL)
gridSizer = wx.BoxSizer(wx.HORIZONTAL)
querySizer = wx.BoxSizer(wx.HORIZONTAL)
valueSizer = wx.BoxSizer(wx.HORIZONTAL)
valueSizer.Add(self.current_cell, 1, wx.ALL|wx.EXPAND, 5)
querySizer.Add(self.btn, 0, wx.ALL, 5)
#querySizer.Add(self.builder, 0, wx.ALL, 5)
#-----------------------------------------------------Hide peptigram functionality
#querySizer.Add(self.peptigram, 0, wx.ALL, 5)
querySizer.Add(self.query, 1, wx.ALL|wx.EXPAND, 5)
gridSizer.Add(self.grid, 1, wx.ALL|wx.EXPAND, 5)
topSizer.Add(valueSizer, 0, wx.ALL|wx.EXPAND, 5)
topSizer.Add(querySizer, 0, wx.ALL|wx.EXPAND, 5)
topSizer.Add(gridSizer, 0, wx.ALL|wx.EXPAND, 5)
self.SetSizer(topSizer)
topSizer.Fit(self)
#-------------------THIS EVENT CHECKS QUERY BOX WITH EACH KEY PRESS FOR SHORTCUT
def OnQKeyUp(self, evt):
found = False
current = self.query.GetValue()
for key in self.qtrans.keys():
if current.find(key) > -1:
found = True
orig_pos = self.query.GetInsertionPoint()
current = current.replace(key, self.qtrans[key])
typed = len(key)
replaced = len(self.qtrans[key])
newpoint = orig_pos - typed + replaced
if found:
self.query.SetValue(current)
self.query.SetInsertionPoint(newpoint)
evt.Skip()
def OnPeptigram(self, evt):
#Go through self.rows
#Make peptigram for each unique seq/mod/cg; if more than one seq/mod/cg, make peptigram from most intense
#Run analysis first?
pep_dict = {} #Key to max intensity
max_dict = {} #Key to scan of max intensity
all_scans = defaultdict(list) #All scans for key
mz_dict = {} #Key to decal mass
for row in self.rows:
seq = row["Peptide Sequence"]
cg = int(row["Charge"])
varmod = row["Variable Modifications"]
spec = row['Spectrum Description']
if 'MultiplierzMGF' in spec:
ms1 = standard_title_parse(spec)['scan']
else:
ms1 = int(spec.split('.')[1])
try:
decal = float(spec.split('|')[1])
except:
decal = float(row['Experimental mz'])
if not varmod:
varmod = "None"
key = seq + "|" + str(cg) + '|' + varmod
start, stop, scan_array = mz_core.derive_elution_range_by_C12_C13(self.currentFile['m'], self.currentFile['scan_dict'], int(ms1), decal, int(cg), 0.02, 200)
scan_array.sort(key=lambda t:t[1], reverse = True)
intensity = scan_array[0][1]
if key in pep_dict.keys(): #KEY ALREADY FOUND
current_inten = pep_dict[key]
if intensity > current_inten:
pep_dict[key] = intensity
max_dict[key] = ms1
mz_dict[key] = decal
all_scans[key].append(ms1)
else: #NOT MORE INTENSE; just add to list of all MS2
all_scans[key].append(ms1)
else: #NEW KEY
pep_dict[key] = intensity
max_dict[key] = ms1
mz_dict[key] = decal
all_scans[key].append(ms1)
#Open XIC frame
self.currentPage = self.parent.ctrl.GetPage(self.parent.ctrl.GetSelection())
#self.currentFile = currentPage.msdb.files[currentPage.msdb.Display_ID[currentPage.msdb.active_file]]
self.frm = BlaisBrowser.xicFrame(self.currentPage, self.currentFile, self.currentPage.msdb.active_file)
self.frm.Show()
winMax = self.frm.get_next_available_window()
currentRow = self.frm.GetXICEntries()
trace = 0
print key
for key in pep_dict.keys():
self.frm.grid.SetCellValue(currentRow, 0, str(winMax)) #WINDOW
self.frm.grid.SetCellValue(currentRow, 1, str(mz_dict[key]-0.02)) #START
self.frm.grid.SetCellValue(currentRow, 2, str(mz_dict[key]+0.02)) #STOP
self.frm.grid.SetCellValue(currentRow, 3, "Full ms ") #FILTER
self.frm.grid.SetCellValue(currentRow, 5, "Auto")
self.frm.grid.SetCellValue(currentRow, 6, '1')
self.frm.grid.SetCellValue(currentRow, 7, '1')
self.frm.grid.SetCellValue(currentRow, 8, 'p')
#SEQ MZ CG SCAN
self.frm.grid.SetCellValue(currentRow, 9, key.split('|')[0])
self.frm.grid.SetCellValue(currentRow, 10, str(mz_dict[key]))
self.frm.grid.SetCellValue(currentRow, 11, key.split('|')[1])
self.frm.grid.SetCellValue(currentRow, 12, str(max_dict[key]))
mark_dict = {}
for scan in all_scans[key]:
mark_dict[scan]=BlaisBrowser.XICLabel(self.currentFile['m'].timeForScan(int(scan)), int(scan), key.split('|')[0], None, cg=int(key.split('|')[1]), fixedmod=self.currentFile["fixedmod"], varmod=key.split('|')[2])
#{9187:XICLabel(current['m'].timeForScan(9187), 9187, "Peptide", current['xic'][1][1])}
self.frm.mark_base.append(mark_dict)
currentRow += 1
self.frm.OnClick(None)
self.frm.Destroy()
def get_default(self):
activeL = wx.grid.GridCellAttr()
activeL.SetBackgroundColour(self.grid.GetDefaultCellBackgroundColour())
activeL.SetTextColour(self.grid.GetDefaultCellTextColour())
activeL.SetFont(self.grid.GetDefaultCellFont())
return activeL
def get_active(self):
activeL = wx.grid.GridCellAttr()
activeL.SetBackgroundColour("pink")
activeL.SetTextColour("black")
activeL.SetFont(wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD))
return activeL
def dump_bank(self):
#print "---------------------------"
for member in self.sub_bank.files.keys():
print self.sub_bank.files[member]["FileAbs"].lower()
def OnBuilder(self, event):
self.QB = QueryBuilder.QueryBuilder(self, id=-1)
self.QB.Show(True)
def OnClick(self, event): ####################EXECUTING QUERY
'''
This function is the event handler for Entering a new Query.
'''
#busy = PBI.PyBusyInfo("Executing Query...", parent=None, title="Processing...")
#wx.Yield()
#try:
#-------------------------------------------------------
query = self.query.GetValue()
if query.find("~set1")>-1:
query = query.replace("~set1", '"Protein Description", "Peptide Sequence", "Variable Modifications" , "Charge" ,"Peptide Score", "Spectrum Description", "Scan"')
#-------------------------------------------------------
#self.rows = db.pull_data_dict(self.currentFile["database"], query)
#self.cols = db.get_columns(self.currentFile["database"], table='peptides' if self.currentFile["SearchType"]=='Mascot' else 'fdr')
#if self.currentFile["SearchType"]=="Mascot":
try:
self.rows, self.cols = db.pull_data_dict(self.currentFile["database"], query)
self.currentFile["mzSheetcols"] = self.cols
except:
wx.MessageBox("There was an error processing\nthe query!")
return
if len(self.rows)==0:
wx.MessageBox("Query returned no results.")
return
#if self.currentFile["SearchType"]=="Pilot":
# self.rows = db.pull_data_dict(self.currentFile["database"], "select * from fdr;", table='fdr')
#self.rows, self.cols = db.construct_data_dict(self.currentFile["database"], query)
print self.cols
self.grid.Destroy()
self.grid = dbGrid(self, len(self.rows))
self.grid.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.OnSelect)
self.grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.OnLabelClick)
self.grid.EnableEditing(False)
self.SizeFrame()
self.grid.Refresh()
self.parent._mgr.Update()
#del busy
#except:
#del busy
#dlg = wx.MessageDialog(self, 'There was an error executing the query...!',
#'Alert',
#wx.OK | wx.ICON_INFORMATION
##wx.YES_NO | wx.NO_DEFAULT | wx.CANCEL | wx.ICON_INFORMATION
#)
#dlg.ShowModal()
#dlg.Destroy()
def OnSelect(self, event):
'''
Highlights the selected row with a pink background.
'''
if event.GetRow() != self.current_row:
self.grid.SetRowAttr(self.current_row, self.get_default())
self.grid.SetRowAttr(event.GetRow(), self.get_active())
# self.grid.SetColAttr(self.current_col, self.get_active())
self.grid.Refresh()
self.current_col = event.GetCol()
self.current_row = event.GetRow()
self.current_cell.SetValue(self.grid.GetCellValue(event.GetRow(), event.GetCol()))
def OnLabelClick(self,event):
#-----------------------------------
#----------This code handles clicking on row or column.
#----------Column click handles sorting by adding sort command to query
#----------Row click --> if combined, check if different file
#----------Lookup scan. Build ID. Send sequence to BPC.
self.dump_bank()
row = event.GetRow()
col = event.GetCol()
'''
CLICKED ON COLUMN
'''
if col > -1:
if self.ordering == "desc":
self.ordering = 'asc'
else:
self.ordering = 'desc'
curQ = self.query.GetValue()
#select * from peptides where "X" like "%%";
q = curQ.find("order by")
if q > -1:
if curQ.endswith(';'):
curQ= curQ[:(q-1)]
else:
curQ = curQ[:q]
else:
if curQ.endswith(';'):
curQ=curQ[:-1]
curQ += ' order by "' + self.cols[col] + '" ' + self.ordering #+ ';'
#print curQ
self.query.SetValue(curQ)
self.OnClick(None)
'''
CLICKED ON ROW
'''
if row > -1:
if row != self.current_row:
#------------------HIGHLIGHT SELECTED ROW
self.grid.SetRowAttr(self.current_row, self.get_default())
self.grid.SetRowAttr(row, self.get_active())
self.current_row = row
#self.grid.SelectRow(row)
self.grid.SetGridCursor(row, self.current_col)
self.grid.Refresh()
currentGridFileName = None
if self.Check_diff:
#print self.currentFile["SearchType"]
spec_lookup = "Spectrum Description" if self.currentFile["SearchType"]=='Mascot' else "Spectrum"
spec_index = 0 if self.currentFile["SearchType"]=='Mascot' else 3
if self.currentFile["vendor"]=='Thermo':
currentGridFileName = (self.curdir + '\\' + self.grid.GetCellValue(row, self.cols.index("Spectrum Description")).split(".")[0] + '.raw' if self.currentFile["SearchType"]=='Mascot' else self.curdir + '\\' + self.grid.GetCellValue(row, self.cols.index("File")).split(".")[0].replace("_RECAL",'') + '.raw').lower()
#elif self.currentFile["vendor"]=='ABI':
# currentGridFileName = self.curdir + '\\' + os.path.basename(self.grid.GetCellValue(row, self.cols.index("File")))[:-8]
if self.currentFile["FileAbs"].lower() != currentGridFileName:
#Switched files, need to update self and parent
if currentGridFileName.lower() not in [x.lower() for x in self.sub_bank.files.keys()]:
#Current file is not loaded, need to load
self.sub_bank.addFile(currentGridFileName)
#Need to update sheet information; copy direct from currentObject
self.sub_bank.files[currentGridFileName]["mzSheetcols"] = self.currentFile["mzSheetcols"]
self.sub_bank.files[currentGridFileName]["rows"] = self.currentFile["rows"]
self.sub_bank.files[currentGridFileName]["fixedmod"] = self.currentFile["fixedmod"]
self.sub_bank.files[currentGridFileName]["database"] = self.currentFile["database"]
self.sub_bank.files[currentGridFileName]["SearchType"] = self.currentFile["SearchType"]
#print currentGridFileName
self.sub_bank.files[currentGridFileName]["ID_Dict"] = self.currentPage.build_ID_dict(self.currentFile["rows"], self.currentFile["mzSheetcols"], os.path.basename(currentGridFileName))
#print "DUMPING"
self.currentPage.dump_ID_Dict(self.sub_bank.files[currentGridFileName]["ID_Dict"])
#self.sub_bank.files[currentGridFileName]["ID_Dict"] = self.currentFileObject["ID_Dict"]
#To switch, need to delete dictionary entry in parent msdb
##print "Attempting delete..."
##print self.parent.msdb.files[self.currentFile["FileAbs"]]
del self.currentPage.msdb.files[self.currentFile["FileAbs"].lower()]
#Need to update with currentFile
##print currentGridFileName
self.currentPage.msdb.files[currentGridFileName] = self.sub_bank.files[currentGridFileName]
self.currentPage.msdb.Display_ID[self.ActiveFileNumber]=currentGridFileName
self.currentFile = self.sub_bank.files[currentGridFileName]
if not currentGridFileName:
currentGridFileName = self.currentFile["FileAbs"].lower()
# mzSheetcols is also set somewhere else, but only sometimes, and not
# reliably reset when a new file is loaded.
self.currentFile['mzSheetcols'] = [self.grid.GetColLabelValue(x) for x in range(self.grid.GetNumberCols())]
#--------------LOOK UP SCAN NUMBER
if self.currentFile['vendor']=='Thermo':
if self.currentFile['SearchType'] in ['Mascot', 'X!Tandem', 'COMET']:
if "Spectrum Description" in self.currentFile['mzSheetcols']:
desc = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Spectrum Description"))
else:
wx.MessageBox("No spectrum description column!\nCan't get scan number!", "mzStudio")
return
if 'MultiplierzMGF' in desc:
scan = int(standard_title_parse(desc)['scan'])
elif 'Locus' in desc:
#scan = (int(desc.split('.')[3]) * self.currentFile['m'].exp_num) + int(desc.split('.')[4].split()[0])-1# MAY NOT BE CORRECT
scan = self.currentFile['m'].make_implicit[int(desc.split('.')[3]),
int(desc.split('.')[4].split()[0])]
else:
scan = int(desc.split(".")[1])
else:
# Proteome Discoverer
if self.currentFile['FileAbs'].lower().endswith(".wiff"):
scan = int(float(self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("scan"))))
if self.currentFile['FileAbs'].lower().endswith(".raw"):
scan = int(float(self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("First Scan"))))
#scan = int(self.currentFile["mzSheetcols"].index("First Scan"))
elif self.currentFile['vendor']=='mgf':
rowdesc = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Spectrum Description"))
try:
rowscannum = standard_title_parse(rowdesc)['scan']
except:
rowscannum = rowdesc.split(".")[1]
scan = self.currentFile["scan_dict"][int(rowscannum)] # I assume its not an X-to-X dict in non-MGF cases.
elif self.currentFile['vendor']=='ABI':
scan = int(self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Spectrum Description")).split(".")[3])-1
try:
exp = str(int(self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Spectrum Description")).split(".")[4].strip())-1)
except:
exp = str(int(self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Spectrum Description")).split(".")[4].split(" ")[0].strip())-1)
self.currentFile["scanNum"] = scan
if self.currentFile["vendor"]=='ABI':
self.currentFile["experiment"] = exp
self.currentPage.msdb.set_scan(scan, self.ActiveFileNumber)
#--------------BUILD CURRENT ID
if self.currentFile['vendor']=='Thermo':
self.currentPage.msdb.build_current_ID(currentGridFileName, scan, 'Thermo')
if self.currentFile["SearchType"] in ['Mascot', 'X!Tandem', 'COMET']:
sequence = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Peptide Sequence"))
else: # Proteome Discoverer
sequence = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Annotated Sequence")).upper()
if self.currentFile['vendor']=='mgf':
self.currentPage.msdb.build_current_ID(currentGridFileName, scan, 'mgf')
if self.currentFile["SearchType"] in ['Mascot', 'X!Tandem', 'COMET']:
sequence = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Peptide Sequence"))
else:
sequence = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Annotated Sequence"))
if self.currentFile['vendor']=='ABI':
exp = self.currentFile['experiment']
#self.currentPage.msdb.build_current_ID(currentGridFileName, (scan-1, str(int(exp)-1)), 'ABI')
self.currentPage.msdb.build_current_ID(currentGridFileName, (scan, exp), 'ABI')
if self.currentFile["SearchType"] in ['Mascot', 'X!Tandem', 'COMET']:
sequence = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Peptide Sequence"))
else: # Proteome Discoverer
sequence = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Annotated Sequence")).upper()
try:
if self.currentPage.msdb.files[currentGridFileName]['fd']['reaction'] == 'etd':
self.bpc.b.FindWindowByName('ions').SetValue('c/z')
else:
self.bpc.b.FindWindowByName('ions').SetValue('b/y')
except AttributeError:
pass
try:
if self.currentFile["SearchType"] in ['Mascot', 'X!Tandem', 'COMET']:
score = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Peptide Score"))
else:
score = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("XCorr"))
self.currentFile['score'] = score
except ValueError:
pass # Input dummy value
pa = re.compile('([a-z]*[A-Z]+?)')
peptide = pa.findall(sequence)
fixedmod = self.currentFile["fixedmod"]
if self.currentFile["SearchType"] in ['Mascot', 'X!Tandem', 'COMET']:
varmod = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Variable Modifications"))
else:
varmod = self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Modifications"))
if not varmod:
varmod = ''
#print "---***"
#print sequence
#print varmod
#print fixedmod
peptide_container = mz_core.create_peptide_container(sequence, varmod, fixedmod)
#print peptide_container
current_sequence = ''
for member in peptide_container:
current_sequence += member
self.bpc.b.FindWindowByName("sequence").SetValue(current_sequence)
c_mod_dict = {"C-Term(Methyl)" : "methyl ester",
'Methyl:2H(3)' : "d3 methyl ester"}
mod_dict = {'iTRAQ4plex': 'iTRAQ',
'TMT6plex': 'TMT',
'TMT': 'cTMT',
'iTRAQ8plex': 'iTRAQ8plex',
'HGly-HGly': 'HCGlyHCGly',
'HCGly-HCGly': 'HCGlyHCGly',
'HCGly-HCGly-HCGly-HCGly': 'HCGlyHCGlyHCGlyHCGly',
'HNGly-HNGly-HNGly-HNGly': 'HNGlyHNGlyHNGlyHNGly',
'HNGly-HNGly': 'HNGlyHNGly',
'LbA-LbA': 'LbALbA',
'HbA-HbA': 'HbAHbA',
'LbA-HbA': 'LbAHbA',
'Acetyl': 'Acetyl',
'Propionyl': 'Propionyl',
'Phenylisocyanate': 'Phenylisocyanate'}
#print self.currentFile["SearchType"]
if self.currentFile["SearchType"] in ["Mascot", "Proteome Discoverer"]:
#print "NTERM MODS!"
if fixedmod == None:
fixedmod = ''
for mod in fixedmod.split(","):
mod = mod.strip()
#print mod
if mod.lower().find("n-term") > -1:
mod = mod.split(" ")[0]
mod = mod.strip()
#print mod_dict[mod]
self.bpc.b.FindWindowByName("nTerm").SetValue(mod_dict[mod])
for mod in fixedmod.split(","):
mod = mod.strip()
#print mod
if mod.lower().find("c-term") > -1:
mod = mod.split(" ")[0]
mod = mod.strip()
#print mod_dict[mod]
self.bpc.b.FindWindowByName("cTerm").SetValue(c_mod_dict[mod])
for mod in varmod.split(";"): #N-term: Acetyl
mod = mod.strip()
#print mod
if mod.lower().find("n-term") > -1:
mod = mod.split(" ")[1]
mod = mod.strip()
#print mod_dict[mod]
self.bpc.b.FindWindowByName("nTerm").SetValue(mod_dict[mod])
if mod.lower().find("c-term") > -1:
if self.currentFile["SearchType"] == "Mascot":
mod = mod.split(" ")[1]
mod = mod.strip()
#print mod_dict[mod]
self.bpc.b.FindWindowByName("cTerm").SetValue(c_mod_dict[mod])
if self.currentFile["SearchType"] == 'Proteome Discoverer':
self.bpc.b.FindWindowByName("cTerm").SetValue(c_mod_dict[mod])
self.bpc.b.OnCalculate(None)
#----------------------IF SILAC MODE, UPDATE SILAC PEAKS
if self.currentFile["SILAC"]["mode"]:
#calc peaks for SILAC!
#light medium heavy
multimod = False
if self.currentFile["SILAC"]["method"]=='SILAC K+4 K+8 R+6 R+10 [MD]':
charge = int(float(self.grid.GetCellValue(row, self.currentFile["mzSheetcols"].index("Charge"))))
light = ''
for member in peptide_container:
if member[-1:] not in ["R","K"]:
light += member
else:
if member in ['pK', 'pdK', 'pseK']:
light += ['pK']
multimod = True
else:
light += member[-1:]
#light = ''.join([x[-1:] for x in peptide_container])
if not multimod:
medium = light.replace("K", "deutK").replace("R", "silacR")
heavy = light.replace("K", "seK").replace("R", "sR")
else:
#NEED TO ACCOUNT FOR POSSIBILITY OF MIXED pK and regular K.
#ONLY CONVERT pK
# K would go to deutK or seK while prK would go to pdK or pseK
medium = ''
heavy = ''
for member in peptide_container:
if member == 'K':
medium += 'deutK'
heavy += 'seK'
elif member == 'pK':
medium += 'pdK'
heavy += 'pseK'
else:
medium += member
heavy += member
medium = medium.replace("R", "silacR")
heavy = heavy.replace("R", "sR")
light_mz, b ,y = mz_core.calc_pep_mass_from_residues(light, cg=charge)
medium_mz, b, y = mz_core.calc_pep_mass_from_residues(medium, cg=charge)
heavy_mz, b, y = mz_core.calc_pep_mass_from_residues(heavy, cg=charge)
self.currentFile["SILAC"]["peaks"]=(light_mz, medium_mz, heavy_mz)
self.currentPage.Refresh()
self.dump_bank()
self.currentPage.Window.UpdateDrawing()
repo_name: BlaisProteomics/mzStudio | path: mzStudio/dbFrame.py | language: Python | license: gpl-3.0 | size: 37,267
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Raspberry Pi Internet Radio playlist utility
# $Id: create_playlists.py,v 1.21 2014/07/12 10:35:53 bob Exp $
#
# Create playlist files from the following url formats
# iPhone stream files (.asx)
# Playlist files (.pls)
# Extended M3U files (.m3u)
# Direct media streams (no extension)
#
# See Raspberry PI Radio constructors manual for instructions
#
# Author : Bob Rathbone
# Web site : http://www.bobrathbone.com
#
#
# License: GNU V3, See https://www.gnu.org/copyleft/gpl.html
#
# Disclaimer: Software is provided as is and absolutely no warranties are implied or given.
# The authors shall not be liable for any loss or damage however caused.
#
import os
import sys
import urllib2
from xml.dom.minidom import parseString
# Output errors to STDERR
stderr = sys.stderr.write
# File locations
PlsDirectory = '/var/lib/mpd/playlists/'
RadioDir = '/home/pi/radio/'
RadioLibDir = '/var/lib/radiod/'
StationList = RadioLibDir + 'stationlist'
DistFile = '/home/pi/radio/station.urls'
TempDir = '/tmp/radio_stream_files/'
PlaylistsDir = '/home/pi/radio/playlists/'
PodcastsFile = '/var/lib/radiod/podcasts'
duplicateCount = 0
# Execute system command
def execCommand(cmd):
p = os.popen(cmd)
return p.readline().rstrip('\n')
# Create the initial list of files
def createList():
if not os.path.isfile(StationList):
print 'Creating ' + StationList + '\n'
execCommand ("mkdir -p " + RadioLibDir )
print ("cp " + DistFile + ' ' + StationList )
execCommand ("cp " + DistFile + ' ' + StationList)
print
return
# Create the output from the title and URL
def createPlsOutput(title,url,filenumber):
lines = []
lines.append('File%s=%s' % (filenumber,url))
lines.append('Title%s=%s' % (filenumber,title))
lines.append('Length%s=-1' % filenumber)
return lines
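# For example, createPlsOutput('BBC Radio 3', 'http://example.com/r3', 2)
# returns ['File2=http://example.com/r3', 'Title2=BBC Radio 3', 'Length2=-1']
# (the URL is only illustrative).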
# Create the PLS or M3U file
def createPlsFile(filename,output,nlines):
global duplicateCount
uniqueCount = 1
if len(filename) < 1:
filename = 'unknown'
outfile = TempDir + filename + '.pls'
# Create unique files
exists = True
while exists:
if os.path.exists(outfile):
print "Warning: " + outfile + ' already exists'
outfile = TempDir + filename + '[' + str(uniqueCount) + '].pls'
uniqueCount += 1
duplicateCount += 1
else:
exists = False
try:
print 'Creating ' + outfile + '\n'
outfile = open(outfile,'w')
outfile.writelines("[playlist]\n")
outfile.writelines("NumberOfEntries=%s\n"% nlines)
outfile.writelines("Version=2\n")
for item in output:
outstr = item.encode('utf8', 'replace')
outfile.write(outstr + "\n")
outfile.close()
except:
print "Failed to create",outfile
return
# Beautify HTML: convert tags to lower case
def parseHTML(data):
lcdata = ''
for line in data:
lcline = ''
line = line.rstrip()
line = line.lstrip()
line.replace('href =', 'href=')
length = len(line)
if length < 1:
continue
tag1right = line.find('>')
if tag1right > 1:
start_tag = line[0:tag1right+1]
lcline = lcline + start_tag.lower()
tag2left = line.find('<', tag1right+1)
if tag2left > 1:
end_tag = line[tag2left:length]
parameter = line[tag1right+1:tag2left]
lcline = lcline + parameter + end_tag.lower()
lcdata = lcdata + lcline
return lcdata
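# For example, the line '<Title>BBC Radio 3</Title>' becomes
# '<title>BBC Radio 3</title>': only the tags are lower-cased, while the
# text between them is preserved as-is.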
# Get XML/HTML parameter
def getParameter(line):
tag1right = line.find('>')
tag2left = line.find('<', tag1right+1)
parameter = line[tag1right+1:tag2left]
return parameter
# Create a PLS file from an ASX(XML) file
def parseAsx(title,url,data,filenumber):
global errorCount
global warningCount
lcdata = parseHTML(data)
try:
dom = parseString(lcdata)
except Exception,e:
print "Error:",e
print "Error: Could not parse XML data from,", url + '\n'
errorCount += 1
return
try:
# If title undefined in the station list get it from the file
if len(title) < 1:
titleTag = dom.getElementsByTagName('title')[0].toxml()
title = getParameter(titleTag)
except:
print "Warning: Title not found in", url
pass
finally:
try:
urlTag = dom.getElementsByTagName('ref')[0].toxml()
url = urlTag.replace('<ref href=\"','').replace('\"/>','')
urls = url.split('?')
url = urls[0]
print 'Title:',title
plsfile = title.replace(' ','_')
output = createPlsOutput(title,url,filenumber)
except IndexError,e:
print "Error:",e
print "Error parsing", url
errorCount += 1
return "# DOM Error"
return output
# Create filename from URL
def createFileName(title,url):
if len(title) > 0:
name = title
name = name.replace('.',' ')
name = name.replace(' ','_')
else:
try:
urlparts = url.rsplit('/',1)
site = urlparts[0]
siteparts = site.split('/')
name = siteparts[2]
siteparts = name.split(':')
name = siteparts[0]
except:
name = url
name = name.replace('www.','')
name = name.replace('.com','')
name = name.replace('.','_')
name = name.replace('__','_')
return name
# Create default title
def createTitle(url):
urlparts = url.rsplit('/',1)
site = urlparts[0]
siteparts = site.split('/')
name = siteparts[2]
siteparts = name.split(':')
title = siteparts[0]
return title
# Direct radio stream (MP3 AAC etc)
def parseDirect(title,url,filenumber):
url = url.replace('(stream)', '')
if len(title) < 1:
title = createTitle(url)
print "Title:",title
output = createPlsOutput(title,url,filenumber)
return output
# Create PLS file in the temporary directory
def parsePls(title,url,lines,filenumber):
plstitle = ''
plsurl = ''
for line in lines:
if line.startswith('Title1='):
titleline = line.split('=')
plstitle = titleline[1]
if line.startswith('File1='):
fileline = line.split('=')
plsurl = fileline[1]
# If title undefined in the station list get it from the file
if len(title) < 1:
if len(plstitle) > 1:
title = plstitle
else:
title = createTitle(url)
plsfile = createFileName(title,url)
print 'Title:',title
plsfile = title.replace(' ','_')
output = createPlsOutput(title,plsurl,filenumber)
return output
# Convert M3U file to PLS output
def parseM3u(title,lines,filenumber):
info = 'Unknown'
output = []
for line in lines:
line = line.replace('\r','')
line = line.replace('\n','')
if line.startswith('http:'):
url = line
elif line.startswith('#EXTINF:'):
info = line
if len(title) < 1:
title = info
if len(title) < 1:
filename = createFileName(title,url)
else:
filename = title.replace(' ','_')
print 'Title:',title
output.append('Title%s=%s'% (filenumber,title))
output.append('File%s=%s'% (filenumber,url))
output.append('Length%s=-1'% filenumber)
return output
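# Editor's sketch (hypothetical URL, defined but never called): parseM3u converts
# M3U lines into PLS-style Title/File/Length entries for the given file number.
def _parseM3u_example():
    lines = ['#EXTM3U', '#EXTINF:-1,BBC Radio', 'http://example.com/stream.mp3']
    assert parseM3u('BBC Radio', lines, 1) == ['Title1=BBC Radio',
                                               'File1=http://example.com/stream.mp3',
                                               'Length1=-1']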
# Usage message
def usage():
stderr("\nUsage: %s [--delete_old] [--no_delete] [--help]\n" % sys.argv[0])
stderr("\tWhere: --delete_old Delete old playlists\n")
stderr("\t --no_delete Don't delete old playlists\n")
stderr("\t --help Display help message\n\n")
return
# Station definition help message
def format():
stderr ("Start a playlist with the name between brackets. For example:\n")
stderr ("(BBC Radio Stations)\n")
stderr ("This will create a playlist called BBC_Radio_Stations.pls)\n")
stderr ("\nThe above is followed by station definitions which take the following format:\n")
stderr ("\t[title] http://<url>\n")
stderr ("\tExample:\n")
stderr ("\t[BBC Radio 3] http://bbc.co.uk/radio/listen/live/r3.asx\n\n")
stderr ("End a playlist by inserting a blank line at the end of the list of stations\n")
stderr ("or start a new playlist definition.\n\n")
return
# Start of MAIN script
if os.getuid() != 0:
print "This program can only be run as root user or using sudo"
sys.exit(1)
deleteOld = False
noDelete = False
if len(sys.argv) > 2:
stderr("\nError: you may not define more than one parameter at a time\n")
usage()
sys.exit(1)
if len(sys.argv) > 1:
param = sys.argv[1]
if param == '--delete_old':
deleteOld = True
elif param == '--no_delete':
noDelete = True
elif param == '--help':
usage()
format()
sys.exit(0)
else:
stderr("Invalid parameter %s\n" % param)
usage()
sys.exit(1)
# Create station URL list
createList()
# Temporary directory - if it exists then delete all pls files from it
execCommand ("mkdir -p " + TempDir )
execCommand ("rm -f " + TempDir + '*' )
# Open the list of URLs
print "Creating PLS files from", StationList + '\n'
lineCount = 0 # Line being processed (Including comments)
errorCount = 0 # Errors
duplicateCount = 0 # Duplicate file names
warningCount = 0 # Warnings
processedCount = 0 # Processed station count
# Copy user stream files to temporary directory
print "Copying user PLS and M3U files from " + PlaylistsDir + " to " + TempDir + '\n'
if os.listdir(PlaylistsDir):
execCommand ("cp -f " + PlaylistsDir + '* ' + TempDir )
# Playlist file name
filename = ''
pls_output = []
filenumber = 1
writeFile = False
url = ''
# Main processing loop
for line in open(StationList,'r'):
lineCount += 1
lines = []
newplaylist = ''
# Set url types to False
isASX = False
isM3U = False
isPLS = False
# Skip commented out or blank lines
line = line.rstrip() # Remove line feed
if line[:1] == '#':
continue
# Handle playlist definition in () brackets
elif line[:1] == '(':
newplaylist = line.strip('(') # Remove left bracket
newplaylist = newplaylist.strip(')') # Remove right bracket
playlistname = newplaylist
newplaylist = newplaylist.replace(' ', '_')
if len(filename) > 0:
writeFile = True
else:
print "Playlist:", playlistname
filename = newplaylist
filenumber = 1
continue
if len(line) < 1 or writeFile:
if len(filename) < 1 and len(url) > 0:
filename = createFileName(title,url)
if len(filename) > 0 and len(pls_output) > 0:
createPlsFile(filename,pls_output,filenumber-1)
filenumber = 1
pls_output = []
filename = ''
url = ''
if len(newplaylist) > 0:
filename = newplaylist
continue
if writeFile and len(line) > 0:
writeFile = False
else:
continue
# Check start of title defined
elif line[:1] != '[':
stderr("Error: Missing left bracket [ in line %s in %s\n" % (lineCount,StationList))
format()
errorCount += 1
continue
processedCount += 1
line = line.lstrip('[')
# Get title and URL parts
line = line.strip()
lineparts = line.split(']')
# Should be 2 parts (title and url)
if len(lineparts) != 2:
stderr("Error: Missing right bracket [ in line %s in %s\n" % (lineCount,StationList))
format()
errorCount += 1
continue
# Get title and URL from station definition
title = lineparts[0].lstrip()
url = lineparts[1].lstrip()
# Get the published URL and determine its type
print 'Processing line ' + str(lineCount) + ': ' + url
# Extended M3U (MPEG 3 URL) format
if url.endswith('.m3u'):
isM3U = True
# Advanced Stream Redirector (ASX)
elif url.endswith('.asx'):
isASX = True
# Playlist format
elif url.endswith('.pls'):
isPLS = True
# Advanced Audio Coding stream (Don't retrieve any URL)
else:
# Remove redundant (stream) parameter
url = url.replace('(stream)', '')
pls_output += parseDirect(title,url,filenumber)
if len(filename) < 1:
filename = createFileName(title,url)
writeFile = True
filenumber += 1
continue
# Get the published URL to the stream file
try:
file = urllib2.urlopen(url)
data = file.read()
file.close()
except:
print "Error: Failed to retrieve ", url
errorCount += 1
continue
# Create list from data
lines = data.split('\n')
firstline = lines[0].rstrip()
# Process lines according to URL type
if isPLS:
pls_output += parsePls(title,url,lines,filenumber)
elif isM3U:
pls_output += parseM3u(title,lines,filenumber)
elif isASX:
if firstline.startswith('<ASX'):
pls_output += parseAsx(title,url,lines,filenumber)
else:
print url,"didn't return XML data"
continue
if len(filename) < 1:
filename = createFileName(title,url)
writeFile = True
filenumber += 1
# End of for line
# Write last file
if len(filename) < 1:
filename = createFileName(title,url)
if len(filename) > 0 and len(pls_output) > 0:
createPlsFile(filename,pls_output,filenumber-1)
print ("Processed %s station URLs from %s" % (processedCount,StationList))
# Copy files from temporary directory to playlist directory
oldfiles = len(os.listdir(PlsDirectory))
if oldfiles > 0:
if not deleteOld and not noDelete:
stderr("There are %s old playlist files in the %s directory.\n" % (oldfiles,PlsDirectory))
stderr("Do you wish to remove the old files y/n: ")
answer = raw_input("")
if answer == 'y':
deleteOld = True
if deleteOld:
stderr ("\nRemoving old playlists from directory %s\n" % PlsDirectory)
execCommand ("rm -f " + PlsDirectory + "*.pls" )
execCommand ("rm -f " + PlsDirectory + "*.m3u" )
else:
print "Old playlist files not removed"
copiedCount = len(os.listdir(TempDir))
print "Copying %s new playlist files to directory %s" % (copiedCount,PlsDirectory)
execCommand ("cp -f " + TempDir + '* ' + PlsDirectory )
if os.path.isfile(PodcastsFile):
print "\nCreating Podcast playlists from " + PodcastsFile
execCommand(RadioDir + "create_podcasts.py")
# Create summary report
print "\nNew radio playlist files will be found in " + PlsDirectory
if errorCount > 0:
print str(errorCount) + " error(s)"
if duplicateCount > 0:
print str(duplicateCount) + " duplicate file name(s) found and renamed."
warningCount += duplicateCount
if warningCount > 0:
print str(warningCount) + " warning(s)"
# End of script
|
pabloest/piradio
|
create_playlists.py
|
Python
|
gpl-3.0
| 13,494
|
from fabric.api import run, local, hosts, cd
from fabric.contrib import django
# Download and start Docker
def install_run():
run('sudo apt-get update')
run('sudo apt-get install -y docker.io')
run('sudo docker pull pmmre/bares:bares')
run('sudo docker run -i -t pmmre/bares:bares /bin/bash')
# Run the application in development mode
def runApp():
run('cd Bares && sudo python manage.py runserver 0.0.0.0:80')
# Update the application
def actApp():
run('cd Bares && sudo git pull')
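# Editor's note (assumes the Fabric 1.x command-line tool that ships with
# fabric.api): these tasks are typically invoked as, for example:
#   fab -H user@host install_run
#   fab -H user@host runApp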
|
pmmre/Bares
|
fabfile.py
|
Python
|
gpl-3.0
| 499
|
#!/usr/bin/python3
from .ruliweb_spiders import RuliwebSpider
class RuliwebSpiderPSP(RuliwebSpider):
name = 'ps'
|
munhyunsu/UsedMarketAnalysis
|
ruliweb_crawl/ruliweb/spiders/ps_spiders.py
|
Python
|
gpl-3.0
| 118
|
# -*- coding: utf-8 -*-
"""
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
import os
def mul3and5(n):
result = 0
for num in range(1, n):
if num % 3 == 0 or num % 5 == 0:
result += num
return result
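# Editor's sketch (an alternative to the loop above, not used by test_): the same
# sum in O(1) via inclusion-exclusion over multiples of 3, 5 and 15.
def mul3and5_closed_form(n):
    def tri(k):
        m = (n - 1) // k          # how many multiples of k lie below n
        return k * m * (m + 1) // 2
    return tri(3) + tri(5) - tri(15)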
def test_():
assert(mul3and5(10) == 23)
print(mul3and5(1000))
print('Tests Passed!')
if __name__ == '__main__':
test_()
os.system("pause")
|
NicovincX2/Python-3.5
|
Project Euler/1.list_sum_of_multiples_3-5.py
|
Python
|
gpl-3.0
| 536
|
# Copyright (C) 2014 Oleh Prypin <blaxpirit@gmail.com>
#
# This file is part of UniversalQt.
#
# UniversalQt is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UniversalQt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with UniversalQt. If not, see <http://www.gnu.org/licenses/>.
from . import _prefer
exception = None
for module in _prefer:
if module=='pyside':
try:
from PySide.QtGui import *
except Exception as e:
if exception is None:
exception = e
else:
exception = None
break
elif module=='pyqt4':
try:
from PyQt4.QtGui import *
except Exception as e:
if exception is None:
exception = e
else:
exception = None
break
elif module=='pyqt5':
try:
from PyQt5.QtGui import *
except Exception as e:
if exception is None:
exception = e
else:
exception = None
break
if exception is not None:
raise exception
del exception
|
BlaXpirit/steam-notifier
|
universal-qt/qt/gui.py
|
Python
|
gpl-3.0
| 1,555
|
def add(x: float, y: float) -> float:
return x + y
def main() -> int:
print("2.0 + 3.0 =", add(2.0, 3.0))
return 0
|
mugwort-rc/py2cpp
|
samples/add.py
|
Python
|
gpl-3.0
| 128
|
#!/Programs/Python34/python
import cgi
import os
import cgitb
import sys
from importlib.machinery import SourceFileLoader
import core.web as web
cgitb.enable()
WEB_ROOT = os.environ["SCRIPT_NAME"].replace("index.py", "")
ROOT = os.environ["SCRIPT_FILENAME"].replace("index.py", "")
getMethod = cgi.FieldStorage()
cmd = getMethod.getvalue("cmd")
html = ""
if cmd is None:
file = SourceFileLoader("home", "controllers/home.py").load_module()
controller = file.Controller
html = controller.index()
else:
params = getMethod.getvalue("cmd").split('/')
error_d = ROOT + "controllers/" + params[0] + ".py"
if params[0] != "" and params[0] != "error" and os.path.exists(ROOT + "controllers/" + params[0] + ".py"):
controller = params[0]
file = SourceFileLoader(params[0], "controllers/" + params[0] + ".py").load_module()
controller = file.Controller
html = controller.index()
elif params[0] != "" and params[0] != "error":
file = SourceFileLoader("special", "controllers/special.py").load_module()
controller = file.Controller
html = controller.index(params[0])
else:
file = SourceFileLoader("error", "controllers/error.py").load_module()
controller = file.Controller(WEB_ROOT)
# controller.WEB_ROOT = WEB_ROOT
html = controller.index()
html = html.format(bundle=web.bundles, **locals())
print(html)
|
JLesuperb/PythonWeb
|
framework-mvc/index.py
|
Python
|
gpl-3.0
| 1,455
|
import bottle, json, time
from gevent.socket import IPPROTO_TCP, TCP_NODELAY
from gevent.pywsgi import WSGIServer
from gevent.fileobject import FileObject as gevent_open
from geventwebsocket.handler import WebSocketHandler
from sakura.hub.web.manager import rpc_manager
from sakura.hub.web.bottle import bottle_get_wsock
from sakura.hub.web.cache import webcache_serve
from sakura.hub.web.csvtools import export_table_as_csv, export_stream_as_csv
from sakura.hub.web.video import serve_video_stream
from sakura.hub.db import db_session_wrapper
from sakura.common.tools import monitored
from pathlib import Path
from bottle import template
from collections import namedtuple
from sakura.hub import conf
def to_namedtuple(clsname, d):
return namedtuple(clsname, d.keys())(**d)
class NoDelayWSHandler(WebSocketHandler):
def __init__(self, sock, *args, **kwargs):
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
super().__init__(sock, *args, **kwargs)
def web_greenlet(context, webapp_path):
app = bottle.Bottle()
allowed_startup_urls = ('/', '/index.html')
@monitored
def ws_handle(proto_name):
wsock = bottle_get_wsock()
with db_session_wrapper():
rpc_manager(context, wsock, proto_name)
@app.route('/websocket')
@app.route('/api-websocket')
def ws_create():
proto_name = bottle.request.query.protocol or 'json'
ws_handle(proto_name)
@app.route('/opfiles/<op_id:int>/<filepath:path>')
def serve_operator_file(op_id, filepath):
print('serving operator %d file %s' % (op_id, filepath), end="")
with db_session_wrapper():
resp = context.serve_operator_file(op_id, filepath)
print(' ->', resp.status_line)
return resp
@app.route('/streams/<op_id:int>/input/<in_id:int>/export.csv')
def exp_in_stream_as_csv(op_id, in_id):
with db_session_wrapper():
yield from export_stream_as_csv(context, op_id, 0, in_id)
@app.route('/streams/<op_id:int>/input/<in_id:int>/export.csv.gz')
def exp_in_stream_as_csv_gz(op_id, in_id):
with db_session_wrapper():
yield from export_stream_as_csv(context, op_id, 0, in_id, True)
@app.route('/streams/<op_id:int>/output/<out_id:int>/export.csv')
def exp_out_stream_as_csv(op_id, out_id):
with db_session_wrapper():
yield from export_stream_as_csv(context, op_id, 1, out_id)
@app.route('/streams/<op_id:int>/output/<out_id:int>/export.csv.gz')
def exp_out_stream_as_csv_gz(op_id, out_id):
with db_session_wrapper():
yield from export_stream_as_csv(context, op_id, 1, out_id, True)
@app.route('/tables/<table_id:int>/export.csv')
def exp_table_as_csv(table_id):
with db_session_wrapper():
yield from export_table_as_csv(context, table_id)
@app.route('/tables/<table_id:int>/export.csv.gz')
def exp_table_as_csv_gz(table_id):
with db_session_wrapper():
yield from export_table_as_csv(context, table_id, gzip_compression=True)
@app.route('/streams/<op_id:int>/opengl/<ogl_id:int>/video-<width:int>x<height:int>.mp4')
def route_serve_video_stream(op_id, ogl_id, width, height):
with db_session_wrapper():
yield from serve_video_stream(context, op_id, ogl_id, width, height)
@app.route('/modules/dataflows/templates/<filepath:path>', method=['POST'])
def serve_template(filepath):
params = json.loads(
bottle.request.forms['params'],
object_hook = lambda d: to_namedtuple('Params', d))
with gevent_open(Path(webapp_path) / 'modules' / 'dataflows' / 'templates' /filepath) as f:
return template(f.read(), **params._asdict())
@app.route('/webcache/cdnjs/<filepath:path>')
def serve_cdnjs_cache(filepath):
return webcache_serve('cdnjs', filepath)
# if no route was found above, look for static files in webapp subdir
@app.route('/')
@app.route('/<filepath:path>')
def serve_static(filepath = 'index.html'):
print('serving ' + filepath, end="")
open = gevent_open
resp = bottle.static_file(filepath, root = webapp_path)
print(' ->', resp.status_line)
session_id_management_post(resp)
return resp
# session-id cookie management
# ----------------------------
def get_session_id_cookie():
session_id = bottle.request.get_cookie("session-id")
if session_id is None:
return None
try:
return int(session_id)
except ValueError:
bottle.abort(401, 'Wrong session id.')
# session-id management
@app.hook('before_request')
def session_id_management_pre():
requested_session_id = get_session_id_cookie()
#print(bottle.request.path, 'requested session id:', requested_session_id)
with db_session_wrapper():
if not context.attach_session(requested_session_id):
# session-id cookie is not present or no longer valid
if bottle.request.path in (allowed_startup_urls + ('/api-websocket',)):
# create a new session
context.new_session()
print(bottle.request.path, 'created a new session', context.session.id)
if bottle.request.path == '/websocket':
bottle.abort(503, 'missing or invalid session cookie')
@app.hook('after_request')
def session_id_management_post(resp=bottle.response):
requested_session_id = get_session_id_cookie()
with db_session_wrapper():
if context.session is not None:
if requested_session_id != context.session.id and \
bottle.request.path in allowed_startup_urls:
print(bottle.request.path, 'let the browser update session id cookie', context.session.id)
resp.set_cookie("session-id", str(context.session.id))
# Ensure the browser will always request the root document
# (instead of using its cache), so that we can update the
# session-id cookie in the response if needed.
# The browser will then associate this possibly new session-id
# to subsequent page requests.
if bottle.request.path in allowed_startup_urls:
resp.set_header("Cache-Control", "no-cache, must-revalidate")
server = WSGIServer(('', conf.web_port), app,
handler_class=NoDelayWSHandler)
server.start()
ws_handle.catch_issues()
|
eduble/panteda
|
sakura/hub/web/greenlet.py
|
Python
|
gpl-3.0
| 6,600
|
# Copyright 2011 Sebastien Maccagnoni-Munch
#
# This file is part of Omoma.
#
# Omoma is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# Omoma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Omoma. If not, see <http://www.gnu.org/licenses/>.
"""
OFX import parser for Omoma
"""
# Uses the ofx2qif program (shipped with libofx)
import datetime
import os
import subprocess
import tempfile
from django import forms
from django.conf import settings
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from omoma.omoma_web.importexport import import_transaction
from omoma.omoma_web.models import Account, Transaction
def name():
"""
Return the parser's name
"""
if os.access(settings.OFX2QIF, os.X_OK):
return 'OFX (Open Financial Exchange)'
else:
return None
def check(filedata):
"""
Check if the data fits to this parser
"""
tmpfile, tmpfilename = tempfile.mkstemp('.tmp', 'omoma-ofx-import-')
os.write(tmpfile, filedata)
os.close(tmpfile)
qif = subprocess.Popen([settings.OFX2QIF, tmpfilename],
stdout=subprocess.PIPE).stdout.read()
os.remove(tmpfilename)
# Return True if there is content in "qif"
return not not qif
# pylint: disable=E1101,W0232,R0903
class DetailsForm(forms.Form):
"""
OFX details form
"""
# pylint: disable=E1002
def __init__(self, request, *args, **kwargs):
aid = kwargs.pop('aid', None)
super (DetailsForm, self).__init__(*args, **kwargs)
self.request = request
for account in request.session['importparser']['parser'].accounts():
self.fields['account%s' % slugify(account)] = \
forms.ModelChoiceField(
Account.objects.filter(owner=request.user),
initial=aid, required=False,
label=_('Account "%s"') % account)
class Parser:
"""
The parser
"""
def __init__(self, filedata):
tmpfile, tmpfilename = tempfile.mkstemp('.tmp', 'omoma-ofx-import-')
os.write(tmpfile, filedata)
os.close(tmpfile)
self.qif = subprocess.Popen([settings.OFX2QIF, tmpfilename],
stdout=subprocess.PIPE).stdout.read()
os.remove(tmpfilename)
def accounts(self):
"""
Return the list of all accounts
"""
accounts = []
for line in self.qif.split('\n'):
if line == '!Account':
inaccount = True
accountname = None
elif line == '^' and inaccount:
accounts.append(accountname)
inaccount = False
elif line.startswith('N') and inaccount:
accountname = line[1:]
return accounts
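# Editor's sketch of the QIF fragment shape this parser walks through
# (hypothetical data; ofx2qif emits one '!Account' block per account and
# terminates each record with '^'):
#
#   !Account
#   NMy Checking Account
#   ^
#   !Type:Bank
#   D01/02/2011
#   T-12.50
#   PGROCERY STORE
#   ^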
def parse(self, form):
"""
Parse an OFX file.
Tested with files from :
- Credit Mutuel (french bank)
- La Banque Postale (french bank)
"""
accounts = {}
for field in form.fields.keys():
if field.startswith('account'):
act = form.cleaned_data.get(field)
if act:
# Validate the accounts are owned by the user
if not form.request.user in act.owner.all():
return False
accounts[field[7:]] = act
msg = []
inaccount = False
account = None
transactions_added = 0
transactions_already_exist = 0
transactions_failed = 0
for line in self.qif.split('\n'):
if line:
if line == '!Account':
if account:
details = []
if transactions_added:
details.append(_('%d imported') % \
transactions_added)
if transactions_already_exist:
details.append(_('%d already existed') % \
transactions_already_exist)
if transactions_failed:
details.append(_('%d failed') % \
transactions_failed)
msg.append(_('In account "%(account)s": %(details)s.')\
% {'account':account.name,
'details':', '.join(details)})
inaccount = True
accountname = ''
account = None
transactions_added = 0
transactions_already_exist = 0
transactions_failed = 0
if inaccount and line[0] == 'N':
accountname = line[1:]
if accounts.has_key(slugify(accountname)):
account = accounts[slugify(accountname)]
if line.startswith('!Type:') and account:
transaction = Transaction(account=account)
elif line == '^':
if inaccount:
inaccount = False
elif account:
result = import_transaction(form.request, transaction)
if result == True:
transactions_added = transactions_added + 1
elif result == False:
transactions_already_exist = \
transactions_already_exist + 1
elif result == None:
transactions_failed = transactions_failed + 1
transaction = Transaction(account=account)
elif line[0] == 'D' and not inaccount:
transaction.date = datetime.datetime.strptime(\
line[1:].strip(), '%d/%m/%Y')
elif line[0] == 'T' and not inaccount:
transaction.amount = line[1:]
elif line[0] == 'P' and not inaccount:
description = line[1:].strip()
transaction.original_description = description
return ''.join(msg)
|
TheGU/omoma
|
omoma/omoma_web/importexport/ofxparser.py
|
Python
|
gpl-3.0
| 6,882
|
import sys
import array
import xml.sax
from bin.libs.xmlhandler.corpusXMLHandler import CorpusXMLHandler
from base.sentence import Sentence
from base.word import Word, WORD_ATTRIBUTES
from base.__common import ATTRIBUTE_SEPARATOR
from util import verbose
NGRAM_LIMIT=16
def copy_list(ls):
return list(ls)
def make_array(initializer=None):
if initializer is None:
return array.array('i')
else:
return array.array('i', initializer)
# Taken from counter.py
def load_array_from_file( an_array, a_filename ) :
"""
Fills an existing array with the contents of a file.
"""
MAX_MEM = 10000
fd = open( a_filename )
isMore = True
while isMore :
try :
an_array.fromfile( fd, MAX_MEM )
except EOFError :
isMore = False # Did not read MAX_MEM items? Not a problem...
fd.close()
def save_array_to_file(array, path):
"""
Dumps an array to a file.
"""
file = open(path, "w")
array.tofile(file)
file.close()
def load_symbols_from_file(symbols, path):
"""
Fills an existing symbol table with the contents of a file.
"""
file = open(path, "rb")
id = 0
symbols.number_to_symbol = []
symbols.symbol_to_number = {}
for line in file:
sym = line.rstrip('\n').decode("utf-8")
symbols.symbol_to_number[sym] = id
symbols.number_to_symbol.append(sym)
id += 1
file.close()
def save_symbols_to_file(symbols, path):
"""
Dumps a symbol table to a file.
"""
file = open(path, "wb")
for sym in symbols.number_to_symbol:
file.write(sym.encode("utf-8") + '\n')
file.close()
#def compare_indices(corpus, max, pos1, pos2):
# while pos1<max and pos2<max and corpus[pos1] == corpus[pos2]:
# pos1 += 1
# pos2 += 1
#
# if pos1>=max:
# return -1
# elif pos2>=max:
# return 1
# else:
# return int(corpus[pos1] - corpus[pos2])
def compare_ngrams(ngram1, pos1, ngram2, pos2, ngram1_exhausted=-1, ngram2_exhausted=1, limit=NGRAM_LIMIT):
"""
Compares the ngram at position `pos1` in the word list `ngram1` with
the ngram at position `pos2` in the word list `ngram2`. Returns an
integer less than, equal to, or greater than 0 if the first ngram is less
than, equal to, or greater than the second, respectively. At most the first
`limit` words will be compared.
@param ngram1 A list or array of numbers, each representing a word.
Likewise for `ngram2`.
@param pos1 Position where the first ngram begins in `ngram1`.
Likewise for `pos2`.
@param ngram1_exhausted Value returned if the first ngram ends before
the second and the ngrams have been equal so far. The default is `-1`,
which means that an ngram `[1, 2]` will be considered lesser than
`[1, 2, 3]`. Likewise for `ngram2_exhausted`.
@param limit Compare at most `limit` words. Defaults to `NGRAM_LIMIT`.
"""
max1 = len(ngram1)
max2 = len(ngram2)
i = 0
while pos1<max1 and pos2<max2 and ngram1[pos1]==ngram2[pos2] and i<limit:
pos1 += 1
pos2 += 1
i += 1
if pos1>=max1 and pos2>=max2:
return 0
elif pos1>=max1:
return ngram1_exhausted
elif pos2>=max2:
return ngram2_exhausted
else:
return int(ngram1[pos1] - ngram2[pos2])
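# Editor's sketch (defined but never called): expected results of compare_ngrams
# on tiny hand-made ngrams, following the docstring above.
def _compare_ngrams_example():
    assert compare_ngrams([1, 2, 3], 0, [1, 2, 3], 0) == 0   # identical ngrams
    assert compare_ngrams([1, 2], 0, [1, 2, 3], 0) == -1     # first ngram exhausted
    assert compare_ngrams([1, 2, 3], 0, [1, 2], 0) == 1      # second ngram exhausted
    assert compare_ngrams([1, 1, 9], 0, [1, 1, 2], 0) > 0    # decided by 9 > 2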
def fuse_suffix_arrays(array1, array2):
"""
Returns a new `SuffixArray` fusing the `corpus` data of each input array.
This is used to generate indices for combined attributes (e.g., lemma+pos).
"""
fused_array = SuffixArray()
for i in xrange(len(array1.corpus)):
sym1 = array1.symbols.number_to_symbol[array1.corpus[i]]
sym2 = array2.symbols.number_to_symbol[array2.corpus[i]]
fused_array.append_word(sym1 + ATTRIBUTE_SEPARATOR + sym2)
return fused_array
class SymbolTable():
"""
Handles the conversion between word strings and numbers.
"""
def __init__(self):
self.symbol_to_number = {'': 0}
self.number_to_symbol = ['']
self.last_number = 0
def intern(self, symbol):
"""
Adds the string `symbol` to the symbol table.
"""
if not self.symbol_to_number.has_key(symbol):
self.last_number += 1
self.symbol_to_number[symbol] = self.last_number
#self.number_to_symbol[self.last_number] = symbol
self.number_to_symbol.append(symbol) # Risky and not intention-expressing
return self.symbol_to_number[symbol]
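# Editor's sketch (defined but never called): interning the same symbol twice
# returns the same number; 0 stays reserved for the empty string used as the
# end-of-sentence marker.
def _symbol_table_example():
    table = SymbolTable()
    assert table.intern("dog") == 1
    assert table.intern("cat") == 2
    assert table.intern("dog") == 1
    assert table.number_to_symbol[2] == "cat"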
class SuffixArray():
"""
Class containing the corpus and suffix arrays and the symbol table
for one attribute of a corpus.
"""
def __init__(self):
self.corpus = make_array() # List of word numbers
self.suffix = make_array() # List of word positions
self.symbols = SymbolTable() # word<->number conversion table
def set_basepath(self, basepath):
"""
Sets the base path for the suffix array files.
"""
self.basepath = basepath
self.corpus_path = basepath + ".corpus"
self.suffix_path = basepath + ".suffix"
self.symbols_path = basepath + ".symbols"
def load(self):
"""
Loads the suffix array from the files at `self.basepath`.
"""
load_array_from_file(self.corpus, self.corpus_path)
load_array_from_file(self.suffix, self.suffix_path)
load_symbols_from_file(self.symbols, self.symbols_path)
def save(self):
"""
Saves the suffix array to the files at `self.basepath`.
"""
save_array_to_file(self.corpus, self.corpus_path)
save_array_to_file(self.suffix, self.suffix_path)
save_symbols_to_file(self.symbols, self.symbols_path)
def append_word(self, word):
"""
Adds a new word to the end of the corpus array, putting it in the
symbol table if necessary.
"""
self.corpus.append(self.symbols.intern(word))
# For debugging.
def append_string(self, sentence):
for w in sentence.split():
self.append_word(w)
def build_suffix_array(self):
"""
Builds the sorted suffix array from the corpus array.
"""
tmpseq = range(0, len(self.corpus))
tmpseq.sort(cmp=(lambda a,b: compare_ngrams(self.corpus, a, self.corpus, b)))
self.suffix = make_array(tmpseq)
def find_ngram_range(self, ngram, min=0, max=None):
"""
Returns a tuple `(first, last)` of matching ngram positions in
the suffix array, or `None` if there is no match.
"""
# TODO: We will need a more "incremental" approach for searching for
# patterns that use multiple word attributes. (Can't be done!)
if max is None:
max = len(self.suffix) - 1
first = self.binary_search_ngram(ngram, min, max, (lambda a,b: a >= b))
last = self.binary_search_ngram(ngram, min, max, (lambda a,b: a > b))
if first is None:
return None
if last is None:
last = max
else:
last -= 1
if first <= last:
return (first, last)
else:
return None
def binary_search_ngram(self, ngram, first, last, cmp):
"""
Find the least suffix that satisfies `suffix <cmp> ngram`, or
`None` if there is none.
"""
# 'max' must be one more than 'last', for the case no suffix
# satisfies the comparison.
max = last + 1
min = first
while min < max:
mid = (min+max)/2
if cmp(compare_ngrams(self.corpus, self.suffix[mid], ngram, 0, ngram2_exhausted=0), 0):
max = mid # If 'mid' satisfies, then what we want *is* mid or *is before* mid
else:
mid += 1
min = mid # If 'mid' does not satisfy, what we want *must be after* mid.
if mid > last:
return None
else:
return mid
# For debugging.
def dump_suffixes(self, limit=10, start=0, max=None):
"""
Prints the suffix array to standard output (for debugging).
"""
if max is None:
max = len(self.suffix)
#for pos in self.suffix:
for suf in xrange(start, max):
pos = self.suffix[suf]
print "%4d:" % pos,
for i in range(pos, pos+limit):
if i < len(self.corpus):
sym = self.symbols.number_to_symbol[self.corpus[i]]
if sym == "":
sym = "#"
print sym,
else:
print "*",
print ""
class Index():
"""
This class holds the `SuffixArray`s for all attributes of a corpus,
plus metadata which is common for all attributes.
"""
def __init__(self, basepath=None, used_word_attributes=None):
self.arrays = {}
self.metadata = { "corpus_size": 0 }
self.sentence_count = 0
if used_word_attributes is not None:
self.used_word_attributes = used_word_attributes
else:
self.used_word_attributes = copy_list(WORD_ATTRIBUTES)
if basepath is not None:
self.set_basepath(basepath)
def fresh_arrays(self):
"""
Creates empty suffix arrays for each used attribute in the index.
"""
for attr in self.used_word_attributes:
self.arrays[attr] = SuffixArray()
def set_basepath(self, path):
"""
Sets the base path for the index files.
"""
self.basepath = path
self.metadata_path = path + ".info"
def load(self, attribute):
"""
Load an attribute from the corresponding index files.
If the attribute is of the form `a1+a2` and the corresponding
file does not exist, creates a new suffix array fusing the
arrays for attributes `a1` and `a2`.
"""
if self.arrays.has_key(attribute):
return self.arrays[attribute]
verbose("Loading corpus files for attribute \"%s\"." % attribute)
array = SuffixArray()
path = self.basepath + "." + attribute
array.set_basepath(path)
try:
array.load()
except IOError, err:
# If attribute is composed, fuse the corresponding suffix arrays.
if '+' in attribute:
attr1, attr2 = attribute.rsplit('+', 1)
verbose("Fusing suffix arrays for %s and %s..." % (attr1, attr2))
array = fuse_suffix_arrays(self.load(attr1), self.load(attr2))
array.set_basepath(path)
array.build_suffix_array()
array.save()
else:
raise err
self.arrays[attribute] = array
return array
def save(self, attribute):
"""
Saves the suffix array for `attribute` to the corresponding files.
"""
array = self.arrays[attribute]
array.set_basepath(self.basepath + "." + attribute)
array.save()
def load_metadata(self):
"""
Loads the index metadata from the corresponding file.
"""
metafile = open(self.metadata_path)
for line in metafile:
key, type, value = line.rstrip('\n').split(" ", 2)
if type == "int":
value = int(value)
self.metadata[key] = value
metafile.close()
def save_metadata(self):
"""
Saves the index metadata to the corresponding file.
"""
metafile = open(self.metadata_path, "w")
for key, value in self.metadata.items():
if isinstance(value, int):
type = "int"
else:
type = "string"
metafile.write("%s %s %s\n" % (key, type, value))
metafile.close()
# Load/save main (non-composite) attributes and metadata
def load_main(self):
self.load_metadata()
for attr in self.used_word_attributes:
self.load(attr)
def save_main(self):
self.save_metadata()
for attr in self.used_word_attributes:
self.save(attr)
def append_sentence(self, sentence):
"""
Adds a `Sentence` (presumably extracted from an XML file) to the index.
"""
for attr in self.used_word_attributes:
for word in sentence.word_list:
value = getattr(word, attr)
self.arrays[attr].append_word(value)
self.arrays[attr].append_word('') # '' (symbol 0) means end-of-sentence
self.metadata["corpus_size"] += len(sentence.word_list)
self.sentence_count += 1
if self.sentence_count % 100 == 0:
verbose("Processing sentence %d" % self.sentence_count)
def build_suffix_arrays(self):
"""
Build suffix arrays for all attributes in the index.
"""
for attr in self.arrays.keys():
verbose("Building suffix array for %s..." % attr)
self.arrays[attr].build_suffix_array()
def iterate_sentences(self):
"""
Returns an iterator over all sentences in the corpus.
"""
id = 1
guide = self.used_word_attributes[0] # guide?
length = len(self.arrays[guide].corpus)
words = []
for i in range(0, length):
if self.arrays[guide].corpus[i] == 0:
# We have already a whole sentence.
sentence = Sentence(words, id)
id += 1
words = []
yield sentence
else:
args = []
for attr in self.used_word_attributes:
number = self.arrays[attr].corpus[i]
symbol = self.arrays[attr].symbols.number_to_symbol[number]
args.append(symbol)
args.append([])
words.append(Word(*args))
# For debugging.
def print_sentences(self):
for sentence in self.iterate_sentences():
for word in sentence.word_list:
print word.surface,
print ""
def index_from_corpus(corpus, basepath=None, attrs=None):
"""
Generates an `Index` from a corpus file.
"""
parser = xml.sax.make_parser()
index = Index(basepath, attrs)
index.fresh_arrays()
parser.setContentHandler(CorpusXMLHandler(index.append_sentence))
parser.parse(corpus)
index.build_suffix_arrays()
index.save_main()
return index
#t = fuse_suffix_arrays(h.arrays["surface"], h.arrays["pos"])
# For debugging.
def standalone_main(argv):
if len(argv) != 3:
print >>sys.stderr, "Usage: python indexlib.py <basepath> <corpus>"
return 1
basepath = argv[1]
corpus = argv[2]
index = index_from_corpus(corpus)
index.set_basepath(basepath)
index.save_main()
print >>sys.stderr, "Done."
if __name__ == "__main__":
standalone_main(sys.argv)
|
KWARC/mwetoolkit
|
bin/old/indexlib_stable.py
|
Python
|
gpl-3.0
| 12,942
|
# -*- coding: utf-8 -*-
from __future__ import division
__author__ = 'ogaidukov'
from collections import defaultdict
from io import BytesIO
from werkzeug import secure_filename
from flask import request, Blueprint, render_template, redirect, escape, jsonify, url_for, make_response
from flask.ext.login import login_required, logout_user, current_user
from console import app, login_manager, redirectors
from console.views import forms
from console.lib import funcs
from console.lib.identity import manager_required
from sqlalchemy import func, or_
from sqlalchemy.orm import joinedload
from sqlalchemy.orm.exc import NoResultFound
from commonlib.database import session
from commonlib.model import Campaign, Creative, CreativeFormat, GeoCountries, GeoCities, \
Counter, AdLog, Person, Organization, Contractor, Site
manager_bp = Blueprint('manager', __name__)
@manager_bp.route("/organizations")
@login_required
@manager_required
def organizations():
orgs = Organization.query \
.options(joinedload(Organization.persons)) \
.all()
return render_template('manager/organizations.html',
orgs=orgs)
@manager_bp.route("/new_organization", methods=["GET", "POST"])
@login_required
@manager_required
def new_organization():
form = forms.NewOrganizationForm(request.form)
if request.method == "POST" and form.validate_on_submit():
redirect_url = request.args.get('back_url', url_for(redirectors[current_user.person.role]))
organization = Organization(form.name.data, form.full_name.data)
session.add(organization)
session.commit()
return redirect(redirect_url)
return render_template("manager/edit_organization.html",
form=form,
type='new',
back_url=request.referrer if request.referrer else None)
@manager_bp.route("/edit_organization/<int:org_id>", methods=["GET", "POST"])
@login_required
@manager_required
def edit_organization(org_id):
form = forms.EditOrganizationForm(request.form)
organization = Organization.query.filter_by(id=org_id).first()
if request.method == "POST" and form.validate_on_submit():
organization.name = form.name.data
organization.full_name = form.full_name.data
session.commit()
redirect_url = request.args.get('back_url', url_for(redirectors[current_user.person.role]))
return redirect(redirect_url)
else:
form.name.data = organization.name
form.full_name.data = organization.full_name
return render_template("manager/edit_organization.html",
form=form,
type='edit',
back_url=request.referrer if request.referrer else None)
@manager_bp.route("/select_organizations_ajax", methods=["GET"])
@login_required
@manager_required
def select_organizations_ajax():
def format_name(name, full_name):
if full_name is None or full_name == u'':
return u"{}".format(name)
else:
return u"{} ({})".format(name, full_name)
query = escape(request.args.get('query'))
like_arg = u"%{}%".format(query)
organizations = Organization.query\
.filter(or_(Organization.name.ilike(like_arg), Organization.full_name.ilike(like_arg))).all()
res = map(lambda x: {'id': x.id, 'name': format_name(x.name, x.full_name)}, organizations)
return jsonify(organizations=res)
@manager_bp.route("/organization_by_id_ajax", methods=["GET"])
@login_required
@manager_required
def organization_by_id_ajax():
org_id = request.args.get('org_id')
if org_id is not None and not org_id.isdigit():
return jsonify()
try:
organization = Organization.query\
.filter_by(id=org_id).one()
except NoResultFound:
return jsonify()
if organization.full_name is None or organization.full_name == u'':
name = u"{}".format(organization.name)
else:
name = u"{} ({})".format(organization.name, organization.full_name)
return jsonify(id=organization.id, name=name)
@manager_bp.route("/persons")
@login_required
@manager_required
def persons():
persons = Person.query \
.options(joinedload(Person.organization)) \
.all()
return render_template('manager/persons.html',
persons=persons)
@manager_bp.route("/person/<int:person_id>")
@login_required
@manager_required
def person(person_id):
persons = Person.query \
.options(joinedload(Person.organization)) \
.filter(Person.id == person_id) \
.all()
return render_template('manager/persons.html',
persons=persons)
@manager_bp.route("/new_person", methods=["GET", "POST"])
@login_required
@manager_required
def new_person():
form = forms.NewPersonForm(request.form)
if request.method == "POST" and form.validate_on_submit():
redirect_url = request.args.get('back_url', url_for(redirectors[current_user.person.role]))
person = Person(form.email.data, form.password.data, form.first_name.data, form.surname.data,
role=form.role.data, is_blocked=form.is_blocked.data)
if form.role.data == 'customer':
person.contractor_ref = None
person.organization_ref = form.organization_id.data if form.organization_id.data else None
elif form.role.data == 'contractor':
person.contractor_ref = form.contractor_id.data if form.contractor_id.data else None
person.organization_ref = None
elif form.role.data == 'manager':
person.contractor_ref = None
person.organization_ref = None
session.add(person)
session.commit()
return redirect(redirect_url)
return render_template("manager/edit_person.html",
form=form,
type='new',
back_url=request.referrer if request.referrer else None)
@manager_bp.route("/edit_person/<int:person_id>", methods=["GET", "POST"])
@login_required
@manager_required
def edit_person(person_id):
form = forms.EditPersonForm(request.form)
person = Person.query.filter_by(id=person_id).first()
if request.method == "POST" and form.validate_on_submit():
person.email = form.email.data
person.first_name = form.first_name.data
person.surname = form.surname.data
if form.password.data != '******':
person.update_password(form.password.data)
person.organization_ref = form.organization_id.data if form.organization_id.data else None
person.role = form.role.data
if form.role.data == 'customer':
person.contractor_ref = None
person.organization_ref = form.organization_id.data if form.organization_id.data else None
elif form.role.data == 'contractor':
person.contractor_ref = form.contractor_id.data if form.contractor_id.data else None
person.organization_ref = None
elif form.role.data == 'manager':
person.contractor_ref = None
person.organization_ref = None
person.is_blocked = form.is_blocked.data
session.commit()
if form.password.data != '******' and current_user.person.id == person_id:
logout_user()
redirect_url = url_for(login_manager.login_view)
else:
redirect_url = request.args.get('back_url', url_for(redirectors[current_user.person.role]))
return redirect(redirect_url)
else:
form.email.data = person.email
form.first_name.data = person.first_name
form.surname.data = person.surname
form.password.data = '******'
form.retry_password.data = '******'
form.role.data = person.role
form.organization_id.data = person.organization_ref
form.contractor_id.data = person.contractor_ref
form.is_blocked.data = person.is_blocked
return render_template("manager/edit_person.html",
form=form,
type='edit',
back_url=request.referrer if request.referrer else None)
@manager_bp.route("/campaigns_all")
@login_required
@manager_required
def campaigns_all():
min_realstart_date_query = Campaign.query \
.with_entities(Campaign.id, func.min(Counter.realstart_date).label('min_realstart_date')) \
.join(Creative.counters) \
.join(Creative.campaign) \
.group_by(Campaign.id) \
.subquery('min_realstart_date')
campaigns_list = session.query(Campaign, 'min_realstart_date') \
.options(joinedload(Campaign.organization)) \
.outerjoin(min_realstart_date_query, min_realstart_date_query.c.id == Campaign.id) \
.filter(Campaign.state != 'archived') \
.all()
impressions_summary = session.query(Campaign, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(AdLog.record_type == 'impr',
Campaign.state != 'archived') \
.group_by(Campaign) \
.all()
impressions_rej_geo = session.query(Campaign, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(AdLog.record_type == 'impr_rej_geo',
Campaign.state != 'archived') \
.group_by(Campaign) \
.all()
impressions_rej_browser = session.query(Campaign, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(AdLog.record_type == 'impr_rej_browser',
Campaign.state != 'archived') \
.group_by(Campaign) \
.all()
clicks_summary = session.query(Campaign, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(AdLog.record_type == 'clck',
Campaign.state != 'archived') \
.group_by(Campaign)\
.all()
clicks_rej_geo = session.query(Campaign, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(AdLog.record_type == 'clck_rej_geo',
Campaign.state != 'archived') \
.group_by(Campaign)\
.all()
clicks_rej_browser = session.query(Campaign, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(AdLog.record_type == 'clck_rej_browser',
Campaign.state != 'archived') \
.group_by(Campaign)\
.all()
campaigns = prepare_campaigns(campaigns_list,
impressions_summary,
impressions_rej_geo,
impressions_rej_browser,
clicks_summary,
clicks_rej_geo,
clicks_rej_browser)
return render_template('manager/campaigns/campaigns_all.html',
campaigns=campaigns)
@manager_bp.route("/campaigns_by_org/<int:org_id>")
@login_required
@manager_required
def campaigns_by_org(org_id):
min_realstart_date_query = Campaign.query \
.with_entities(Campaign.id, func.min(Counter.realstart_date).label('min_realstart_date')) \
.join(Creative.counters) \
.join(Creative.campaign) \
.filter(Organization.id == org_id) \
.group_by(Campaign.id) \
.subquery('min_realstart_date')
campaigns_list = session.query(Campaign, 'min_realstart_date') \
.join(Campaign.organization) \
.options(joinedload(Campaign.organization)) \
.outerjoin(min_realstart_date_query, min_realstart_date_query.c.id == Campaign.id) \
.filter(Organization.id == org_id,
Campaign.state != 'archived') \
.all()
try:
organization = campaigns_list[0]
org_name = organization[0].organization.name
org_full_name = organization[0].organization.full_name
except IndexError:
org = Organization.query.filter_by(id=org_id).one()
return render_template('manager/campaigns/first_campaign.html',
org_name=org.name,
org_full_name=org.full_name)
impressions_summary = session.query(Campaign, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Organization.id == org_id,
AdLog.record_type == 'impr',
Campaign.state != 'archived') \
.group_by(Campaign) \
.all()
impressions_rej_geo = session.query(Campaign, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Organization.id == org_id,
AdLog.record_type == 'impr_rej_geo',
Campaign.state != 'archived') \
.group_by(Campaign) \
.all()
impressions_rej_browser = session.query(Campaign, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Organization.id == org_id,
AdLog.record_type == 'impr_rej_browser',
Campaign.state != 'archived') \
.group_by(Campaign) \
.all()
clicks_summary = session.query(Campaign, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Organization.id == org_id,
AdLog.record_type == 'clck',
Campaign.state != 'archived') \
.group_by(Campaign)\
.all()
clicks_rej_geo = session.query(Campaign, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Organization.id == org_id,
AdLog.record_type == 'clck_rej_geo',
Campaign.state != 'archived') \
.group_by(Campaign)\
.all()
clicks_rej_browser = session.query(Campaign, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Organization.id == org_id,
AdLog.record_type == 'clck_rej_browser',
Campaign.state != 'archived') \
.group_by(Campaign)\
.all()
campaigns = prepare_campaigns(campaigns_list,
impressions_summary,
impressions_rej_geo,
impressions_rej_browser,
clicks_summary,
clicks_rej_geo,
clicks_rej_browser)
return render_template('manager/campaigns/campaigns_by_org.html',
campaigns=campaigns,
org_name=org_name,
org_full_name=org_full_name)
def prepare_campaigns(campaigns_list, impressions_summary, impressions_rej_geo, impressions_rej_browser,
clicks_summary, clicks_rej_geo, clicks_rej_browser):
campaign_mix = defaultdict(dict)
for campaign in campaigns_list:
campaign_mix[campaign[0].id].update({'org_id': campaign[0].organization.id,
'org_name': campaign[0].organization.name,
'org_full_name': campaign[0].organization.full_name,
'campaign_id': campaign[0].id,
'campaign_name': campaign[0].name,
'start_date': campaign[0].start_date,
'realstart_date': campaign[1],
'due_date': campaign[0].due_date,
'state': funcs.states[campaign[0].state],
'target_impr': campaign[0].target_impressions})
for summary in impressions_summary:
campaign = summary[0]
val = summary[1]
campaign_mix[campaign.id].update({'impr': val})
for summary in impressions_rej_geo:
campaign = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
campaign_mix[campaign.id].update({'impr_rej_geo': val})
for summary in impressions_rej_browser:
campaign = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
campaign_mix[campaign.id].update({'impr_rej_browser': val})
for summary in clicks_summary:
campaign = summary[0]
val = summary[1]
campaign_mix[campaign.id].update({'clck': val})
for summary in clicks_rej_geo:
campaign = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
campaign_mix[campaign.id].update({'clck_rej_geo': val})
for summary in clicks_rej_browser:
campaign = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
campaign_mix[campaign.id].update({'clck_browser': val})
campaigns = campaign_mix.values()
for d in campaigns:
try:
d['ctr'] = '{0:0.02f}'.format(d['clck'] / d['impr'] * 100.0)
except TypeError:
d['ctr'] = '{0:0.02f}'.format(0.0)
except KeyError:
d['ctr'] = u'нет'
except ZeroDivisionError:
d['ctr'] = '∞'
try:
reach = funcs.reach(d['target_impr'], d['impr'], d['frequency'])
d['reach'] = funcs.format_decimal('{0:0.0f}'.format(reach))
except (ZeroDivisionError, TypeError):
d['reach'] = '{0:0.0f}'.format(0.0)
except KeyError:
d['reach'] = u'нет'
try:
if d['target_impr']:
d['target_impr'] = funcs.format_decimal(d['target_impr'])
else:
d['target_impr'] = u'нет'
except KeyError:
pass
try:
d['impr'] = funcs.format_decimal(d['impr'])
except KeyError:
pass
try:
d['clck'] = funcs.format_decimal(d['clck'])
except KeyError:
pass
return campaigns
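# Editor's note: the 'ctr' value above is clicks / impressions * 100 formatted to
# two decimals, e.g. 25 clicks over 10000 impressions renders as '0.25'.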
@manager_bp.route("/stats_by_campaign/<int:campaign_id>")
@login_required
@manager_required
def stats_by_campaign(campaign_id):
campaign = Campaign.query\
.options(joinedload(Campaign.organization)) \
.filter(Campaign.id == campaign_id) \
.one()
if campaign is None:
return redirect(url_for(redirectors[current_user.person.role]))
return render_template('manager/campaigns/graph_by_campaign.html',
start_date=campaign.start_date,
due_date=campaign.due_date,
campaign=campaign)
@manager_bp.route("/stats_by_creative/<int:creative_id>")
@login_required
@manager_required
def stats_by_creative(creative_id):
creative = Creative.query \
.options(joinedload(Creative.campaign, Campaign.organization)) \
.filter(Creative.id == creative_id) \
.one()
if creative is None:
return redirect(url_for(redirectors[current_user.person.role]))
return render_template('manager/campaigns/graph_by_creative.html',
creative=creative)
@manager_bp.route("/new_campaign", methods=["GET", "POST"])
@login_required
@manager_required
def new_campaign():
form = forms.NewCampaignForm(request.form)
if request.method == "POST" and form.validate_on_submit():
redirect_url = request.args.get('back_url', url_for(redirectors[current_user.person.role]))
campaign = Campaign(form.name.data, None, form.start_date.data, form.due_date.data,
state=form.state.data,
target_impressions=form.target_impressions.data)
campaign.organization_ref = form.organization_id.data
campaign.sites = Site.to_list(form.sites.data)
session.add(campaign)
session.commit()
return redirect(redirect_url)
return render_template("manager/campaigns/edit_campaign.html",
form=form,
type='new',
back_url=request.referrer if request.referrer else None)
@manager_bp.route("/edit_campaign/<int:campaign_id>", methods=["GET", "POST"])
@login_required
@manager_required
def edit_campaign(campaign_id):
try:
campaign = session.query(Campaign) \
.options(joinedload(Campaign.organization)) \
.filter(Campaign.id == campaign_id,
Campaign.state != 'archived')\
.one()
except NoResultFound:
return redirect(url_for(redirectors[current_user.person.role]))
form = forms.EditCampaignForm(request.form)
if request.method == "POST" and form.validate_on_submit():
campaign.name = form.name.data
campaign.organization_ref = form.organization_id.data
campaign.start_date = form.start_date.data
campaign.due_date = form.due_date.data
campaign.state = form.state.data
campaign.target_impressions = form.target_impressions.data
if form.is_archived.data is True:
campaign.state = 'archived'
campaign.sites.extend(Site.to_list(form.sites.data))
session.commit()
redirect_url = request.args.get('back_url', url_for(redirectors[current_user.person.role]))
return redirect(redirect_url)
else:
form.name.data = campaign.name
form.organization_id.data = campaign.organization_ref
form.start_date.data = campaign.start_date
form.due_date.data = campaign.due_date
form.state.data = campaign.state
form.target_impressions.data = campaign.target_impressions
form.is_archived.data = True if campaign.state == 'archived' else False
form.sites.data = ""
return render_template("manager/campaigns/edit_campaign.html",
campaign=campaign,
form=form,
type='edit',
back_url=request.referrer if request.referrer else None)
@manager_bp.route("/creatives_by_campaign/<int:campaign_id>")
@login_required
@manager_required
def creatives_by_campaign(campaign_id):
try:
campaign = session.query(Campaign) \
.options(joinedload(Campaign.organization)) \
.filter(Campaign.id == campaign_id,
Campaign.state != 'archived')\
.one()
except NoResultFound:
return redirect(url_for(redirectors[current_user.person.role]))
creatives_list = Creative.query \
.join(Campaign.creatives) \
.options(joinedload(Creative.creative_format)) \
.filter(Campaign.id == campaign_id) \
.all()
realstart_date = session.query(func.min(Counter.realstart_date)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.filter(Campaign.id == campaign_id) \
.one()[0]
if len(creatives_list) == 0:
return render_template('manager/campaigns/first_creative.html',
org_id=campaign.organization.id,
org_name=campaign.organization.name,
org_full_name=campaign.organization.full_name,
campaign_id=campaign_id,
campaign_name=campaign.name)
impressions_summary = session.query(Creative, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Campaign.id == campaign_id,
AdLog.record_type == 'impr') \
.group_by(Creative) \
.all()
impressions_rej_geo = session.query(Creative, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Campaign.id == campaign_id,
AdLog.record_type == 'impr_rej_geo') \
.group_by(Creative) \
.all()
impressions_rej_browser = session.query(Creative, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Campaign.id == campaign_id,
AdLog.record_type == 'impr_rej_browser') \
.group_by(Creative) \
.all()
clicks_summary = session.query(Creative, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Campaign.id == campaign_id,
AdLog.record_type == 'clck') \
.group_by(Creative)\
.all()
clicks_rej_geo = session.query(Creative, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Campaign.id == campaign_id,
AdLog.record_type == 'clck_rej_geo') \
.group_by(Creative)\
.all()
clicks_rej_browser = session.query(Creative, func.sum(AdLog.value)) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.filter(Campaign.id == campaign_id,
AdLog.record_type == 'clck_rej_browser') \
.group_by(Creative)\
.all()
creative_mix = defaultdict(dict)
for creative in creatives_list:
creative_mix[creative.id].update({'creative_id': creative.id,
'creative_format': creative.creative_format,
'start_date': campaign.start_date,
'realstart_date': realstart_date,
'due_date': campaign.due_date,
'name': creative.name,
'frequency': creative.frequency,
'target_impr': creative.target_impressions,
'geo_cities': creative.geo_cities if creative.geo_cities else [],
'geo_countries': creative.geo_countries if creative.geo_countries else []})
for summary in impressions_summary:
creative = summary[0]
val = summary[1]
creative_mix[creative.id].update({'impr': val})
for summary in impressions_rej_geo:
creative = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
creative_mix[creative.id].update({'impr_rej_geo': val})
for summary in impressions_rej_browser:
creative = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
creative_mix[creative.id].update({'impr_rej_browser': val})
for summary in clicks_summary:
creative = summary[0]
val = summary[1]
creative_mix[creative.id].update({'clck': val})
for summary in clicks_rej_geo:
creative = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
creative_mix[creative.id].update({'clck_rej_geo': val})
for summary in clicks_rej_browser:
creative = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
creative_mix[creative.id].update({'clck_rej_browser': val})
creatives = creative_mix.values()
for d in creatives:
try:
d['ctr'] = '{0:0.02f}'.format(d['clck'] / d['impr'] * 100.0)
except TypeError:
d['ctr'] = '{0:0.02f}'.format(0.0)
except KeyError:
d['ctr'] = u'нет'
except ZeroDivisionError:
d['ctr'] = '∞'
try:
reach = funcs.reach(d['target_impr'], d['impr'], d['frequency'])
d['reach'] = funcs.format_decimal('{0:0.0f}'.format(reach))
except (ZeroDivisionError, TypeError):
d['reach'] = '{0:0.0f}'.format(0.0)
except KeyError:
d['reach'] = u'нет'
try:
d['target_impr'] = funcs.format_decimal(d['target_impr'])
except KeyError:
pass
try:
d['impr'] = funcs.format_decimal(d['impr'])
except KeyError:
pass
try:
d['clck'] = funcs.format_decimal(d['clck'])
except KeyError:
pass
return render_template('manager/campaigns/creatives_by_campaign.html',
creatives=creatives,
org_id=campaign.organization.id,
org_name=campaign.organization.name,
org_full_name=campaign.organization.full_name,
campaign_id=campaign.id,
campaign_name=campaign.name)
@manager_bp.route("/new_creative_to_campaign/<int:campaign_id>", methods=["GET", "POST"])
@login_required
@manager_required
def new_creative_to_campaign(campaign_id):
try:
campaign = session.query(Campaign) \
.options(joinedload(Campaign.organization)) \
.filter(Campaign.id == campaign_id,
Campaign.state != 'archived')\
.one()
except NoResultFound:
return redirect(url_for(redirectors[current_user.person.role]))
form = forms.CreativeForm(request.form)
if request.method == "POST" and form.validate_on_submit():
redirect_url = request.args.get('back_url', url_for(redirectors[current_user.person.role]))
creative = Creative(campaign, None, form.name.data, form.frequency.data, form.target_impressions.data,
form.click_target_url.data, form.impression_target_url.data,
form.geo_cities.data.split(','), form.geo_countries.data.split(','))
creative.creative_format_ref = form.creative_format_id.data
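# The Creative constructor presumably attaches the new creative to campaign.creatives,
# so committing the (already persistent) campaign cascades the insert.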
session.add(campaign)
session.commit()
return redirect(redirect_url)
return render_template("manager/campaigns/edit_creative.html",
campaign=campaign,
form=form,
type='new',
back_url=request.referrer if request.referrer else None)
@manager_bp.route("/edit_creative/<int:creative_id>", methods=["GET", "POST"])
@login_required
@manager_required
def edit_creative(creative_id):
try:
creative = Creative.query \
.join(Campaign.creatives) \
.options(joinedload(Creative.campaign)) \
.filter(Creative.id == creative_id,
Campaign.state != 'archived')\
.one()
except NoResultFound:
return redirect(url_for(redirectors[current_user.person.role]))
form = forms.CreativeForm(request.form)
if request.method == "POST" and form.validate_on_submit():
creative.creative_format_ref = form.creative_format_id.data
creative.name = form.name.data
creative.frequency = form.frequency.data
creative.target_impressions = form.target_impressions.data
creative.click_target_url = form.click_target_url.data
creative.impression_target_url = form.impression_target_url.data
creative.geo_cities = form.geo_cities.data.split(',')
creative.geo_countries = form.geo_countries.data.split(',')
session.commit()
redirect_url = request.args.get('back_url', url_for(redirectors[current_user.person.role]))
return redirect(redirect_url)
else:
form.creative_format_id.data = creative.creative_format_ref
form.name.data = creative.name
form.frequency.data = creative.frequency
form.target_impressions.data = creative.target_impressions
form.click_target_url.data = creative.click_target_url
form.impression_target_url.data = creative.impression_target_url
form.geo_countries.data = ','.join(creative.geo_countries) if creative.geo_countries else ''
form.geo_cities.data = ','.join(creative.geo_cities) if creative.geo_cities else ''
return render_template("manager/campaigns/edit_creative.html",
campaign=creative.campaign,
form=form,
type='edit',
back_url=request.referrer if request.referrer else None)
@manager_bp.route("/select_creative_formats_ajax", methods=["GET"])
@login_required
@manager_required
def select_creative_formats_ajax():
query = escape(request.args.get('query'))
like_arg = u"%{}%".format(query)
creative_formats = CreativeFormat.query\
.filter(or_(CreativeFormat.format_name.ilike(like_arg),
CreativeFormat.dimension_x.ilike(like_arg),
CreativeFormat.dimension_y.ilike(like_arg))) \
.all()
res = map(lambda x: {'id': x.id, 'name': x.get_full_name()}, creative_formats)
return jsonify(creative_formats=res)
@manager_bp.route("/creative_format_by_id_ajax", methods=["GET"])
@login_required
@manager_required
def creative_format_by_id_ajax():
creative_format_id = request.args.get('creative_format_id')
if creative_format_id is not None and not creative_format_id.isdigit():
return jsonify()
try:
creative_format = CreativeFormat.query \
.filter_by(id=creative_format_id) \
.one()
except NoResultFound:
return jsonify()
return jsonify(id=creative_format.id, name=creative_format.get_full_name())
@manager_bp.route("/select_geo_counties_by_mask_ajax", methods=["GET"])
@login_required
@manager_required
def select_geo_counties_by_mask_ajax():
query = escape(request.args.get('query'))
like_arg = u"%{}%".format(query)
countries = GeoCountries.query\
.filter(GeoCountries.country_full_name.ilike(like_arg))\
.all()
res = map(lambda x: {'id': x.country_iso_id, 'name': x.country_full_name}, countries)
return jsonify(geo_countries=res)
@manager_bp.route("/geo_country_by_id_ajax", methods=["GET"])
@login_required
@manager_required
def geo_country_by_id_ajax():
geo_country_id = request.args.get('geo_country_id', '')
geo_countries = geo_country_id.split(',')
try:
geo_countries_objs = GeoCountries.query \
.filter(GeoCountries.country_iso_id.in_(geo_countries)) \
.all()
except NoResultFound:
return jsonify()
res = map(lambda x: {'id': x.country_iso_id, 'name': x.country_full_name}, geo_countries_objs)
return jsonify(geo_countries=res)
@manager_bp.route("/select_geo_cities_by_mask_ajax", methods=["GET"])
@login_required
@manager_required
def select_geo_cities_by_mask_ajax():
query = escape(request.args.get('query'))
like_arg = u"%{}%".format(query)
cities = GeoCities.query\
.filter(GeoCities.city_name.ilike(like_arg))\
.all()
res = map(lambda x: {'id': x.city_name, 'name': x.city_name}, cities)
return jsonify(geo_cities=res)
@manager_bp.route("/counters_by_creative/<int:creative_id>")
@login_required
@manager_required
def counters_by_creative(creative_id):
try:
creative = Creative.query \
.join(Campaign.creatives) \
.options(joinedload(Creative.campaign, Campaign.organization),
joinedload(Creative.creative_format)) \
.filter(Creative.id == creative_id,
Campaign.state != 'archived') \
.one()
except NoResultFound:
return redirect(url_for(redirectors[current_user.person.role]))
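# Aggregate AdLog totals per counter for each record type (impressions, clicks, geo/browser rejects).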
counter_impressions = session.query(Counter, Contractor.name, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.join(Counter.contractor) \
.filter(Creative.id == creative_id,
AdLog.record_type == 'impr') \
.group_by(Counter, Contractor.name)\
.all()
counter_impressions_rej_geo = session.query(Counter, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.join(Counter.contractor) \
.filter(Creative.id == creative_id,
AdLog.record_type == 'impr_rej_geo') \
.group_by(Counter)\
.all()
counter_impressions_rej_browser = session.query(Counter, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.join(Counter.contractor) \
.filter(Creative.id == creative_id,
AdLog.record_type == 'impr_rej_browser') \
.group_by(Counter)\
.all()
counter_clicks = session.query(Counter, Contractor.name, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.join(Counter.contractor) \
.filter(Creative.id == creative_id,
AdLog.record_type == 'clck') \
.group_by(Counter, Contractor.name)\
.all()
counter_clicks_rej_geo = session.query(Counter, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.join(Counter.contractor) \
.filter(Creative.id == creative_id,
AdLog.record_type == 'clck_rej_geo') \
.group_by(Counter)\
.all()
counter_clicks_rej_browser = session.query(Counter, func.sum(AdLog.value)) \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.join(Counter.contractor) \
.filter(Creative.id == creative_id,
AdLog.record_type == 'clck_rej_browser') \
.group_by(Counter)\
.all()
# TODO: refactor this per-counter aggregation; the merge loops below largely duplicate the per-creative ones
counter_mix = defaultdict(dict)
for impr in counter_impressions:
counter = impr[0]
contractor_name = impr[1]
val = impr[2]
counter_mix[counter.id].update({'counter_id': counter.id,
'counter_description': counter.description,
'contractor_name': contractor_name,
'realstart_date': counter.realstart_date,
'due_date': creative.campaign.due_date,
'mu_ctr': counter.mu_ctr,
'sigma_ctr': counter.sigma_ctr,
'banner_types': counter.banner_types,
'impr': val})
for summary in counter_impressions_rej_geo:
counter = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
counter_mix[counter.id].update({'impr_rej_geo': val})
for summary in counter_impressions_rej_browser:
counter = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
counter_mix[counter.id].update({'impr_rej_browser': val})
for clck in counter_clicks:
counter = clck[0]
contractor_name = clck[1]
val = clck[2]
counter_mix[counter.id].update({'counter_id': counter.id,
'contractor_name': contractor_name,
'clck': val})
for summary in counter_clicks_rej_geo:
counter = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
counter_mix[counter.id].update({'clck_rej_geo': val})
for summary in counter_clicks_rej_browser:
counter = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
counter_mix[counter.id].update({'clck_rej_browser': val})
counters = counter_mix.values()
for d in counters:
try:
d['ctr'] = '{0:0.02f}'.format(d['clck'] / d['impr'] * 100.0)
except (TypeError, KeyError):
d['ctr'] = '{0:0.02f}'.format(0.0)
except ZeroDivisionError:
d['ctr'] = '∞'
try:
d['impr'] = funcs.format_decimal(d['impr'])
except KeyError:
pass
try:
d['clck'] = funcs.format_decimal(d['clck'])
except KeyError:
pass
if len(counters) == 0:
return render_template('manager/campaigns/first_counter.html',
creative=creative)
else:
return render_template('manager/campaigns/counters.html',
counters=counters,
creative=creative)
@manager_bp.route("/counters_details_by_creative/<int:creative_id>")
@login_required
@manager_required
def counters_details_by_creative(creative_id):
try:
creative = Creative.query \
.options(joinedload(Creative.campaign, Campaign.organization)) \
.filter(Creative.id == creative_id)\
.one()
except NoResultFound:
return redirect(url_for(redirectors[current_user.person.role]))
counters = Counter.query \
.join(Counter.creative) \
.join(Counter.logs) \
.options(joinedload(Counter.contractor),
joinedload(Counter.creative,
Creative.campaign,
Campaign.organization)) \
.filter(Creative.id == creative_id) \
.all()
if len(counters) == 0:
return render_template('manager/campaigns/counters_details_empty.html',
creative=creative)
org_name = creative.campaign.organization.name
org_full_name = creative.campaign.organization.full_name
campaign_name = creative.campaign.name
ads_base = app.config['ROTABANNER_URL_BASE'].rstrip('/')
return render_template('manager/campaigns/counters_details.html',
counters=counters,
creative=creative,
ads_base=ads_base,
org_name=org_name,
org_full_name=org_full_name,
campaign_name=campaign_name)
@manager_bp.route("/one_counter_details/<int:counter_id>")
@login_required
@manager_required
def one_counter_details(counter_id):
try:
counter = Counter.query \
.options(joinedload(Counter.contractor),
joinedload(Counter.creative,
Creative.campaign,
Campaign.organization)) \
.filter(Counter.id == counter_id)\
.one()
except NoResultFound:
return redirect(url_for(redirectors[current_user.person.role]))
org_name = counter.creative.campaign.organization.name
org_full_name = counter.creative.campaign.organization.full_name
campaign_name = counter.creative.campaign.name
ads_base = app.config['ROTABANNER_URL_BASE'].rstrip('/')
return render_template('manager/campaigns/one_counter_details.html',
counter=counter,
creative=counter.creative,
ads_base=ads_base,
org_name=org_name,
org_full_name=org_full_name,
campaign_name=campaign_name)
@manager_bp.route("/swf_by_counter/<int:counter_id>", methods=["GET", "POST"])
@login_required
@manager_required
def swf_by_counter(counter_id):
counter = Counter.query \
.filter(Counter.id == counter_id)\
.one()
if counter.creative_file_swf is None:
return ""
resp = make_response(counter.creative_file_swf)
resp.headers['Content-Type'] = "application/x-shockwave-flash"
return resp
@manager_bp.route("/gif_by_counter/<int:counter_id>", methods=["GET", "POST"])
@login_required
@manager_required
def gif_by_counter(counter_id):
counter = Counter.query \
.filter(Counter.id == counter_id)\
.one()
if counter.creative_file_gif is None:
return ""
extension = counter.creative_file_extension
try:
mime_type = {'gif': 'image/gif', 'jpg': 'image/jpeg'}[extension]
except KeyError:
return ''
resp = make_response(counter.creative_file_gif)
resp.headers['Content-Type'] = mime_type
return resp
@manager_bp.route("/new_counter_to_creative/<int:creative_id>", methods=["GET", "POST"])
@login_required
@manager_required
def new_counter_to_creative(creative_id):
try:
creative = Creative.query \
.join(Campaign.creatives) \
.options(joinedload(Creative.campaign, Campaign.organization),
joinedload(Creative.creative_format)) \
.filter(Creative.id == creative_id,
Campaign.state != 'archived') \
.one()
except NoResultFound:
return redirect(url_for(redirectors[current_user.person.role]))
form = forms.CounterForm()
if request.method == "POST" and form.validate_on_submit():
redirect_url = request.args.get('back_url', url_for(redirectors[current_user.person.role]))
try:
mu_ctr = float(form.mu_ctr.data)
sigma_ctr = float(form.sigma_ctr.data)
except (ValueError, TypeError):
mu_ctr = None
sigma_ctr = None
counter = Counter(creative,
None,
mu_ctr=mu_ctr,
sigma_ctr=sigma_ctr,
description=form.description.data)
counter.contractor_ref = form.contractor_id.data
if form.creative_file_swf.has_file():
counter.creative_filename_swf = secure_filename(form.creative_file_swf.data.filename)
swf_file_object = BytesIO()
form.creative_file_swf.data.save(swf_file_object)
counter.creative_file_swf = swf_file_object.getvalue()
swf_file_object.close()
if form.creative_file_gif.has_file():
counter.creative_filename_gif = secure_filename(form.creative_file_gif.data.filename)
gif_file_object = BytesIO()
form.creative_file_gif.data.save(gif_file_object)
counter.creative_file_gif = gif_file_object.getvalue()
gif_file_object.close()
impr_log_record = AdLog(counter, 'impr', None, 0)
clck_log_record = AdLog(counter, 'clck', None, 0)
session.add_all([counter, impr_log_record, clck_log_record])
session.commit()
return redirect(redirect_url)
return render_template("manager/edit_counter.html",
form=form,
type='new',
creative=creative,
back_url=request.referrer if request.referrer else None)
@manager_bp.route("/edit_counter/<int:counter_id>", methods=["GET", "POST"])
@login_required
@manager_required
def edit_counter(counter_id):
try:
creative = Creative.query \
.join(Creative.counters) \
.options(joinedload(Creative.campaign, Campaign.organization),
joinedload(Creative.creative_format)) \
.filter(Counter.id == counter_id,
Campaign.state != 'archived') \
.one()
except NoResultFound:
return redirect(url_for(redirectors[current_user.person.role]))
# forms.CounterForm(request.form) would be wrong here: with Flask-WTF and file uploads the
# constructor must be left to pick up request.files on its own
form = forms.CounterForm()
counter = Counter.query.filter_by(id=counter_id).first()
if request.method == "POST" and form.validate_on_submit():
counter.contractor_ref = form.contractor_id.data
try:
counter.mu_ctr = float(form.mu_ctr.data)
counter.sigma_ctr = float(form.sigma_ctr.data)
except (ValueError, TypeError):
counter.mu_ctr = None
counter.sigma_ctr = None
if form.creative_file_swf.has_file():
counter.creative_filename_swf = secure_filename(form.creative_file_swf.data.filename)
swf_file_object = BytesIO()
form.creative_file_swf.data.save(swf_file_object)
counter.creative_file_swf = swf_file_object.getvalue()
swf_file_object.close()
if form.creative_file_gif.has_file():
counter.creative_filename_gif = secure_filename(form.creative_file_gif.data.filename)
gif_file_object = BytesIO()
form.creative_file_gif.data.save(gif_file_object)
counter.creative_file_gif = gif_file_object.getvalue()
gif_file_object.close()
counter.description = form.description.data
session.commit()
redirect_url = request.args.get('back_url', url_for(redirectors[current_user.person.role]))
return redirect(redirect_url)
else:
form.contractor_id.data = counter.contractor_ref
form.mu_ctr.data = counter.mu_ctr
form.sigma_ctr.data = counter.sigma_ctr
# .filename is a custom attribute used to carry the name of the file already stored in the DB
form.creative_file_swf.filename = counter.creative_filename_swf
form.creative_file_gif.filename = counter.creative_filename_gif
form.description.data = counter.description
return render_template("manager/edit_counter.html",
form=form,
type='edit',
creative=creative,
back_url=request.referrer if request.referrer else None)
@manager_bp.route("/select_contractors_ajax", methods=["GET"])
@login_required
@manager_required
def select_contractors_ajax():
query = escape(request.args.get('query'))
like_arg = u"%{}%".format(query)
contractors = Contractor.query \
.filter(or_(Contractor.name.ilike(like_arg),
Contractor.full_name.ilike(like_arg))) \
.all()
res = []
for contractor in contractors:
if contractor.full_name:
row = {'id': contractor.id, 'name': u"{} ({})".format(contractor.name, contractor.full_name)}
else:
row = {'id': contractor.id, 'name': u"{}".format(contractor.name)}
res.append(row)
return jsonify(contractors=res)
@manager_bp.route("/contractor_by_id_ajax", methods=["GET"])
@login_required
@manager_required
def contractor_by_id_ajax():
contractor_id = request.args.get('contractor_id')
if contractor_id is not None and not contractor_id.isdigit():
return jsonify()
try:
contractor = Contractor.query \
.filter_by(id=contractor_id).one()
except NoResultFound:
return jsonify()
if contractor.full_name:
name = u"{} ({})".format(contractor.name, contractor.full_name)
else:
name = u"{}".format(contractor.name)
return jsonify(id=contractor.id, name=name)
@manager_bp.route("/contractors", defaults={'contractor_id': None}, methods=["GET"])
@manager_bp.route("/contractors/<int:contractor_id>", methods=["GET"])
@login_required
@manager_required
def contractors(contractor_id):
all_contractors = Contractor.query.all()
try:
contractor = Contractor.query \
.options(joinedload(Contractor.counters,
Counter.creative,
Creative.campaign,
Campaign.organization)) \
.filter(Contractor.id == contractor_id) \
.one()
except NoResultFound:
return render_template('manager/contractors.html',
counters=[],
all_contractors=all_contractors,
contractor=None)
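# Shared join chain applied to every per-counter aggregate query below.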
def query_template(query_obj):
return query_obj \
.join(Organization.campaigns) \
.join(Campaign.creatives) \
.join(Creative.counters) \
.join(Counter.logs) \
.join(Counter.contractor) \
.filter(Contractor.id == contractor_id)
counter_impressions = query_template(session.query(Counter, func.sum(AdLog.value))) \
.filter(AdLog.record_type == 'impr') \
.group_by(Counter)\
.all()
counter_impressions_rej_geo = query_template(session.query(Counter, func.sum(AdLog.value))) \
.filter(AdLog.record_type == 'impr_rej_geo') \
.group_by(Counter)\
.all()
counter_impressions_rej_browser = query_template(session.query(Counter, func.sum(AdLog.value))) \
.filter(AdLog.record_type == 'impr_rej_browser') \
.group_by(Counter)\
.all()
counter_clicks = query_template(session.query(Counter, func.sum(AdLog.value))) \
.filter(AdLog.record_type == 'clck') \
.group_by(Counter)\
.all()
counter_clicks_rej_geo = query_template(session.query(Counter, func.sum(AdLog.value))) \
.filter(AdLog.record_type == 'clck_rej_geo') \
.group_by(Counter)\
.all()
counter_clicks_rej_browser = query_template(session.query(Counter, func.sum(AdLog.value))) \
.filter(AdLog.record_type == 'clck_rej_browser') \
.group_by(Counter)\
.all()
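# Merge per-counter aggregates into one dict keyed by counter id; counters without impressions are skipped.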
counter_mix = defaultdict(dict)
for impr in counter_impressions:
counter = impr[0]
val = impr[1]
if val == 0:
continue
geo_countries = counter.creative.geo_countries
geo_cities = counter.creative.geo_cities
counter_mix[counter.id].update({'counter_id': counter.id,
'campaign_name': counter.creative.campaign.name,
'creative_name': counter.creative.name,
'creative_format_name': counter.creative.creative_format.get_full_name(),
'counter_description': counter.description,
'realstart_date': counter.realstart_date,
'start_date': counter.creative.campaign.start_date,
'due_date': counter.creative.campaign.due_date,
'geo_cities': geo_cities if geo_cities else [],
'geo_countries': geo_countries if geo_countries else [],
'mu_ctr': counter.mu_ctr,
'sigma_ctr': counter.sigma_ctr,
'impr': val})
for summary in counter_impressions_rej_geo:
counter = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
counter_mix[counter.id].update({'impr_rej_geo': val})
for summary in counter_impressions_rej_browser:
counter = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
counter_mix[counter.id].update({'impr_rej_browser': val})
for clck in counter_clicks:
counter = clck[0]
val = clck[1]
counter_mix[counter.id].update({'counter_id': counter.id,
'clck': val})
for summary in counter_clicks_rej_geo:
counter = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
counter_mix[counter.id].update({'clck_rej_geo': val})
for summary in counter_clicks_rej_browser:
counter = summary[0]
try:
val = funcs.format_decimal(summary[1])
except KeyError:
continue
counter_mix[counter.id].update({'clck_rej_browser': val})
counters = counter_mix.values()
for d in counters:
try:
d['ctr'] = '{0:0.02f}'.format(d['clck'] / d['impr'] * 100.0)
except (TypeError, KeyError):
d['ctr'] = '{0:0.02f}'.format(0.0)
except ZeroDivisionError:
d['ctr'] = '∞'
try:
d['impr'] = funcs.format_decimal(d['impr'])
except KeyError:
pass
try:
d['clck'] = funcs.format_decimal(d['clck'])
except KeyError:
pass
return render_template('manager/contractors.html',
counters=counters,
all_contractors=all_contractors,
contractor=contractor)
|
olegvg/me-advert
|
me-advert/console/views/manager.py
|
Python
|
gpl-3.0
| 58,046
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-10-08 02:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ToDo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('done', models.BooleanField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
mcueto/djangorestframework-auth0_sample
|
sample/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 725
|
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
host = 'http://www.qserie.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Series", action="todas", url=host,
thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
fanart='https://s27.postimg.org/iahczwgrn/series.png'))
itemlist.append(Item(channel=item.channel, title="Generos", action="generos", url=host,
thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png'))
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="lasmas", url=host,
thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png',
fanart='https://s17.postimg.org/fwi1y99en/a-z.png', extra='letras'))
itemlist.append(Item(channel=item.channel, title="Ultimas Agregadas", action="ultimas", url=host,
thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png',
fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png'))
itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="lasmas", url=host,
thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='Vista'))
itemlist.append(Item(channel=item.channel, title="Mas Votadas", action="lasmas", url=host,
thumbnail='https://s7.postimg.org/9kg1nthzf/votadas.png',
fanart='https://s7.postimg.org/9kg1nthzf/votadas.png', extra='Votos'))
return itemlist
def todas(item):
logger.info()
audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]', 'Español': '[COLOR yellow]ESPAÑOL[/COLOR]',
'Sub Español': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]'}
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
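# Each match captures: url, title, thumbnail, plot, year, language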
patron = '<h2 class=.*?><a href="([^"]+)" title="([^"]+)">.*?\/h2>.*?<img src="([^"]+)".*?\/><\/a>.*?<p>([^<]+)<\/p>.*?<strong>Genero<\/strong>: .*?, (.*?)<\/div>.*?<img src=.*?>([^<]+)<\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot, scrapedyear, scrapedidioma in matches:
idioma = scrapedidioma.strip()
idioma = scrapertools.decodeHtmlentities(idioma)
url = urlparse.urljoin(item.url, scrapedurl)
year = scrapedyear
title = scrapertools.decodeHtmlentities(scrapedtitle)
thumbnail = scrapedthumbnail
plot = scrapedplot
fanart = 'https://s31.postimg.org/dousrbu9n/qserie.png'
itemlist.append(
Item(channel=item.channel, action="temporadas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=fanart, extra=idioma, contentSerieName=scrapedtitle, infoLabels={'year': year},
language=idioma))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
siguiente = ''
title = ''
actual = scrapertools.find_single_match(data, '<li><a href=".*?"><span><b>([^<]+)<\/b><\/span><\/a><\/li>')
ultima = scrapertools.find_single_match(data, '<li><a href=".*?page=([^"]+)">Ultima<\/a><\/li>')
if 'page' in item.title:
while not item.url.endswith('='): item.url = item.url[:-1]
if actual:
siguiente = int(actual) + 1
if item.url.endswith('='):
siguiente_url = item.url + str(siguiente)
else:
siguiente_url = item.url + '?&page=' + str(siguiente)
if actual and ultima and siguiente <= int(ultima):
titlen = 'Pagina Siguiente >>> ' + str(actual) + '/' + str(ultima)
fanart = 'https://s32.postimg.org/4q1u1hxnp/qserie.png'
thumbnail = 'https://s16.postimg.org/9okdu7hhx/siguiente.png'
itemlist.append(Item(channel=item.channel, action="todas", title=titlen, url=siguiente_url, fanart=fanart,
thumbnail=thumbnail))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url_base = item.url
patron = '<a href="javascript:.*?;" class="lccn"><b>([^<]+)<\/b><\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
temp = 1
if matches:
for scrapedtitle in matches:
url = url_base
title = scrapedtitle
thumbnail = item.thumbnail
plot = item.plot
contentSeasonNumber = str(temp)
infoLabels['season'] = contentSeasonNumber
fanart = scrapertools.find_single_match(data, '<img src="([^"]+)"/>.*?</a>')
itemlist.append(
Item(channel=item.channel, action="episodiosxtemp", title=title, fulltitle=item.title, url=url,
thumbnail=thumbnail, plot=plot, fanart=fanart, contentSeasonNumber=contentSeasonNumber,
contentSerieName=item.contentSerieName, infoLabels=infoLabels))
temp = temp + 1
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library", extra="episodios",
contentSerieName=item.contentSerieName))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
else:
item.title = ''
item.modo = 'unico'
return episodiosxtemp(item)
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
if item.modo == 'unico':
itemlist += episodiosxtemp(item)
else:
for tempitem in templist:
itemlist += episodiosxtemp(tempitem)
return itemlist
def episodiosxtemp(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
infoLabels = item.infoLabels
temp = item.contentSeasonNumber
if item.title == '':
temp = '1'
item.contentSeasonNumber = temp
infoLabels['season'] = temp
patron = '<li><a href="([^"]+)" class="lcc"><b>([^<]+)<\/b>.*?<\/a><\/li>'
else:
patron = '<li><a href="([^"]+)" class="lcc"><b>([^<]+)<\/b> - Temp\. ' + temp + '<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
capitulo = re.findall(r'\d+', scrapedtitle)
contentEpisodeNumber = str(capitulo[0])
infoLabels['episode'] = contentEpisodeNumber
title = item.contentSerieName + ' ' + temp + 'x' + contentEpisodeNumber
thumbnail = item.thumbnail
plot = item.plot
fanart = item.fanart
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, plot=plot, extra=item.extra, extra1=item.extra1,
extra2=item.extra2, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if item.modo == 'unico':
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library", extra="episodios",
contentSerieName=item.contentSerieName, modo='unico',
contentSeasonNumber=item.contentSeasonNumber))
return itemlist
def generos(item):
tgenero = {"comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
"drama": "https://s16.postimg.org/94sia332d/drama.png",
"acción": "https://s3.postimg.org/y6o9puflv/accion.png",
"aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"aventuras": "https://s10.postimg.org/6su40czih/aventura.png",
"romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"infantil": "https://s23.postimg.org/g5rmazozv/infantil.png",
"ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"anime": 'https://s2.postimg.org/s38borokp/anime.png',
"animes": "https://s2.postimg.org/s38borokp/anime.png",
"dibujos": "https://s2.postimg.org/aqwqksyop/dibujos.png",
"documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
"fantástico": "https://s10.postimg.org/pbkbs6j55/fantastico.png",
"intriga": "https://s27.postimg.org/v9og43u2b/intriga.png",
"musical": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"secuela": "https://s7.postimg.org/bti0nauh7/secuela.png",
"thriller (suspenso)": "https://s22.postimg.org/5y9g0jsu9/thriller.png",
"western": "https://s23.postimg.org/lzyfbjzhn/western.png"}
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li><a title="([^"]+)" href="([^"]+)" onclick=.*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
url = urlparse.urljoin(item.url, scrapedurl)
title = scrapedtitle.decode('cp1252')
title = title.encode('utf-8')
if title.lower() in tgenero:
thumbnail = tgenero[title.lower()]
fanart = tgenero[title.lower()]
else:
thumbnail = ''
fanart = ''
plot = ''
itemlist.append(
Item(channel=item.channel, action="todas", title=title.lower(), fulltitle=item.fulltitle, url=url,
thumbnail=thumbnail, plot=plot, fanart=fanart))
return itemlist
def ultimas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
realplot = ''
patron = '<li><a title="([^"]+)" href="([^"]+)"><strong>.*?</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
url = urlparse.urljoin(item.url, scrapedurl)
data = httptools.downloadpage(scrapedurl).data
thumbnail = scrapertools.get_match(data, '<link rel="image_src" href="([^"]+)"/>')
realplot = scrapertools.find_single_match(data, '<p itemprop="articleBody">([^<]+)<\/p> ')
plot = scrapertools.remove_htmltags(realplot)
inutil = re.findall(r' Temporada \d', scrapedtitle)
title = scrapertools.decodeHtmlentities(scrapedtitle)
# strip the " Temporada N" suffix (when present) to recover the bare series name
realtitle = scrapedtitle.replace(inutil[0], '') if inutil else scrapedtitle
fanart = 'https://s22.postimg.org/cb7nmhwv5/ultimas.png'
itemlist.append(
Item(channel=item.channel, action="temporadas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=fanart, contentSerieName=realtitle))
return itemlist
def lasmas(item):
thumbletras = {'0-9': 'https://s32.postimg.org/drojt686d/image.png',
'0 - 9': 'https://s32.postimg.org/drojt686d/image.png',
'#': 'https://s32.postimg.org/drojt686d/image.png',
'a': 'https://s32.postimg.org/llp5ekfz9/image.png',
'b': 'https://s32.postimg.org/y1qgm1yp1/image.png',
'c': 'https://s32.postimg.org/vlon87gmd/image.png',
'd': 'https://s32.postimg.org/3zlvnix9h/image.png',
'e': 'https://s32.postimg.org/bgv32qmsl/image.png',
'f': 'https://s32.postimg.org/y6u7vq605/image.png',
'g': 'https://s32.postimg.org/9237ib6jp/image.png',
'h': 'https://s32.postimg.org/812yt6pk5/image.png',
'i': 'https://s32.postimg.org/6nbbxvqat/image.png',
'j': 'https://s32.postimg.org/axpztgvdx/image.png',
'k': 'https://s32.postimg.org/976yrzdut/image.png',
'l': 'https://s32.postimg.org/fmal2e9yd/image.png',
'm': 'https://s32.postimg.org/m19lz2go5/image.png',
'n': 'https://s32.postimg.org/b2ycgvs2t/image.png',
'o': 'https://s32.postimg.org/c6igsucpx/image.png',
'p': 'https://s32.postimg.org/jnro82291/image.png',
'q': 'https://s32.postimg.org/ve5lpfv1h/image.png',
'r': 'https://s32.postimg.org/nmovqvqw5/image.png',
's': 'https://s32.postimg.org/zd2t89jol/image.png',
't': 'https://s32.postimg.org/wk9lo8jc5/image.png',
'u': 'https://s32.postimg.org/w8s5bh2w5/image.png',
'v': 'https://s32.postimg.org/e7dlrey91/image.png',
'w': 'https://s32.postimg.org/fnp49k15x/image.png',
'x': 'https://s32.postimg.org/dkep1w1d1/image.png',
'y': 'https://s32.postimg.org/um7j3zg85/image.png',
'z': 'https://s32.postimg.org/jb4vfm9d1/image.png'}
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
realplot = ''
if item.extra == 'letras':
patron = '<li><a href="([^"]+)" title="Series que comienzan con.*?">([^<]+)</a></li>'
else:
patron = '<a href="([^"]+)" title="([^V]+)' + item.extra + '.*?">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
if item.extra != 'letras':
data = httptools.downloadpage(scrapedurl).data
thumbnail = scrapertools.get_match(data, '<link rel="image_src" href="([^"]+)"/>')
realplot = scrapertools.find_single_match(data, '<p itemprop="articleBody">([^<]+)<\/p> ')
plot = scrapertools.remove_htmltags(realplot)
action = 'temporadas'
else:
if scrapedtitle.lower() in thumbletras:
thumbnail = thumbletras[scrapedtitle.lower()]
else:
thumbnail = ''
plot = ''
action = 'todas'
title = scrapedtitle.replace(': ', '')
title = scrapertools.decodeHtmlentities(title)
if item.extra == 'letras':
fanart = 'https://s17.postimg.org/fwi1y99en/a-z.png'
elif item.extra == 'Vista':
fanart = 'https://s9.postimg.org/wmhzu9d7z/vistas.png'
else:
fanart = ''
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=fanart, contentSerieName=scrapedtitle))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
anterior = scrapertools.find_single_match(data, '<a class="left" href="([^"]+)" title="Cap.tulo Anterior"></a>')
siguiente = scrapertools.find_single_match(data, '<a class="right" href="([^"]+)" title="Cap.tulo Siguiente"></a>')
titulo = scrapertools.find_single_match(data,
'<h1 class="tithd bold fs18px lnht30px ico_b pdtop10px">([^<]+)</h1> ')
existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>')
from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
# drop YouTube hits in a separate pass instead of mutating itemlist while iterating over it
itemlist = [videoitem for videoitem in itemlist if 'youtube' not in videoitem.url]
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
videoitem.fanart = item.fanart
videoitem.title = titulo + " " + videoitem.server
if item.extra2 != 'todos':
data = httptools.downloadpage(anterior).data
existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>')
if not existe:
itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Anterior', url=anterior,
thumbnail='https://s1.postimg.org/dbq8gvldb/anterior.png', folder=True))
data = httptools.downloadpage(siguiente).data
existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>')
if not existe:
itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Siguiente', url=siguiente,
thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', folder=True))
return itemlist
|
pitunti/alfaPitunti
|
plugin.video.alfa/channels/qserie.py
|
Python
|
gpl-3.0
| 17,287
|
#countWalk.py
#Garik Sadovy
#gcsadovy
import arcpy, sys, os
path = sys.argv[1]
for (path, dirs, files) in os.walk(path):
for file in files:
if file.endswith(".shp") == True:
count = arcpy.GetCount_management("C:\Temp\COVER63p.shp")
print "{0}/{1} has {2} entries.".format(path, file, count)
|
gcsadovy/generalPY
|
countWalk_test.py
|
Python
|
gpl-3.0
| 339
|
__module_name__ = "hexchat-oper"
__module_version__ = "2.1"
__module_description__ = "Python 3 Windows"
import os
import sys
import hexchat
import threading
sys.path.append(os.path.join(os.path.dirname(__file__), "extras"))
path = os.path.join(os.path.dirname(__file__), "extras")
geoip_dat = os.path.join(path, "GeoIP.dat")
import pygeoip
if(sys.version_info > (3, 0)):
import urllib.request
from urllib.error import HTTPError
if os.name == "nt":
import ctypes
else:
raise Exception("Unknown/unsupported OS")
import json
import re
# Configs below
email = 'irrgit@gmail.com'
flags = 'm'
script_path = os.getcwd()
exempt_file_path = script_path + '/excludeip.txt'
shun_time ='5m'
shun_reason ='Pushim'
akill_time = '+2d' #2 days
akill_reason ='Proxy/Ofendime/Flood/Abuse'
sqline_reason = 'Nick/Banal'
check_proxy = False # set to check each IP if its a proxy, Warning this could get slow
edited = False
#Channel Ban text event
edited_ban = False
#Channel UnBan text event
edited_unban = False
#used to recompile a hostmask to a regex that matches said mask
wildcards = {'?':r'.', '*': r'.*'}
mydata = {}
#these will be the network tabs to print to.
TABS = [
r'{CONNECTIONS}',
r'{OVERRIDES}' ,
r'{SPAMFILTER}',
r'{NICKCHANGES}'
]
filter_tabs = True
network_contexts = []
def open_tabs():
server_context = hexchat.get_info("server")
tab_options = hexchat.get_prefs('tab_new_to_front')
hexchat.command("set -quiet tab_new_to_front 0")
for tab in TABS:
hexchat.command("NEWSERVER -noconnect %s" % tab)
open_tabs()
network_contexts =[hexchat.find_context(tab) for tab in TABS]
def pad_nick(nick):
nick = (nick + " " *30)[:30]
return nick
TEXT = {
'blue': '\00318',
'green': '\00319',
'red': '\00320',
'brown': '\00321',
'purple': '\00322',
'orange': '\00323',
'lightgreen': '\00325',
'gray': '\00330',
'bold':'\002',
'underline':'\037'
}
IRCCloud = [
'192.184.9.108' ,
'192.184.9.110' ,
'192.184.9.112' ,
'192.184.10.118',
'192.184.10.9' ,
'170.178.187.131'
]
numerics = ["311","379","378","319","312","313",
"310","317","318","307","335","320"]
exempt_list = []
gi = pygeoip.GeoIP(geoip_dat)
# End configs
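# Load exempt IP prefixes (one per line, '*' wildcards stripped) from excludeip.txt.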
def load_exempt_ips():
global exempt_list
#empty the list
exempt_list[:] = []
try:
with open(exempt_file_path) as f:
for line in f:
if '.' in line:
ip = line.rstrip()
ip = ip.replace("*","")
exempt_list.append(ip)
except:
print("Error loading file.")
load_exempt_ips()
def getclip():
CF_TEXT = 1
kernel32 = ctypes.windll.kernel32
user32 = ctypes.windll.user32
ret = None
user32.OpenClipboard(0)
if user32.IsClipboardFormatAvailable(CF_TEXT):
data = user32.GetClipboardData(CF_TEXT)
data_locked = kernel32.GlobalLock(data)
text = ctypes.c_char_p(data_locked)
ret = text.value
kernel32.GlobalUnlock(data_locked)
else:
print('no text in clipboard')
user32.CloseClipboard()
return ret
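# Resolve the country via the local GeoIP database and, for non-exempt IPs, query getipintel.net
# for a proxy verdict; the result is cached in mydata keyed by nick.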
def get_data_py3(nick,ip):
try:
country_code = gi.country_code_by_addr(ip)
country_name = gi.country_name_by_addr(ip)
if(any(exempt_ip in ip for exempt_ip in exempt_list) or country_name == 'Albania'):
user_info = [ip,country_name,country_code,'Exempt']
mydata[nick] = user_info
else:
try:
proxy = ''
ipintel_api_link = "http://check.getipintel.net/check.php?ip=%s&contact=%s&flags=%s" % (ip,email,flags)
request_obj = urllib.request.Request(ipintel_api_link,data=None, headers={'User-Agent': 'Mozilla'})
ipintel_response = urllib.request.urlopen(request_obj).read().decode('utf-8')
proxy_data = str(ipintel_response)
if(str(proxy_data) =='1'):
proxy = 'Proxy'
user_info = [ip,country_name,country_code,proxy]
mydata[nick] = user_info
except HTTPError as err:
print("Something went wrong when trying to get Proxy data, PY3")
except:
print("Print something went wrong when trying to get IP data , PY3")
return
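# Server-notice handler: tracks client connects/exits/nick changes, caches per-nick IP and
# country info, and echoes connection lines to the {CONNECTIONS} tab.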
def on_server_join(word,word_eol,userdata):
global mydata
notice = word[0]
if 'Client connecting' in notice:
nickname = re.findall(r"\: (.*)\ \(",notice)[0]
ip = re.findall(r"\@(.*)\)",notice)[0]
if 'irccloud.com' in ip:
user_info = [ip,'IRCCloud','US']
mydata[nickname] = user_info
elif 'mibbit.com' in ip:
user_info = [ip,'Mibbit','US']
mydata[nickname] = user_info
else:
send_to_thread = threading.Thread(target=get_data_py3,args=(nickname,ip,))
send_to_thread.start()
# Print to appropriate tab
try:
server = TEXT['underline'] + TEXT['lightgreen'] + str(re.findall(r"at (\ ?.*)\:",notice)).strip("[]'")
nickname = TEXT['bold']+TEXT['blue']+ str(re.findall(r"\: (.*)\ \(",notice)).strip("[]'")
ip = TEXT['orange'] + str(re.findall(r"\@(.*)\)",notice)).strip("[]'")
if server:
msg = pad_nick(nickname) +TEXT['gray']+" from "+ ip +" at "+ server
else:
msg = pad_nick(nickname) +TEXT['gray']+" from "+ ip
connected = TEXT['green'] + 'Connected'
conn_tab = hexchat.find_context(r'{CONNECTIONS}')
if conn_tab:
conn_tab.emit_print("Channel Message", connected, msg)
else:
return
except:
print("Error when trying to print to filter tab")
return
elif 'Client exiting' in notice:
nickname = re.findall(r": ([^!(]+)",notice)[0]
nickname = nickname.replace(" ","")
if nickname in mydata:
mydata.pop(nickname, None)
else:
print("Not in the dictionary")
server = TEXT['underline'] + TEXT['red'] + str(re.findall(r"at (\ ?.*)\:",notice)).strip("[]'")
nickname = TEXT['bold']+TEXT['blue']+ str(re.findall(r"at .*\:\ (.*)\!\w|exiting\: (.*)\ \(",notice)).strip("[]'")
return
elif 'forced to change his/her nickname' in notice:
oldnick = re.findall(r"-- ([^()]+) ",notice)[0]
newnick = re.findall(r"nickname to ([^()]+)",notice)[0]
ip = re.findall(r"@([^)]+)",notice)[0]
if oldnick in mydata:
v = mydata.pop(oldnick)
mydata[newnick] = v
else:
if(sys.version_info > (3, 0)):
send_to_thread = threading.Thread(target=get_data_py3,args=(newnick,ip,))
send_to_thread.start()
return
elif 'has changed his/her nickname' in notice:
ip = re.findall(r"@([^)]+)",notice)[0]
oldnick = re.findall(r"-- ([^()]+) ",notice)[0]
newnick = re.findall(r"nickname to ([^()]+)",notice)[0]
if oldnick in mydata:
v = mydata.pop(oldnick)
mydata[newnick] = v
else:
if(sys.version_info > (3, 0)):
send_to_thread = threading.Thread(target=get_data_py3, args=(newnick,ip,))
send_to_thread.start()
return
else:
return
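# Join handler: annotates channel joins with ident, IP and GeoIP country from the cache,
# falling back to a USERIP server lookup when the nick has not been seen yet.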
def on_chan_join(word,word_eol,event, attr):
global edited
if edited or attr.time or not len(word) > 1:
return
nick = word[0]
chan = word[1]
chan_context = hexchat.find_context(channel=chan)
try:
ident = re.findall(r"(.*)\@",word[2])[0]
except:
return
if nick in mydata:
chan_context = hexchat.find_context(channel=chan)
user_info = mydata[nick]
ip_from_data = user_info[0]
country_name = user_info[1]
country_code = user_info[2]
additional_info = ''
if len(user_info) == 4:
if 'Exempt' in user_info[3]:
additional_info = user_info[3]
if 'Proxy' in user_info[3]:
additional_info = user_info[3]
location = " "+ ident +" "+ ip_from_data +" " + "\00318" +country_name +"/"+ "\00318" + country_code +" "+ "\00320" + additional_info
edited = True
chan_context.emit_print("Join",nick,chan,location)
edited = False
return hexchat.EAT_ALL
else:
hexchat.command("USERIP "+ nick)
def unhook():
hexchat.unhook(userip_hook)
hexchat.unhook(timer_handle)
def userip_callback(word,word_eol,_):
global edited
try:
nick_cb = re.findall(r":([^*=]+)", str(word[3]))[0]
except:
return
if(word[1] == '340' and nick == nick_cb):
unhook()
try:
ip = re.findall(r"\@(.*)",str(word[3]))[0]
except:
return
if(ip == '<unknown>'):
user_info = ['Bot','Earth','']
mydata[nick] = user_info
edited = True
chan_context.emit_print("Join",nick_cb,chan,mydata[nick][0])
edited = False
return hexchat.EAT_ALL
elif(ip in IRCCloud):
user_info = [ip,'IRCCloud','']
mydata[nick] = user_info
location = " " + ident + " " + mydata[nick][1]
edited = True
chan_context.emit_print("Join",nick_cb,chan,location)
edited = False
return hexchat.EAT_ALL
elif (any(exempt_ip in ip for exempt_ip in exempt_list)):
response = None
data = None
country_name = None
country_code = None
if (sys.version_info >(3, 0)):
try:
country_code = gi.country_code_by_addr(ip)
country_name = gi.country_name_by_addr(ip)
user_info = [ip,country_name,country_code,'Exempt']
mydata[nick] = user_info
except:
print("Error getting country info from GeoIP DB.")
location = " "+ident +" "+ ip +" "+ "\00318"+ country_name +"/"+ "\00318"+ country_code + " "+ "\00320Exempt"
edited = True
chan_context.emit_print("Join",nick_cb,chan,location)
edited = False
return hexchat.EAT_ALL
else:#below needs to be done almost the same as above but add getipintel
response = None
data = None
country_name = None
country_code = None
proxy =''
if(sys.version_info > (3, 0)):
try:
country_code = gi.country_code_by_addr(ip)
country_name = gi.country_name_by_addr(ip)
proxy = ''
ipintel_api_link = "http://check.getipintel.net/check.php?ip=%s&contact=%s&flags=%s" % (ip,email,flags)
request_obj = urllib.request.Request(ipintel_api_link,data=None, headers={'User-Agent': 'Mozilla'})
ipintel_response = urllib.request.urlopen(request_obj).read().decode('utf-8')
proxy_data = str(ipintel_response)
if(str(proxy_data) =='1'):
proxy = 'Proxy'
user_info = [ip,country_name,country_code,proxy]
mydata[nick] = user_info
except HTTPError as err:
print(err.code)
print("Error trying to get Proxy Info from external API")
location =""
try:
location = " "+ident +" "+ ip +" "+ "\00318"+ country_name +"/"+ "\00318"+country_code +" "+ "\00320"+proxy
except:
print("Error in trying to setup location")
return
edited = True
chan_context.emit_print("Join", nick_cb, chan, location)
edited = False
return hexchat.EAT_ALL
else:
return
def onjoin_timeout_cb(_):
unhook()
userip_hook = hexchat.hook_server("340", userip_callback)
timer_handle = hexchat.hook_timer(1000, onjoin_timeout_cb)
return hexchat.EAT_ALL
def match_mask(mask, searchmask ):
if searchmask is None:
searchmask = ''
for match, repl in wildcards.items():
mask = mask.replace(match,repl)
return bool(re.match(mask,searchmask,re.IGNORECASE))
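# e.g. match_mask('*!*@192.168.*', 'nick!user@192.168.1.10') -> True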
def get_user_list(context):
list = context.get_list("users")
return list
def on_chan_ban(word,word_eol,event,attr):
mask_list = []
nicks_matching = []
chan_context = hexchat.get_context()
emit_nicks = ""
global edited_ban
if edited_ban or attr.time or not len(word) > 1:
return
bnick = word[0]
banmask = word[1]
# ban of type nick below, nick!*@* , no need to edit check if last 4 chars match
if banmask[-4:] == '!*@*':
return
user_list = get_user_list(chan_context)
for user in user_list:
fullhost = '*!*' + user.host
toappend = (user.nick, fullhost)
mask_list.append(toappend)
if len(mask_list) > 0:
for user_nick , user_mask in mask_list:
if match_mask(banmask,user_mask) == True:
nicks_matching.append(user_nick)
if len(nicks_matching) > 0:
for nick in nicks_matching:
emit_nicks += ("\00320"+str(nick) +" ")
mask_addendum = "\00317 | " + banmask
emit_nicks += mask_addendum
edited_ban = True
chan_context.emit_print("Channel Ban",bnick,emit_nicks)
edited_ban = False
return hexchat.EAT_ALL
def on_chan_unban(word,word_eol,event,attr):
mask_list = []
nicks_matching = []
chan_context = hexchat.get_context()
emit_nicks = ""
global edited_unban
if edited_unban or attr.time or not len(word) > 1:
return
bnick = word[0]
unban_mask = word[1]
if unban_mask[-4:] == '!*@*':
return
user_list = get_user_list(chan_context)
for user in user_list:
fullhost = '*!*' + user.host
toappend = (user.nick,fullhost)
mask_list.append(toappend)
if len(mask_list) > 0:
for user_nick, user_mask in mask_list:
if match_mask(unban_mask,user_mask) == True:
nicks_matching.append(user_nick)
if len(nicks_matching) > 0:
for nick in nicks_matching:
emit_nicks += ("\00320" + str(nick) + " ")
mask_addendum = "\00317 | " + unban_mask
emit_nicks += mask_addendum
edited_unban = True
chan_context.emit_print("Channel UnBan",bnick,emit_nicks)
edited_unban = False
return hexchat.EAT_ALL
def xsqline(word,word_eol, _):
xsqline_nick = None
if os.name =="nt":
xsqline_nick = getclip()
xsqline_nick = str(xsqline_nick)
#unicode fix
if(sys.version_info > (3, 0)):
xsqline_nick = xsqline_nick[2:-1]
#issue an sqline on that nickname
command = "os sqline add +30d *%s* %s" % (xsqline_nick, sqline_reason)
hexchat.command(command)
def xshun_cb(word,word_eol, _):
global numerics
xshun_timer_handle = None
xshun_nick = None
xshun_hooks = []
if os.name =="nt":
xshun_nick = getclip()
xshun_nick = str(xshun_nick)
if(sys.version_info > (3, 0)):
xshun_nick = xshun_nick[2:-1]
#issue whois on nickname
hexchat.command("whois " + str(xshun_nick))
#function to be called later to unhook all numeric hooks
def xshun_unhook():
for hook in xshun_hooks:
hexchat.unhook(hook)
hexchat.unhook(xshun_timer_handle)
def xshun_notice_cb(word, word_eol, _):
if word[1] == '378':
connecting_ip = str(word[8])
if connecting_ip not in IRCCloud:
hexchat.command("shun +*@%s %s %s" % (str(connecting_ip),shun_time,shun_reason))
return hexchat.EAT_ALL
def xshun_timeout_cb(_):
xshun_unhook()
xshun_hooks = [hexchat.hook_server(numeric, xshun_notice_cb) for numeric in numerics]
xshun_timer_handle = hexchat.hook_timer(1000, xshun_timeout_cb)
return hexchat.EAT_ALL
def xline_cb(word,word_eol, _):
global numerics
xline_nick = None
xline_timer_handle = None
xline_hooks = []
if os.name =="nt":
xline_nick = getclip()
xline_nick = str(xline_nick)
if(sys.version_info > (3, 0)):
xline_nick = xline_nick[2:-1]
#issue whois on nickname
hexchat.command("whois " + str(xline_nick))
#function to be called later to unhook all numeric hooks
def xline_unhook():
for hook in xline_hooks:
hexchat.unhook(hook)
hexchat.unhook(xline_timer_handle)
def xline_notice_cb(word, word_eol, _):
if word[1] == '378':
connecting_ip = str(word[8])
if connecting_ip not in IRCCloud:
hexchat.command("os akill add %s *@%s %s" % (akill_time,str(connecting_ip),akill_reason))
return hexchat.EAT_ALL
def xline_timeout_cb(_):
xline_unhook()
xline_hooks = [hexchat.hook_server(numeric, xline_notice_cb) for numeric in numerics]
xline_timer_handle = hexchat.hook_timer(1000, xline_timeout_cb)
return hexchat.EAT_ALL
hexchat.hook_command("xline", xline_cb, help="/xline <nick> , Akills user from the server.")
hexchat.hook_command("xshun", xshun_cb, help="/xshun <nick> , Shuns user from the server.")
hexchat.hook_command("xsqline", xsqline, help="/xsqline <nick> , Places an sqline on the nick")
hexchat.hook_print("Server Notice", on_server_join)
hexchat.hook_print_attrs("Join", on_chan_join, "Join",priority=hexchat.PRI_NORM)
hexchat.hook_print_attrs("Channel Ban", on_chan_ban, "Channel Ban",priority=hexchat.PRI_NORM)
hexchat.hook_print_attrs("Channel UnBan", on_chan_unban, "Channel UnBan",priority=hexchat.PRI_NORM)
print(__module_version__ + " version " + __module_name__ + " loaded.")
'''
TODO
Add keyboard shortcuts inside this plugin if possible.
Move GeoIP checking to a local DB instead of using a web API.
Add country to whois info
'''
|
irrgit/hexchat-plugins
|
hexchat-oper/hexchat-oper.py
|
Python
|
gpl-3.0
| 18,986
|
from controller.controller_unit import Controller
from colorama import Fore, init as fore_init
from arg_parser import init_parser
from benchmark.run_benchmark import Benchmark
fore_init()
args = init_parser()
def run():
try:
print(Fore.LIGHTGREEN_EX, "Welcome to the game Logik!", Fore.RESET)
print("This implementation was created by Michal Polovka in 2017.\n"
"Shareable under GPL-3.0 licence.\n"
"See the game repository for details: https://github.com/miskopo/Logik")
print()
if args.interactive is None and args.benchmark is None:
print(Fore.LIGHTYELLOW_EX, "You didn't choose whether you want to play or to run benchmark", Fore.RESET)
print("0. Play the game!\n1. Run benchmark!")
choice = -1
while choice != '0' and choice != '1':
choice = input("Your choice? ")
if choice != '0' and choice != '1':
print(Fore.LIGHTRED_EX, "Invalid choice. try again.",Fore.RESET)
if choice == '0':
# play the game
args.interactive = True
elif choice == '1':
# run benchmark
args.benchmark = True
if args.interactive:
# run interactive game
pass
elif args.benchmark:
benchmark = Benchmark(args)
benchmark()
except KeyboardInterrupt:
print(Fore.LIGHTYELLOW_EX, "\nExiting.", Fore.RESET)
exit(0)
if __name__ == '__main__':
run()
|
miskopo/Logik
|
run.py
|
Python
|
gpl-3.0
| 1,562
|
# -*- coding: utf8 -*-
from ffcgi import cgi_event
del(cgi_event['id'])
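# Paged report query: requirements joined with customer/performer users plus a related order count.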
SQL = (
("lists", """select SQL_CALC_FOUND_ROWS r.*,u1.FIO as customer, u2.FIO as performer, (select count(1) from delo_order where requirement_id=r.id )cnt
FROM `requirement` r left join users u1 on(u1.id=id_customer)
left join users u2 on(u2.id=id_performer)
%(where)s
ORDER BY date1 DESC
limit %(offset)d,%(limit)d;"""),
)
FOUND_ROWS = True
ROOT = "requirement_lists"
ROOT_PREFIX = None
ROOT_POSTFIX = None
XSL_TEMPLATE = "data/af-web.xsl"
EVENT = None
WHERE = ("id_customer", "id_performer", "id")
PARAM = None
MESSAGE = "Страница отчётов не доступна"
TITLE="Страница заказанных дел"
ORDER = None
|
ffsdmad/af-web
|
cgi-bin/plugins2/report/order/requirement_list.py
|
Python
|
gpl-3.0
| 787
|
#!/usr/bin/env python
import re
import os
from os import path
import logging
import sqlalchemy
import taxtastic
import taxtastic.ncbi
from taxtastic.ncbi import read_names, read_archive
from . import config
from .config import TestBase
log = logging
outputdir = config.outputdir
datadir = config.datadir
ncbi_master_db = config.ncbi_master_db
ncbi_data = config.ncbi_data
class TestDbconnect(TestBase):
def test01(self):
engine = sqlalchemy.create_engine('sqlite:///' + ncbi_master_db)
taxtastic.ncbi.db_connect(engine)
with engine.begin() as con:
result = con.execute(
'select name from sqlite_master where type = "table"')
tables = set(i[0] for i in result)
self.assertTrue(
set(['nodes', 'names', 'merged', 'source']).issubset(tables))
class TestLoadData(TestBase):
def setUp(self):
outdir = self.mkoutdir()
self.db_path = os.path.join(outdir, 'taxonomy.db')
self.url = 'sqlite:///' + self.db_path
def test01(self):
# we should be starting from scratch
self.assertFalse(path.isfile(self.db_path))
engine = sqlalchemy.create_engine(self.url)
taxtastic.ncbi.db_connect(engine)
self.assertTrue(path.isfile(self.db_path))
class TestReadNames(TestBase):
def setUp(self):
self.zipfile = ncbi_data
def test02(self):
"""
is_classified always None
"""
rows = read_names(rows=read_archive(self.zipfile, 'names.dmp'))
headers = next(rows)
is_classified = headers.index('is_classified')
self.assertEqual(
set(row[is_classified] for row in rows), set([None]))
class TestUnclassifiedRegex(TestBase):
"""
Test the heuristic used to determine if a taxonomic name is meaningful.
"""
def setUp(self):
self.pieces = taxtastic.ncbi.UNCLASSIFIED_REGEX_COMPONENTS
self.regexes = [re.compile(piece) for piece in self.pieces]
with open(config.data_path('type_strain_names.txt')) as fp:
self.type_strain_names = [i.rstrip() for i in fp]
def test_no_type_strains_match(self):
for strain_name in self.type_strain_names:
for regex in self.regexes:
m = regex.search(strain_name)
if m:
self.fail('"{0}" matches "{1}"'.format(
strain_name, regex.pattern))
# def generate_test_unclassified_regex():
#"""
# Generate a test class verifying that none of the type strains in
# type_strain_names.txt match the unclassified regex.
#"""
# def generate_test(strain_name):
# def do_test(self):
# for regex in self.regexes:
#m = regex.search(strain_name)
# if m:
#self.fail('"{0}" matches "{1}"'.format(strain_name, regex.pattern))
# return do_test
# class TestUnclassifiedRegex(TestBase):
# def setUp(self):
#self.pieces = taxtastic.ncbi.UNCLASSIFIED_REGEX_COMPONENTS
#self.regexes = [re.compile(piece) for piece in self.pieces]
# with open(config.data_path('type_strain_names.txt')) as fp:
#type_strain_names = [i.rstrip() for i in fp]
# for s in type_strain_names:
#test_fn = generate_test(s)
#func_name = 'test_{0}_no_match'.format(re.sub(r'[ -.]', '_', s).lower())
#test_fn.__name__ = func_name
#setattr(TestUnclassifiedRegex, func_name, test_fn)
# return TestUnclassifiedRegex
#TestUnclassifiedRegex = generate_test_unclassified_regex()
#del generate_test_unclassified_regex
|
fhcrc/taxtastic
|
tests/test_ncbi.py
|
Python
|
gpl-3.0
| 3,662
|
#!/usr/bin/python
# continue: it skips the rest of the current iteration.
for student in ['rajni','madhuri','priya','kumar','sunil','raj','praveen']:
if student == 'kumar':
continue
#break
#pass
print "results for the student - {}".format(student)
|
tuxfux-hlp-notes/python-batches
|
archieves/batch-58/exams.py
|
Python
|
gpl-3.0
| 237
|
import glob
import os
import pickle
import unittest
import numpy as np
from scipy import sparse
from sklearn import datasets
from sklearn.exceptions import NotFittedError
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.validation import check_random_state
from rgf.sklearn import RGFClassifier, RGFRegressor
from rgf.sklearn import FastRGFClassifier, FastRGFRegressor
from rgf.utils import cleanup, TEMP_PATH
class RGFClassfierBaseTest(object):
def setUp(self):
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
self.X_train, self.X_test, self.y_train, self.y_test = \
train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
y_str_train = np.array(self.y_train, dtype=str)
y_str_train[y_str_train == '0'] = 'Zero'
y_str_train[y_str_train == '1'] = 'One'
y_str_train[y_str_train == '2'] = 'Two'
self.y_str_train = y_str_train
y_str_test = np.array(self.y_test, dtype=str)
y_str_test[y_str_test == '0'] = 'Zero'
y_str_test[y_str_test == '1'] = 'One'
y_str_test[y_str_test == '2'] = 'Two'
self.y_str_test = y_str_test
self.accuracy = 0.9
def test_classifier(self):
clf = self.classifier_class(**self.kwargs)
clf.fit(self.X_train, self.y_train)
proba_sum = clf.predict_proba(self.X_test).sum(axis=1)
np.testing.assert_almost_equal(proba_sum, np.ones(self.y_test.shape[0]))
score = clf.score(self.X_test, self.y_test)
self.assertGreaterEqual(score, self.accuracy, "Failed with score = {0:.5f}".format(score))
def test_softmax_classifier(self):
clf = self.classifier_class(calc_prob='softmax', **self.kwargs)
clf.fit(self.X_train, self.y_train)
proba_sum = clf.predict_proba(self.X_test).sum(axis=1)
np.testing.assert_almost_equal(proba_sum, np.ones(self.y_test.shape[0]))
score = clf.score(self.X_test, self.y_test)
self.assertGreaterEqual(score, self.accuracy, "Failed with score = {0:.5f}".format(score))
def test_bin_classifier(self):
clf = self.classifier_class(**self.kwargs)
bin_target_train = (self.y_train == 2).astype(int)
bin_target_test = (self.y_test == 2).astype(int)
clf.fit(self.X_train, bin_target_train)
proba_sum = clf.predict_proba(self.X_test).sum(axis=1)
np.testing.assert_almost_equal(proba_sum, np.ones(bin_target_test.shape[0]))
score = clf.score(self.X_test, bin_target_test)
self.assertGreaterEqual(score, self.accuracy, "Failed with score = {0:.5f}".format(score))
def test_string_y(self):
clf = self.classifier_class(**self.kwargs)
clf.fit(self.X_train, self.y_str_train)
y_pred = clf.predict(self.X_test)
score = accuracy_score(self.y_str_test, y_pred)
self.assertGreaterEqual(score, self.accuracy, "Failed with score = {0:.5f}".format(score))
def test_bin_string_y(self):
self.accuracy = 0.75
clf = self.classifier_class(**self.kwargs)
bin_X_train = self.X_train[self.y_train != 0]
bin_X_test = self.X_test[self.y_test != 0]
y_str_train = self.y_str_train[self.y_str_train != 'Zero']
y_str_test = self.y_str_test[self.y_str_test != 'Zero']
clf.fit(bin_X_train, y_str_train)
y_pred = clf.predict(bin_X_test)
score = accuracy_score(y_str_test, y_pred)
self.assertGreaterEqual(score, self.accuracy, "Failed with score = {0:.5f}".format(score))
def test_sklearn_integration(self):
check_estimator(self.classifier_class)
def test_classifier_sparse_input(self):
clf = self.classifier_class(calc_prob='softmax', **self.kwargs)
for sparse_format in (sparse.bsr_matrix, sparse.coo_matrix, sparse.csc_matrix,
sparse.csr_matrix, sparse.dia_matrix, sparse.dok_matrix, sparse.lil_matrix):
sparse_X_train = sparse_format(self.X_train)
sparse_X_test = sparse_format(self.X_test)
clf.fit(sparse_X_train, self.y_train)
score = clf.score(sparse_X_test, self.y_test)
self.assertGreaterEqual(score, self.accuracy, "Failed with score = {0:.5f}".format(score))
def test_sample_weight(self):
clf = self.classifier_class(**self.kwargs)
y_pred = clf.fit(self.X_train, self.y_train).predict_proba(self.X_test)
y_pred_weighted = clf.fit(self.X_train,
self.y_train,
np.ones(self.y_train.shape[0])
).predict_proba(self.X_test)
np.testing.assert_allclose(y_pred, y_pred_weighted)
weights = np.ones(self.y_train.shape[0]) * np.nextafter(np.float32(0), np.float32(1))
weights[0] = 1
y_pred_weighted = clf.fit(self.X_train, self.y_train, weights).predict(self.X_test)
np.testing.assert_equal(y_pred_weighted, np.full(self.y_test.shape[0], self.y_test[0]))
def test_input_arrays_shape(self):
clf = self.classifier_class(**self.kwargs)
n_samples = self.y_train.shape[0]
self.assertRaises(ValueError, clf.fit, self.X_train, self.y_train[:(n_samples - 1)])
self.assertRaises(ValueError, clf.fit, self.X_train, self.y_train, np.ones(n_samples - 1))
self.assertRaises(ValueError,
clf.fit,
self.X_train,
self.y_train,
np.ones((n_samples, 2)))
def test_pickle(self):
clf1 = self.classifier_class(**self.kwargs)
clf1.fit(self.X_train, self.y_train)
y_pred1 = clf1.predict(self.X_test)
s = pickle.dumps(clf1)
# Remove model file
cleanup()
clf2 = pickle.loads(s)
y_pred2 = clf2.predict(self.X_test)
np.testing.assert_allclose(y_pred1, y_pred2)
def test_joblib_pickle(self):
clf1 = self.classifier_class(**self.kwargs)
clf1.fit(self.X_train, self.y_train)
y_pred1 = clf1.predict(self.X_test)
joblib.dump(clf1, 'test_clf.pkl')
# Remove model file
cleanup()
clf2 = joblib.load('test_clf.pkl')
y_pred2 = clf2.predict(self.X_test)
np.testing.assert_allclose(y_pred1, y_pred2)
def test_cleanup(self):
clf1 = self.classifier_class(**self.kwargs)
clf1.fit(self.X_train, self.y_train)
clf2 = self.classifier_class(**self.kwargs)
clf2.fit(self.X_train, self.y_train)
self.assertNotEqual(clf1.cleanup(), 0)
self.assertEqual(clf1.cleanup(), 0)
for est in clf1.estimators_:
glob_file = os.path.join(TEMP_PATH, est._file_prefix + "*")
self.assertFalse(glob.glob(glob_file))
self.assertRaises(NotFittedError, clf1.predict, self.X_test)
clf2.predict(self.X_test)
def test_parallel_gridsearch(self):
self.kwargs['n_jobs'] = 1
param_grid = dict(min_samples_leaf=[5, 10])
grid = GridSearchCV(self.classifier_class(**self.kwargs),
param_grid=param_grid, refit=True, cv=2, verbose=0, n_jobs=-1)
grid.fit(self.X_train, self.y_train)
y_pred = grid.best_estimator_.predict(self.X_test)
score = accuracy_score(self.y_test, y_pred)
self.assertGreaterEqual(score, self.accuracy, "Failed with score = {0:.5f}".format(score))
class TestRGFClassfier(RGFClassfierBaseTest, unittest.TestCase):
def setUp(self):
self.classifier_class = RGFClassifier
self.kwargs = {}
super(TestRGFClassfier, self).setUp()
def test_params(self):
clf = self.classifier_class(**self.kwargs)
valid_params = dict(max_leaf=300,
test_interval=100,
algorithm='RGF_Sib',
loss='Log',
reg_depth=1.1,
l2=0.1,
sl2=None,
normalize=False,
min_samples_leaf=0.4,
n_iter=None,
n_tree_search=2,
opt_interval=100,
learning_rate=0.4,
calc_prob='sigmoid',
n_jobs=-1,
memory_policy='conservative',
verbose=True)
clf.set_params(**valid_params)
clf.fit(self.X_train, self.y_train)
non_valid_params = dict(max_leaf=0,
test_interval=0,
algorithm='RGF_Test',
loss=True,
reg_depth=0.1,
l2=11,
sl2=-1.1,
normalize='False',
min_samples_leaf=0.7,
n_iter=11.1,
n_tree_search=0,
opt_interval=100.1,
learning_rate=-0.5,
calc_prob=True,
n_jobs='-1',
memory_policy='Generos',
verbose=-1)
for key in non_valid_params:
clf.set_params(**valid_params) # Reset to valid params
            clf.set_params(**{key: non_valid_params[key]}) # Pick and set one non-valid parameter
self.assertRaises(ValueError, clf.fit, self.X_train, self.y_train)
def test_attributes(self):
clf = self.classifier_class(**self.kwargs)
attributes = ('estimators_', 'classes_', 'n_classes_', 'n_features_', 'fitted_',
'sl2_', 'min_samples_leaf_', 'n_iter_')
for attr in attributes:
self.assertRaises(NotFittedError, getattr, clf, attr)
clf.fit(self.X_train, self.y_train)
self.assertEqual(len(clf.estimators_), len(np.unique(self.y_train)))
np.testing.assert_array_equal(clf.classes_, sorted(np.unique(self.y_train)))
self.assertEqual(clf.n_classes_, len(clf.estimators_))
self.assertEqual(clf.n_features_, self.X_train.shape[-1])
self.assertTrue(clf.fitted_)
if clf.sl2 is None:
self.assertEqual(clf.sl2_, clf.l2)
else:
self.assertEqual(clf.sl2_, clf.sl2)
if clf.min_samples_leaf < 1:
self.assertLessEqual(clf.min_samples_leaf_, 0.5 * self.X_train.shape[0])
else:
self.assertEqual(clf.min_samples_leaf_, clf.min_samples_leaf)
if clf.n_iter is None:
if clf.loss == "LS":
self.assertEqual(clf.n_iter_, 10)
else:
self.assertEqual(clf.n_iter_, 5)
else:
self.assertEqual(clf.n_iter_, clf.n_iter)
class TestFastRGFClassfier(RGFClassfierBaseTest, unittest.TestCase):
def setUp(self):
self.classifier_class = FastRGFClassifier
self.kwargs = {}
super(TestFastRGFClassfier, self).setUp()
def test_params(self):
clf = self.classifier_class(**self.kwargs)
valid_params = dict(n_estimators=50,
max_depth=3,
max_leaf=20,
tree_gain_ratio=0.3,
min_samples_leaf=0.5,
loss="LOGISTIC",
l1=0.6,
l2=100.0,
opt_algorithm='rgf',
learning_rate=0.05,
max_bin=150,
min_child_weight=9.0,
data_l2=9.0,
sparse_max_features=1000,
sparse_min_occurences=2,
calc_prob="sigmoid",
n_jobs=-1,
verbose=True)
clf.set_params(**valid_params)
clf.fit(self.X_train, self.y_train)
non_valid_params = dict(n_estimators=0,
max_depth=-3.0,
max_leaf=0,
tree_gain_ratio=1.3,
min_samples_leaf=0.55,
loss="LOG",
l1=6,
l2=-10.0,
opt_algorithm='RGF',
learning_rate=0.0,
max_bin=0.5,
min_child_weight='auto',
data_l2=None,
sparse_max_features=0,
sparse_min_occurences=-2.0,
calc_prob=None,
n_jobs=None,
verbose=-3)
for key in non_valid_params:
clf.set_params(**valid_params) # Reset to valid params
            clf.set_params(**{key: non_valid_params[key]}) # Pick and set one non-valid parameter
self.assertRaises(ValueError, clf.fit, self.X_train, self.y_train)
def test_attributes(self):
clf = self.classifier_class(**self.kwargs)
attributes = ('estimators_', 'classes_', 'n_classes_', 'n_features_', 'fitted_',
'max_bin_', 'min_samples_leaf_')
for attr in attributes:
self.assertRaises(NotFittedError, getattr, clf, attr)
clf.fit(self.X_train, self.y_train)
self.assertEqual(len(clf.estimators_), len(np.unique(self.y_train)))
np.testing.assert_array_equal(clf.classes_, sorted(np.unique(self.y_train)))
self.assertEqual(clf.n_classes_, len(clf.estimators_))
self.assertEqual(clf.n_features_, self.X_train.shape[-1])
self.assertTrue(clf.fitted_)
if clf.max_bin is None:
if sparse.isspmatrix(self.X_train):
self.assertEqual(clf.max_bin_, 200)
else:
self.assertEqual(clf.max_bin_, 65000)
else:
self.assertEqual(clf.max_bin_, clf.max_bin)
if clf.min_samples_leaf < 1:
self.assertLessEqual(clf.min_samples_leaf_, 0.5 * self.X_train.shape[0])
else:
self.assertEqual(clf.min_samples_leaf_, clf.min_samples_leaf)
def test_sklearn_integration(self):
# TODO(fukatani): FastRGF bug?
        # FastRGF doesn't work if the number of samples is too small.
# check_estimator(self.classifier_class)
pass
class RGFRegressorBaseTest(object):
def setUp(self):
self.X, self.y = datasets.make_friedman1(n_samples=500,
random_state=1,
noise=1.0)
self.X_train, self.y_train = self.X[:400], self.y[:400]
self.X_test, self.y_test = self.X[400:], self.y[400:]
def test_regressor(self):
reg = self.regressor_class(**self.kwargs)
reg.fit(self.X_train, self.y_train)
y_pred = reg.predict(self.X_test)
mse = mean_squared_error(self.y_test, y_pred)
self.assertLess(mse, self.mse, "Failed with MSE = {0:.5f}".format(mse))
def test_sklearn_integration(self):
check_estimator(self.regressor_class)
def test_regressor_sparse_input(self):
reg = self.regressor_class(**self.kwargs)
for sparse_format in (sparse.bsr_matrix, sparse.coo_matrix, sparse.csc_matrix,
sparse.csr_matrix, sparse.dia_matrix, sparse.dok_matrix, sparse.lil_matrix):
X_sparse_train = sparse_format(self.X_train)
X_sparse_test = sparse_format(self.X_test)
reg.fit(X_sparse_train, self.y_train)
y_pred = reg.predict(X_sparse_test)
mse = mean_squared_error(self.y_test, y_pred)
self.assertLess(mse, self.mse, "Failed with MSE = {0:.5f}".format(mse))
def test_sample_weight(self):
reg = self.regressor_class(**self.kwargs)
y_pred = reg.fit(self.X_train, self.y_train).predict(self.X_test)
y_pred_weighted = reg.fit(self.X_train,
self.y_train,
np.ones(self.y_train.shape[0])
).predict(self.X_test)
np.testing.assert_allclose(y_pred, y_pred_weighted)
np.random.seed(42)
idx = np.random.choice(400, 80, replace=False)
self.X_train[idx] = -99999 # Add some outliers
y_pred_corrupt = reg.fit(self.X_train, self.y_train).predict(self.X_test)
mse_corrupt = mean_squared_error(self.y_test, y_pred_corrupt)
weights = np.ones(self.y_train.shape[0])
weights[idx] = np.nextafter(np.float32(0), np.float32(1)) # Eliminate outliers
y_pred_weighted = reg.fit(self.X_train, self.y_train, weights).predict(self.X_test)
mse_fixed = mean_squared_error(self.y_test, y_pred_weighted)
self.assertLess(mse_fixed, mse_corrupt)
def test_input_arrays_shape(self):
reg = self.regressor_class(**self.kwargs)
n_samples = self.y_train.shape[0]
self.assertRaises(ValueError, reg.fit, self.X_train, self.y_train[:(n_samples - 1)])
self.assertRaises(ValueError, reg.fit, self.X_train, self.y_train, np.ones(n_samples - 1))
self.assertRaises(ValueError,
reg.fit,
self.X_train,
self.y_train,
np.ones((n_samples, 2)))
def test_pickle(self):
reg1 = self.regressor_class(**self.kwargs)
reg1.fit(self.X_train, self.y_train)
y_pred1 = reg1.predict(self.X_test)
s = pickle.dumps(reg1)
# Remove model file
cleanup()
reg2 = pickle.loads(s)
y_pred2 = reg2.predict(self.X_test)
np.testing.assert_allclose(y_pred1, y_pred2)
def test_joblib_pickle(self):
reg1 = self.regressor_class(**self.kwargs)
reg1.fit(self.X_train, self.y_train)
y_pred1 = reg1.predict(self.X_test)
joblib.dump(reg1, 'test_reg.pkl')
# Remove model file
cleanup()
reg2 = joblib.load('test_reg.pkl')
y_pred2 = reg2.predict(self.X_test)
np.testing.assert_allclose(y_pred1, y_pred2)
def test_cleanup(self):
reg1 = self.regressor_class(**self.kwargs)
reg1.fit(self.X_train, self.y_train)
reg2 = self.regressor_class(**self.kwargs)
reg2.fit(self.X_train, self.y_train)
self.assertNotEqual(reg1.cleanup(), 0)
self.assertEqual(reg1.cleanup(), 0)
glob_file = os.path.join(TEMP_PATH, reg1._file_prefix + "*")
self.assertFalse(glob.glob(glob_file))
self.assertRaises(NotFittedError, reg1.predict, self.X_test)
reg2.predict(self.X_test)
def test_parallel_gridsearch(self):
param_grid = dict(min_samples_leaf=[5, 10])
grid = GridSearchCV(self.regressor_class(**self.kwargs),
param_grid=param_grid, refit=True, cv=2, verbose=0, n_jobs=-1)
grid.fit(self.X_train, self.y_train)
y_pred = grid.best_estimator_.predict(self.X_test)
mse = mean_squared_error(self.y_test, y_pred)
self.assertLess(mse, self.mse, "Failed with MSE = {0:.5f}".format(mse))
class TestRGFRegressor(RGFRegressorBaseTest, unittest.TestCase):
def setUp(self):
self.regressor_class = RGFRegressor
self.kwargs = {}
self.mse = 2.0353275768
super(TestRGFRegressor, self).setUp()
def test_params(self):
reg = self.regressor_class(**self.kwargs)
valid_params = dict(max_leaf=300,
test_interval=100,
algorithm='RGF_Sib',
loss='Log',
reg_depth=1.1,
l2=0.1,
sl2=None,
normalize=False,
min_samples_leaf=0.4,
n_iter=None,
n_tree_search=2,
opt_interval=100,
learning_rate=0.4,
memory_policy='conservative',
verbose=True)
reg.set_params(**valid_params)
reg.fit(self.X_train, self.y_train)
non_valid_params = dict(max_leaf=0,
test_interval=0,
algorithm='RGF_Test',
loss=True,
reg_depth=0.1,
l2=11,
sl2=-1.1,
normalize='False',
min_samples_leaf=0.7,
n_iter=11.1,
n_tree_search=0,
opt_interval=100.1,
learning_rate=-0.5,
memory_policy='Generos',
verbose=-1)
for key in non_valid_params:
reg.set_params(**valid_params) # Reset to valid params
            reg.set_params(**{key: non_valid_params[key]}) # Pick and set one non-valid parameter
self.assertRaises(ValueError, reg.fit, self.X_train, self.y_train)
def test_attributes(self):
reg = self.regressor_class(**self.kwargs)
attributes = ('n_features_', 'fitted_', 'sl2_', 'min_samples_leaf_', 'n_iter_')
for attr in attributes:
self.assertRaises(NotFittedError, getattr, reg, attr)
reg.fit(self.X_train, self.y_train)
self.assertEqual(reg.n_features_, self.X_train.shape[-1])
self.assertTrue(reg.fitted_)
if reg.sl2 is None:
self.assertEqual(reg.sl2_, reg.l2)
else:
self.assertEqual(reg.sl2_, reg.sl2)
if reg.min_samples_leaf < 1:
self.assertLessEqual(reg.min_samples_leaf_, 0.5 * self.X_train.shape[0])
else:
self.assertEqual(reg.min_samples_leaf_, reg.min_samples_leaf)
if reg.n_iter is None:
if reg.loss == "LS":
self.assertEqual(reg.n_iter_, 10)
else:
self.assertEqual(reg.n_iter_, 5)
else:
self.assertEqual(reg.n_iter_, reg.n_iter)
def test_abs_regressor(self):
reg = self.regressor_class(loss="Abs")
reg.fit(self.X_train, self.y_train)
y_pred = reg.predict(self.X_test)
mae = mean_absolute_error(self.y_test, y_pred)
self.assertLess(mae, 1.9916427774, "Failed with MAE = {0:.5f}".format(mae))
class TestFastRGFRegressor(RGFRegressorBaseTest, unittest.TestCase):
def setUp(self):
self.regressor_class = FastRGFRegressor
self.kwargs = {}
self.mse = 2.5522511545
super(TestFastRGFRegressor, self).setUp()
def test_params(self):
reg = self.regressor_class(**self.kwargs)
valid_params = dict(n_estimators=50,
max_depth=3,
max_leaf=20,
tree_gain_ratio=0.3,
min_samples_leaf=0.5,
l1=0.6,
l2=100.0,
opt_algorithm='rgf',
learning_rate=0.05,
max_bin=150,
min_child_weight=9.0,
data_l2=9.0,
sparse_max_features=1000,
sparse_min_occurences=2,
n_jobs=-1,
verbose=True)
reg.set_params(**valid_params)
reg.fit(self.X_train, self.y_train)
non_valid_params = dict(n_estimators=0,
max_depth=-3.0,
max_leaf=0,
tree_gain_ratio=1.3,
min_samples_leaf=0.55,
l1=6,
l2=-10.0,
opt_algorithm='RGF',
learning_rate=0.0,
max_bin=0.5,
min_child_weight='auto',
data_l2=None,
sparse_max_features=0,
sparse_min_occurences=-2.0,
n_jobs=None,
verbose=-3)
for key in non_valid_params:
reg.set_params(**valid_params) # Reset to valid params
            reg.set_params(**{key: non_valid_params[key]}) # Pick and set one non-valid parameter
self.assertRaises(ValueError, reg.fit, self.X_train, self.y_train)
def test_attributes(self):
reg = self.regressor_class(**self.kwargs)
attributes = ('n_features_', 'fitted_', 'max_bin_', 'min_samples_leaf_')
for attr in attributes:
self.assertRaises(NotFittedError, getattr, reg, attr)
reg.fit(self.X_train, self.y_train)
self.assertEqual(reg.n_features_, self.X_train.shape[-1])
self.assertTrue(reg.fitted_)
if reg.max_bin is None:
if sparse.isspmatrix(self.X_train):
self.assertEqual(reg.max_bin_, 200)
else:
self.assertEqual(reg.max_bin_, 65000)
else:
self.assertEqual(reg.max_bin_, reg.max_bin)
if reg.min_samples_leaf < 1:
self.assertLessEqual(reg.min_samples_leaf_, 0.5 * self.X_train.shape[0])
else:
self.assertEqual(reg.min_samples_leaf_, reg.min_samples_leaf)
def test_parallel_gridsearch(self):
self.kwargs['n_jobs'] = 1
super(TestFastRGFRegressor, self).test_parallel_gridsearch()
def test_sklearn_integration(self):
# TODO(fukatani): FastRGF bug?
        # FastRGF discretization doesn't work if the number of samples is too
# small.
# check_estimator(self.regressor_class)
pass
|
StrikerRUS/rgf_python
|
tests/test_rgf_python.py
|
Python
|
gpl-3.0
| 26,813
|
#!/usr/bin/env python
# Copyright 2010-2013 by Alexander O'Neill
# Project home page: http://github.com/alxp/subspeech.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re, os, random, subprocess, struct, sys
from datetime import datetime
from time import mktime
from tempfile import mkdtemp
from optparse import OptionParser
from HTMLParser import HTMLParser
from shutil import rmtree
from wavlen import wavLen
global currenttime
global basename
global scriptpath
global temppath
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, data):
self.fed.append(data)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def get_yes_or_no(message):
sys.stdout.write(message + ' (y/n) [default: n] ' )
answers = {'y': True, 'yes':True, 'n':False, 'no': False}
while True:
user_input = raw_input().lower()
if user_input in ['y', 'yes', 'n', 'no']:
return answers[user_input]
elif user_input in ['']:
            # Empty input: default to no (False).
return False
else:
print 'Please enter y for yes or n for no.'
def check_output_file(basename, force_overwrite, quiet):
mp3file = basename + '.mp3'
if os.path.isfile(mp3file):
if (force_overwrite == False):
if (quiet == False):
                user_overwrite = get_yes_or_no('File ' + mp3file + ' exists. Overwrite it?')
else:
user_overwrite = False
if (user_overwrite == False):
print 'Aborting.'
exit(1)
os.remove(mp3file)
def get_start_time(line):
"""'line' is of the format '00:00:12,487 --> 00:00:14,762'
Return the number of milliseconds that the start time evaluates to."""
starttimestamp = re.findall(r'([0-9]+):([0-9]+):([0-9]+),([0-9]+)', line)[0]
seconds = int(starttimestamp[0]) * 3600 + int(starttimestamp[1]) * 60 + int(starttimestamp[2])
ms = seconds * 1000 + int(starttimestamp[3])
return ms
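# Worked example (illustrative, not part of the original script): for the line
# '00:00:12,487 --> 00:00:14,762' the start stamp is 0 h, 0 min, 12 s and 487 ms,
# so get_start_time() returns 12 * 1000 + 487 = 12487 milliseconds.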
def get_snippet(f):
""" Read text starting at the current position in file f.
Return a tuple containing:
The line number, read from the file
The start time
The text following the time stamp"""
snippetnumber = 0
starttime = 0
snippettext = ''
# Eat blank or invalid lines until a line number is found.
while True:
l = f.readline()
if l == '':
return None
line = l.split()
# We are expecting a line number, ignore anything that isn't a number.
if len(line) == 0 or len(line) > 1 or line[0].isdigit() == False:
continue
snippetnumber = int(line[0])
break
starttime = get_start_time(f.readline())
if type( starttime ) != int:
return None # If the file isn't formatted well just bail.
l = f.readline()
while len(l.split()) != 0:
line = l.split()
if len(line) == 0:
break
snippettext = snippettext + ' ' + l
l = f.readline()
return [snippetnumber, starttime, strip_tags(snippettext)]
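# Illustrative example (hypothetical subtitle block, not from the original file):
# for an .srt entry made of the lines "1", "00:00:12,487 --> 00:00:14,762",
# "Hello there." and a blank line, get_snippet() returns roughly
# [1, 12487, ' Hello there.\n'] once any HTML tags have been stripped.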
def generate_silence(timediff, seqnum):
""" Returns the filename of a newly-created MP3 file containing silence"""
# We are generating files at 23.2kHz.
ticks = timediff / 23.22
filename = basename + '_' + str(seqnum) + '_silence.wav'
os.system('dd if=/dev/zero of=' + temppath + '/silence.raw bs=1k count='+str(int(round(ticks))) + '>/dev/null 2>/dev/null')
os.system('ffmpeg -v 0 -y -f s16le -ac 1 -ar 22050 -i ' + temppath + '/' + 'silence.raw' + ' -f wav ' + temppath + '/' + filename)#+ ' >/dev/null 2>/dev/null')
return temppath + '/' + filename
def create_speech_file (snippettext, snippetnumber, voice, rate):
speechaifffile = basename + '_' + str(snippetnumber) + '_text.aiff'
speechtxtfile = basename + '_' + str(snippetnumber) + '_text.txt'
speechfile = basename + '_' + str(snippetnumber) + '_text.wav'
txtout = open(temppath + "/" + speechtxtfile, 'w')
txtout.write(snippettext)
txtout.close()
say_params = ["say", "-o", temppath + "/" + speechfile, "--data-format=LEI16@22050", '-f', temppath + "/" + speechtxtfile]
if (voice):
say_params += ["-v", voice]
if (rate):
say_params += ["-r", str(rate)]
subprocess.call(say_params)
os.remove(temppath + "/" + speechtxtfile)
return temppath + '/' + speechfile
def parse_subtitles(srtfile, quiet, voice, rate):
f = open(srtfile)
currenttime = 0
done = False
sound_files = []
while done == False:
snippet = get_snippet(f)
if snippet == None:
done = True
break
snippetnumber = snippet[0]
starttime = snippet[1]
snippettext = snippet[2]
gap = starttime - currenttime
# Too-small gaps, like 4ms, create invalid .wav files.
if (gap > 50):
silence_file = generate_silence(gap, snippetnumber)
sound_files.append(silence_file)
else:
silence_file = None
currenttime = starttime
if (quiet == False):
print snippettext
speechfile = create_speech_file(snippettext, snippetnumber, voice, rate)
currenttime += wavLen(speechfile)
sound_files.append(speechfile)
if (silence_file):
os.remove(temppath + '/silence.raw')
return sound_files
os.environ['PATH'] += ':/usr/local/bin'
scriptpath = os.path.abspath( os.path.dirname( sys.argv[0]) )
temppath = mkdtemp()
def combine_sound_files(sound_files):
output_file = open(temppath + '/soundfiles.txt', 'w')
for sound_file in sound_files:
output_file.write('file \'' + sound_file + '\'\n')
output_file.close()
combined_filename = temppath + '/' + basename + '.wav'
os.system('ffmpeg -f concat -i ' + temppath + '/soundfiles.txt -c copy ' + combined_filename + ' >/dev/null 2>/dev/null')
return combined_filename
def compress_combined_file(wav_file, quiet):
if (quiet):
quietstr = ' --quiet'
else:
quietstr = ''
os.system('lame -v ' + wav_file + ' ' + basename + '.mp3' + quietstr)
def main():
global basename
usage = "Usage: %prog [options] subtitlefile"
description = "Parse .srt (SubRip) format subtitles files and "\
+ "create a .mp3 file with a text-to-speech rendition "\
+ "of the content."
version = "SubSpeech version 1.0"
parser = OptionParser(usage=usage, description=description,version=version)
parser.add_option("-q", "--quiet",
action="store_true", dest="quiet", default=False,
help="Don't print the subtitles as they are read.")
parser.add_option("-f", "--force",
action="store_true", dest="force_overwrite", default=False,
help="Force overwrite if the output file exists.")
parser.add_option("-v", "--voice",
action="store", dest="voice",
help="Which synthesized voice to use. Passed along to "\
+ "the 'say' command. Run 'say -v ?' for a list of "\
+ "available voices.")
parser.add_option("-r", "--rate",
action="store", type='int', dest="rate",
help="Speech rate. Passed along to 'say' directly.\n"\
+ "100 = Slow, 300 = Fast, 500 = Very Fast")
options, arguments = parser.parse_args()
if len(arguments) != 1:
parser.error("No subtitles file specified.")
basename = os.path.basename(os.path.splitext(arguments[0])[0])
check_output_file(basename, options.force_overwrite, options.quiet)
sound_files = parse_subtitles(arguments[0], options.quiet, options.voice, options.rate)
wav_file = combine_sound_files(sound_files)
compress_combined_file(wav_file, options.quiet)
rmtree(temppath)
if __name__ == '__main__':
main()
|
alxp/subspeech
|
subspeech.py
|
Python
|
gpl-3.0
| 8,733
|
'''
SASMOL: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from sasmol.test_sasmol.util import env,util
from unittest import main, skipIf
from mocker import Mocker, MockerTestCase, ANY, ARGS
import sasmol.sasmol as sasmol
import sasmol.sasop as sasop
import sasmol.sascalc as sascalc
import numpy
import warnings; warnings.filterwarnings('ignore')
import os
floattype=os.environ['SASSIE_FLOATTYPE']
DataPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','data','pdb_common')+os.path.sep
class Test_intg_sasop_Move_rotate(MockerTestCase):
def setUp(self):
self.o=sasmol.SasMol(0)
def assert_list_almost_equal(self,a,b,places=5):
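        # Recursively compare two (possibly nested) sequences element by element,
        # treating a pair of NaN values as equal.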
if (len(a)!=len(b)):
raise TypeError
else:
for i in range(len(a)):
if isinstance(a[i],(int,float,numpy.generic)):
if (numpy.isnan(a[i]) and numpy.isnan(b[i])): continue
self.assertAlmostEqual(a[i],b[i],places)
else:
self.assert_list_almost_equal(a[i],b[i],places)
def test_one_atom_pdb(self):
self.o.read_pdb(DataPath+'1ATM.pdb')
axis = 'x'
frame = 0
theta=numpy.pi/2.0
#
self.o.rotate(frame,axis,theta)
result_coor = self.o.coor()
result_com = self.o.calccom(0)
print '\nresult_coor:\n'; util.printfl([result_coor]); print '\nresult_com:\n',util.printfl([result_com])
#
expected_coor = numpy.array([[[73.944, -41.652, 41.799]]], floattype)
expected_com = numpy.array([73.944, -41.652, 41.799], floattype)
self.assert_list_almost_equal(expected_coor, result_coor,3)
self.assert_list_almost_equal(expected_com, result_com,3)
def test_two_aa_pdb(self):
self.o.read_pdb(DataPath+'2AAD.pdb')
axis = 'y'
frame = 0
theta=numpy.pi/2.0
#
self.o.rotate(frame,axis,theta)
result_coor = self.o.coor()
result_com = self.o.calccom(0)
print '\nresult_coor:\n'; util.printfl([result_coor]); print '\nresult_com:\n',util.printfl([result_com])
#
expected_coor = numpy.array([[[41.652, 41.799, -73.944], [40.456, 42.563, -74.229], [40.463, 43.093, -75.667], [39.401, 43.279, -76.264], [40.336, 43.734, -73.210], [39.926, 43.168, -71.856], [39.354, 44.782, -73.67], [39.946, 44.177, -70.721], [41.647, 43.330, -76.231], [41.730, 43.852, -77.592], [42.184, 42.820, -78.617], [42.656, 43.169, -79.712], [42.648, 45.097, -77.671], [43.910, 44.816, -77.054], [42.000, 46.273, -76.970]]], floattype)
expected_com = numpy.array([41.276, 43.708, -75.680], floattype)
self.assert_list_almost_equal(expected_coor, result_coor,1)
self.assert_list_almost_equal(expected_com, result_com,2)
def test_rna_pdb(self):
self.o.read_pdb(DataPath+'rna.pdb')
axis = 'z'
frame = 0
theta=numpy.pi/2.0
#
self.o.rotate(frame,axis,theta)
result_com = self.o.calccom(0)
print '\nresult_com:\n',util.printfl([result_com])
#
expected_com = numpy.array([-4.352, -8.033, 9.231], floattype)
self.assert_list_almost_equal(expected_com, result_com,2)
def test_1CRN_pdb(self):
self.o.read_pdb(DataPath+'1CRN.pdb')
axis = 'z'
frame = 0
theta=numpy.pi/2.0
#
self.o.rotate(frame,axis,theta)
result_com = self.o.calccom(0)
print '\nresult_com:\n',util.printfl([result_com])
#
expected_com = numpy.array([-9.775, 9.300, 6.978], floattype)
self.assert_list_almost_equal(expected_com, result_com,2)
@skipIf(os.environ['SASSIE_LARGETEST']=='n',"I am not testing large files")
def test_1KP8_pdb(self):
self.o.read_pdb(DataPath+'1KP8.pdb')
axis = 'x'
frame = 0
theta=12.0
#
self.o.rotate(frame,axis,theta)
result_com = self.o.calccom(0)
print '\nresult_com:\n',util.printfl([result_com])
#
expected_com = numpy.array([83.286, 14.288, 22.003], floattype)
self.assert_list_almost_equal(expected_com, result_com,2)
def tearDown(self):
pass
if __name__ == '__main__':
main()
|
madscatt/sasmol
|
src/python/test_sasmol/test_sasop/test_intg_sasop_Move_rotate.py
|
Python
|
gpl-3.0
| 4,901
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EstLanUser'
db.create_table(u'accounts_estlanuser', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('gender', self.gf('django.db.models.fields.CharField')(default=u'm', max_length=1)),
('date_of_birth', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=16, blank=True)),
('selected_avatar', self.gf('django.db.models.fields.CharField')(default=u'fb', max_length=2)),
))
db.send_create_signal(u'accounts', ['EstLanUser'])
# Adding M2M table for field groups on 'EstLanUser'
m2m_table_name = db.shorten_name(u'accounts_estlanuser_groups')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('estlanuser', models.ForeignKey(orm[u'accounts.estlanuser'], null=False)),
('group', models.ForeignKey(orm[u'auth.group'], null=False))
))
db.create_unique(m2m_table_name, ['estlanuser_id', 'group_id'])
# Adding M2M table for field user_permissions on 'EstLanUser'
m2m_table_name = db.shorten_name(u'accounts_estlanuser_user_permissions')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('estlanuser', models.ForeignKey(orm[u'accounts.estlanuser'], null=False)),
('permission', models.ForeignKey(orm[u'auth.permission'], null=False))
))
db.create_unique(m2m_table_name, ['estlanuser_id', 'permission_id'])
def backwards(self, orm):
# Deleting model 'EstLanUser'
db.delete_table(u'accounts_estlanuser')
# Removing M2M table for field groups on 'EstLanUser'
db.delete_table(db.shorten_name(u'accounts_estlanuser_groups'))
# Removing M2M table for field user_permissions on 'EstLanUser'
db.delete_table(db.shorten_name(u'accounts_estlanuser_user_permissions'))
models = {
u'accounts.estlanuser': {
'Meta': {'object_name': 'EstLanUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "u'm'", 'max_length': '1'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'selected_avatar': ('django.db.models.fields.CharField', [], {'default': "u'fb'", 'max_length': '2'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '16', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts']
|
Jyrno42/EstLan-Web
|
EstLan/accounts/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 7,021
|
#!/usr/bin/env python
'''
Written by Dejanira Araiza Illan, January 2016
'''
import re
import os
legcount=[]
badlegcount=[]
timeout2 = []
activations = []
boredoms = []
activations2 = []
for jj in range(10,51):
for i in range(1,161):
legs = 0
badlegs = 0
to2 = 0
for num,line in enumerate(open(os.getcwd()+'/data_diffseeds_table_mc/seed'+str(jj)+'/robotout'+str(i),'r')):
if re.search("GPL is OK",line):
legs = legs + 1
if re.search("GPL is not OK", line):
badlegs = badlegs + 1
if re.search('Timeout2',line) and not re.search('Discard',line):
to2 = to2 + 1
legcount.append(legs)
badlegcount.append(badlegs)
timeout2.append(to2)
activ = 0
bored = 0
activ2 = 0
for num,line in enumerate(open(os.getcwd()+'/data_diffseeds_table_mc/seed'+str(jj)+'/humanout'+str(i),'r')):
if re.search('State machine transitioning \'SendA1',line):
activ = activ + 1
#print line
if re.search('Bored', line):
bored = bored + 1
if re.search('State machine transitioning \'SendA2',line):
activ2 = activ2 + 1
activations.append(activ)
boredoms.append(bored)
activations2.append(activ2)
#print str(legs) +' , '+ str(badlegs) + ' , ' + str(to2) + ' ,,, '+ str(activ) +' , '+ str(bored) + ' , ' + str(activ2)
print "Robot reports:"
print legcount
print badlegcount
print timeout2
print "Human reports:"
print activations
print boredoms
print activations2
print "---"
c1 = 0
c2 = 0
c3 = 0
c4 = 0
c5 = 0
c6 = 0
c7 = 0
c8 = 0
c9 = 0
c10 = 0
c11 = 0
c12 = 0
c13 = 0
c14 = 0
for i in range(1,6561):
a1 = legcount.pop(0)
a2 = badlegcount.pop(0)
a3 = timeout2.pop(0)
a4 = activations.pop(0)
a5 = boredoms.pop(0)
a6 = activations2.pop(0)
if a4 == 0: #0 legs, timeout
c14 = c14 + 1
if a4 >= 1 and a5 >= 1 and a1 == 0 and a2 == 0: # 1 to 4 legs, timeout1 or 2
c13 = c13 + 1
if a4 >= 1 and a5 ==0 and a1 == 0 and a2 == 0: # 1 to 4 legs, timeout1
c13 = c13 + 1
if a4 == 1 and a5 == 1:
if a3 == 1: #1 leg 1 boredom, 1 timeout2
c12 = c12 + 1
else: #Timeout1
c12 = c12 + 1
if a4 == 1 and a2 == 1: # 1 leg, 1 bad
c11 = c11 + 1
if a4 == 1 and a1 == 1: # 1 leg, 1 good
c10 = c10 + 1
if a4 == 2 and a5 >= 1:
if a3 >= 1: #2 legs 1 boredom, 1 timeout2
c9 = c9 + 1
else:#Timeout1
c10 = c10 + 1
if a4 == 2 and a2 >= 1: # 2 legs, 1 bad
c8 = c8 + 1
if a4 == 2 and a1 >= 1: # 2 legs, 1 good
c7 = c7 + 1
if a4 == 3 and a5 >= 1:
if a3>=1:#3 legs 1 boredom, 1 timeout2
c6 = c6 + 1
else: #Timeout1
c6 = c6 + 1
if a4 == 3 and a2 >= 1: # 3 legs, 1 bad
c5 = c5 + 1
if a4 == 3 and a1 >= 1: # 3 legs, 1 good
c4 = c4 + 1
if a4 >= 4 and a5 >=1:
if a3 >= 1: # 4 legs 1 boredom, 1 timeout2
c3 = c3 + 1
else: #Timeout1
c3 = c3 + 1
if a4 >= 4 and a2 >=1: # 4 legs, 1 bad
c2 = c2 + 1
if a4 >= 4 and a1 >=1: #4 legs, 1 good
c1 = c1 + 1
print "item 1: ", c1
print "item 2: ", c2
print "item 3: ", c3
print "item 4: ", c4
print "item 5: ", c5
print "item 6: ", c6
print "item 7: ", c7
print "item 8: ", c8
print "item 9: ", c9
print "item 10: ", c10
print "item 11: ", c11
print "item 12: ", c12
print "item 13: ", c13
print "item 14: ", c14
|
robosafe/mc-vs-bdi
|
data/xproduct_mc_table.py
|
Python
|
gpl-3.0
| 3,144
|
#!/usr/bin/env python
#coding:utf-8
# Author : tuxpy
# Email : q8886888@qq.com.com
# Last modified : 2015-05-19 17:09:43
# Filename : utils.py
# Description :
from __future__ import unicode_literals, print_function
import os
def get_tmp_filepath(_file):
"""生成一个针对_file的临时文件名"""
_path = os.path.dirname(_file)
_tmp_filename = os.path.basename(_file)
if not _tmp_filename.startswith('.'):
_tmp_filename = '.' + _tmp_filename
_tmp_filename += '_tmp'
_tmp_filepath = os.path.join(_path, _tmp_filename)
if os.path.exists(_tmp_filepath):
return get_tmp_filepath(_tmp_filepath + '_1')
return _tmp_filepath
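# Illustrative usage (hypothetical path, not part of the original module):
# get_tmp_filepath('/tmp/data.txt') returns '/tmp/.data.txt_tmp'; if that name
# is already taken, the function recurses and keeps appending '_1'/'_tmp'
# suffixes until an unused path is found.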
|
lujinda/replace
|
replace/utils.py
|
Python
|
gpl-3.0
| 701
|
#!/usr/bin/env python
"""
======================
Compare 70/24 um Plots
======================
Plot a comparison between the Hi-GAL and MIPSGAL images for a clump. For now,
specifically for clump 5253.
"""
from __future__ import division
import aplpy
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patheffects as PathEffects
from astropy.io import fits
from astropy.wcs import WCS
import catalog
import image
fdir = '/home/svobodb/research/temp/hg70_mips/'
class Panel(object):
cnum = 5253 # v2.1.0
glon = 30.62426 # deg
glat = 0.5472881 # deg
ra = 281.3195 # deg
dec = -1.803885 # deg
width = 0.05 # deg
def __init__(self, fig):
self.fig = fig
self.cat = None
self.arc = None
self.gc = None
def _get_cats(self):
print ':: Reading in MIPSGAL cats'
self.cat = catalog.read_cat('mipsgal_catalog_lclip')
self.arc = catalog.read_cat('mipsgal_archive_lclip')
llo = self.glon - self.width
lhi = self.glon + self.width
blo = self.glat - self.width
bhi = self.glat + self.width
self.cat = self.cat.query('@llo < l < @lhi & @blo < b < @bhi')
self.arc = self.arc.query('@llo < l < @lhi & @blo < b < @bhi')
def get_image(self):
print ':: Reading in FITS'
# FIXME uses hard path
self.img = fits.open(fdir + self.filen)
self.gc = aplpy.FITSFigure(self.img, figure=self.fig,
subplot=self.subplot)
def set_label(self):
txt = plt.annotate(self.label, xy=(0.05, 0.055),
xycoords='axes fraction', color='black', fontsize=10)
txt.set_path_effects([PathEffects.withStroke(linewidth=1.5,
foreground='w')])
def show(self):
self.get_image()
self.gc.show_grayscale(**self.color_kwargs)
self.show_markers()
self.recenter()
self.set_label()
self.adjust()
class BgpsPanel(Panel):
label = r'${\rm BGPS \ 1.1 \ mm}$'
subplot = [0.13, 0.1, 0.35, 0.7]
color_kwargs = {
'vmin': -0.2,
'vmax': 0.3,
'stretch': 'linear',
'smooth': None,
}
def show_markers(self):
pass
def get_image(self):
print ':: Reading in FITS'
self.img = image.get_bgps_img(self.cnum, exten='map20')
self.gc = aplpy.FITSFigure(self.img, figure=self.fig,
subplot=self.subplot,
convention='calabretta')
def show_rind(self):
pass
def recenter(self):
self.gc.recenter(self.glon, self.glat, radius=self.width)
def adjust(self):
self.gc.tick_labels.set_font(size='x-small')
self.gc.axis_labels.set_font(size='small')
self.gc.tick_labels.set_xformat('dd.dd')
self.gc.tick_labels.set_yformat('d.dd')
self.gc.ticks.set_color('black')
class HigalPanel(Panel):
filen = 'destripe_l030_blue_wgls_rcal.fits'
label = r'${\rm Hi-GAL \ 70 \ \mu m}$'
subplot = [0.13, 0.1, 0.35, 0.7]
color_kwargs = {
'vmin': 450,
'vmax': 1300,
'stretch': 'arcsinh',
'smooth': None,
}
def show_markers(self):
pass
def show_rind(self):
eps = 9e-1 # fudge factor for contouring
rind = image.get_bgps_img(self.cnum, exten='labelmask')
rind[0].data[np.isnan(rind[0].data)] = 0
rind[0].data[rind[0].data != self.cnum] = 0
rind[0].data[rind[0].data == self.cnum] = 1
rind[0].data = rind[0].data.astype(float)
self.gc.show_contour(rind, levels=[eps], colors='black',
convention='calabretta')
self.gc.show_contour(rind, levels=[eps], colors='white',
linestyles='dashed', convention='calabretta')
rind.close()
def recenter(self):
self.gc.recenter(self.glon, self.glat, radius=self.width)
def adjust(self):
self.gc.tick_labels.hide()
self.gc.axis_labels.hide()
self.gc.ticks.set_color('black')
class MipsgalPanel(Panel):
filen = 'MG0310p005_024.fits'
label = r'${\rm MIPSGAL \ 24 \ \mu m}$'
subplot = [0.5, 0.1, 0.35, 0.7]
color_kwargs = {
'vmin': 25,
'vmax': 41,
'stretch': 'log',
'smooth': None,
}
def show_markers(self):
# markers
self._get_cats()
dx = -0.5 * 8e-3
dy = -1.0 * 8e-3
cdx = np.zeros(self.cat.ra.values.shape[0]) + dx * 0.7
cdy = np.zeros(self.cat.ra.values.shape[0]) + dy * 0.7
adx = np.zeros(self.arc.ra.values.shape[0]) + dx * 0.7
ady = np.zeros(self.arc.ra.values.shape[0]) + dy * 0.7
arrow = {'width': 2.5, 'head_width': 5, 'head_length': 3}
self.gc.show_arrows(self.cat.ra.values - dx, self.cat.dec.values - dy,
cdx, cdy, edgecolor='black', facecolor='black',
linewidths=0.3, **arrow)
self.gc.show_arrows(self.arc.ra.values - dx, self.arc.dec.values - dy,
                            adx, ady, edgecolor='white', facecolor='white',
linewidths=0.3, **arrow)
def show_rind(self):
pass
def recenter(self):
self.gc.recenter(self.ra, self.dec, radius=self.width / 2.35)
def adjust(self):
self.gc.tick_labels.hide()
self.gc.axis_labels.hide()
self.gc.ticks.hide()
class Plot(object):
def save(self, outfilen='comp70'):
for ext in ['png', 'pdf', 'eps']:
name = outfilen + '.' + ext
print ':: Saving to {0}'.format(name)
self.fig.savefig(name, bbox_inches='tight', dpi=900)
plt.close()
class TwoPlot(Plot):
def __init__(self):
self.fig = plt.figure(figsize=(8, 4))
self.hp = HigalPanel(self.fig)
self.mp = MipsgalPanel(self.fig)
self.hp.show()
self.mp.show()
class ThreePlot(Plot):
def __init__(self):
self.fig = plt.figure(figsize=(8, 2.7))
BgpsPanel.subplot = [0.05, 0.1, 0.27, 0.8]
HigalPanel.subplot = [0.35, 0.1, 0.27, 0.8]
MipsgalPanel.subplot = [0.65, 0.1, 0.27, 0.8]
self.bp = BgpsPanel(self.fig)
self.hp = HigalPanel(self.fig)
self.mp = MipsgalPanel(self.fig)
self.bp.show()
self.hp.show()
self.mp.show()
def make_twoplot():
tp = TwoPlot()
tp.save(outfilen='comp70_twoplot')
def make_threeplot():
tp = ThreePlot()
tp.save(outfilen='comp70_threeplot')
|
autocorr/besl
|
besl/bplot/comp70.py
|
Python
|
gpl-3.0
| 6,645
|
#!/usr/bin/python2
'''
This is an example of how to use the closure feature to do some
OO work.
Notice:
- We treat the 'Person' function as a constructor.
- We call it with a capital first letter.
- We pass it the arguments needed to create the instance.
- In order to have lots of data in the closure we simply
store a flexible and big data structure in the closure (in this
case a dictionary).
- Because we want to return many methods and don't want tuples with
dozens of elements, we return all method pointers in a dictionary as
well. This allows the user to call our methods by name instead of by
position in some returned tuple.
'''
def Person(name, age):
data = {}
data['name'] = name
data['age'] = age
def setName(iname):
data['name'] = iname
def getName():
return data['name']
def setAge(iage):
data['age'] = iage
def getAge():
return data['age']
def printMe():
print('name', data['name'])
print('age', data['age'])
methods = {}
methods['setName'] = setName
methods['getName'] = getName
methods['setAge'] = setAge
methods['getAge'] = getAge
methods['printMe'] = printMe
return methods
p1 = Person('Bilbo', 111)
p1['setName']('Sam')
p1['printMe']()
p2 = Person('Frodo', 33)
p2['setName']('Sauron')
p2['printMe']()
p1['printMe']()
|
nonZero/demos-python
|
src/examples/short/closure/closure_oo.py
|
Python
|
gpl-3.0
| 1,354
|
''' Password based key-derivation function - PBKDF2 '''
# This module is for Python 3
# Copyright (c) 2011, Stefano Palazzo <stefano.palazzo@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import hmac
import hashlib
import os
import struct
def pbkdf2(digestmod, password: 'bytes', salt, count, dk_length) -> 'bytes':
'''
PBKDF2, from PKCS #5 v2.0:
http://tools.ietf.org/html/rfc2898
For proper usage, see NIST Special Publication 800-132:
http://csrc.nist.gov/publications/PubsSPs.html
The arguments for this function are:
digestmod
            a cryptographic hash constructor, such as hashlib.sha256
which will be used as an argument to the hmac function.
Note that the performance difference between sha1 and
sha256 is not very big. New applications should choose
sha256 or better.
password
The arbitrary-length password (passphrase) (bytes)
salt
A bunch of random bytes, generated using a cryptographically
strong random number generator (such as os.urandom()). NIST
            recommends the salt be _at least_ 128 bits (16 bytes) long.
count
The iteration count. Set this value as large as you can
            tolerate. NIST recommends that the absolute minimum value
be 1000. However, it should generally be in the range of
tens of thousands, or however many cause about a half-second
delay to the user.
dk_length
            The length of the desired key in bytes. This doesn't need
to be the same size as the hash functions digest size, but
it makes sense to use a larger digest hash function if your
key size is large.
'''
def pbkdf2_function(pw, salt, count, i):
# in the first iteration, the hmac message is the salt
        # concatenated with the block number in the form of \x00\x00\x00\x01
r = u = hmac.new(pw, salt + struct.pack(">i", i), digestmod).digest()
for i in range(2, count + 1):
# in subsequent iterations, the hmac message is the
# previous hmac digest. The key is always the users password
# see the hmac specification for notes on padding and stretching
u = hmac.new(pw, u, digestmod).digest()
# this is the exclusive or of the two byte-strings
r = bytes(i ^ j for i, j in zip(r, u))
return r
dk, h_length = b'', digestmod().digest_size
# we generate as many blocks as are required to
    # concatenate to the desired key size:
blocks = (dk_length // h_length) + (1 if dk_length % h_length else 0)
for i in range(1, blocks + 1):
dk += pbkdf2_function(password, salt, count, i)
    # The length of the key will be dk_length rounded up to the nearest
    # multiple of the hash block size, i.e. larger than or equal to it. We
    # slice it to the desired length before returning it.
return dk[:dk_length]
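# Minimal usage sketch (illustrative values, not part of the original module):
# the salt should come from a strong source such as os.urandom() and the
# iteration count should be tuned to the host, as discussed in the docstring.
#
#     salt = os.urandom(16)
#     key = pbkdf2(hashlib.sha256, b'my passphrase', salt, 100000, 32)
#     assert len(key) == 32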
def test():
'''
PBKDF2 HMAC-SHA1 Test Vectors:
http://tools.ietf.org/html/rfc6070
'''
# One of the test vectors has been removed because it takes
# too long to calculate. This was a test vector of 2^24 iterations.
# Since there is no difference between integers and long integers
# in python3, this will work as well as the others.
rfc6070_test_vectors = (
(b"password", b"salt", 1, 20),
(b"password", b"salt", 2, 20),
(b"password", b"salt", 4096, 20),
(b"passwordPASSWORDpassword",
b"saltSALTsaltSALTsaltSALTsaltSALTsalt", 4096, 25),
(b"pass\0word", b"sa\0lt", 4096, 16),
)
rfc6070_results = (
b"\x0c\x60\xc8\x0f\x96\x1f\x0e\x71\xf3\xa9\xb5\x24\xaf\x60\x12\x06"
b"\x2f\xe0\x37\xa6",
b"\xea\x6c\x01\x4d\xc7\x2d\x6f\x8c\xcd\x1e\xd9\x2a\xce\x1d\x41\xf0"
b"\xd8\xde\x89\x57",
b"\x4b\x00\x79\x01\xb7\x65\x48\x9a\xbe\xad\x49\xd9\x26\xf7\x21\xd0"
b"\x65\xa4\x29\xc1",
b"\x3d\x2e\xec\x4f\xe4\x1c\x84\x9b\x80\xc8\xd8\x36\x62\xc0\xe4\x4a"
b"\x8b\x29\x1a\x96\x4c\xf2\xf0\x70\x38",
b"\x56\xfa\x6a\xa7\x55\x48\x09\x9d\xcc\x37\xd7\xf0\x34\x25\xe0\xc3",
)
for v, r in zip(rfc6070_test_vectors, rfc6070_results):
assert pbkdf2(hashlib.sha1, *v) == r, v
if __name__ == '__main__':
test()
print("all tests passed")
|
bobintetley/asm3
|
src/asm3/pbkdf2/pbkdf23.py
|
Python
|
gpl-3.0
| 5,091
|
# Copyright (C) 2011, 2012, 2014, 2015 David Maxwell and Constantine Khroulev
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Module containing helper functions for using the SIA stress balance
model."""
import PISM
def computeSIASurfaceVelocities(modeldata, siasolver=PISM.SIAFD):
"""Generates surface horizontal velocities corresponding to solving
the SIA with zero basal sliding.
:param `modeldata`: :class:`PISM.model.ModelData` containing
variables and model physics
:param `siasolver`: specific class used for solving the SIA
"""
md = modeldata
grid = md.grid
sia = siasolver(md.grid, md.enthalpyconverter)
sia.init()
zero_sliding = PISM.IceModelVec2V()
zero_sliding.create(grid, 'basal_velocity', False)
zero_sliding.set(0.)
sia.update(zero_sliding, False)
u = sia.velocity_u()
v = sia.velocity_v()
vel_sia = PISM.model.create2dVelocityVec(grid, name="_sia", stencil_width=1)
tmp = PISM.IceModelVec2S()
tmp.create(grid, 'tmp', False)
u.getSurfaceValues(tmp, md.vecs.thk)
vel_sia.set_component(0, tmp)
v.getSurfaceValues(tmp, md.vecs.thk)
vel_sia.set_component(1, tmp)
return vel_sia
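# Illustrative call (assumes a fully populated PISM.model.ModelData instance
# named "md" constructed elsewhere; this module does not build one itself):
#     vel_sia = computeSIASurfaceVelocities(md)
#     # an alternative SIA solver class may be passed explicitly:
#     vel_sia = computeSIASurfaceVelocities(md, siasolver=PISM.SIAFD)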
|
talbrecht/pism_pik07
|
site-packages/PISM/sia.py
|
Python
|
gpl-3.0
| 1,881
|
#!/home/vyos/vyos-api/bin/python
import pytest
import sys
import os
sys.path.append('/home/vyos/vyos-api/ServiceManager')
from ConfigInterfaces import configinterface as ifacecfg
import validation as vld
def test_ethernet_config():
    valid_actions = ["set", "delete"]
    # Actions outside the supported set should raise ActionError.
    # (The invalid sample values below are illustrative.)
    for act in ["add", "remove"]:
        if act not in valid_actions:
            with pytest.raises(vld.ActionError) as e:
                ifacecfg.ethernet_config(act)
            assert e.value.message == "[Critical] unrecognized action!"
def test_addr_interface():
pass
def test_hwid():
pass
def test_iface_desc():
pass
def ttest_fw_iface():
pass
def test_vlan_desc():
pass
def test_vlan_addr():
pass
def test_del_vlan():
pass
|
abessifi/pyatta
|
tests/servicemanager/test_config_interfaces.py
|
Python
|
gpl-3.0
| 722
|
# -*- coding: utf-8 -*-
import subprocess
import os
import sys
import re
import errno
# This variable contains a reference version of the current code-base. It is
# updated by release and dev-cycle scripts.
BASE_VERSION = '2021.12.21'
# This commit is the reference commit of the BASE_VERSION above. Technically, it
# is the commit right before the BASE_VERSION, because the release script will
# change these fields and only create the actual release commit after the changes.
BASE_COMMIT = '5e2c7a82890fe274291b231e5ae0ea3491d7d8c1'
# This file is created as part of our Docker build and is looked at as a
# fall-back, should no git environment be available. The VERSION_INFO_PATH file
# contains the "git describe" output of the build environment.
VERSION_INFO_PATH = '/home/git-version'
def get_version():
"""
Return output of "git describe" executed in the directory of this file. If
this results in an error, "unknown" is returned.
"""
try:
dir = os.path.dirname(os.path.realpath(__file__))
# Universal newlines is used to get both Python 2 and 3 to use text mode.
p = subprocess.Popen("/usr/bin/git describe", cwd=os.path.dirname(dir),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
(out, error) = p.communicate()
if error:
# Fall-back to docker version file, if it exists
version_file = open(VERSION_INFO_PATH, 'r')
describe_info = version_file.read().rstrip().encode('utf-8').decode('utf-8')
else:
describe_info = out.rstrip().encode('utf-8').decode('utf-8')
return describe_info
except:
return '{}-unknown'.format(BASE_VERSION)
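# Example of the value returned on a git checkout (the commit count and short
# hash below are purely illustrative): '2021.12.21-14-g5e2c7a8'. When neither
# git nor the Docker version file is available, the result is '2021.12.21-unknown'.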
def relative(*path_components):
"""
Returns a path relative to the directory this file is in
"""
base = os.path.abspath(os.path.dirname(__file__))
all_parts = [base] + list(path_components)
return os.path.realpath(os.path.join(*all_parts))
|
catmaid/CATMAID
|
django/projects/mysite/utils.py
|
Python
|
gpl-3.0
| 2,015
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Mouse handling for a browser tab."""
from PyQt5.QtCore import QObject, QEvent, Qt, QTimer
from qutebrowser.config import config
from qutebrowser.utils import message, log, usertypes, qtutils, objreg
from qutebrowser.keyinput import modeman
class ChildEventFilter(QObject):
"""An event filter re-adding MouseEventFilter on ChildEvent.
This is needed because QtWebEngine likes to randomly change its
focusProxy...
FIXME:qtwebengine Add a test for this happening
Attributes:
_filter: The event filter to install.
_widget: The widget expected to send out childEvents.
"""
def __init__(self, eventfilter, widget, win_id, parent=None):
super().__init__(parent)
self._filter = eventfilter
assert widget is not None
self._widget = widget
self._win_id = win_id
def eventFilter(self, obj, event):
"""Act on ChildAdded events."""
if event.type() == QEvent.ChildAdded:
child = event.child()
log.mouse.debug("{} got new child {}, installing filter".format(
obj, child))
assert obj is self._widget
child.installEventFilter(self._filter)
if qtutils.version_check('5.11', compiled=False, exact=True):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-68076
pass_modes = [usertypes.KeyMode.command,
usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno]
if modeman.instance(self._win_id).mode not in pass_modes:
tabbed_browser = objreg.get('tabbed-browser',
scope='window',
window=self._win_id)
current_index = tabbed_browser.widget.currentIndex()
try:
widget_index = tabbed_browser.widget.indexOf(
self._widget.parent())
except RuntimeError:
widget_index = -1
if current_index == widget_index:
QTimer.singleShot(0, self._widget.setFocus)
elif event.type() == QEvent.ChildRemoved:
child = event.child()
log.mouse.debug("{}: removed child {}".format(obj, child))
return False
class MouseEventFilter(QObject):
"""Handle mouse events on a tab.
Attributes:
_tab: The browsertab object this filter is installed on.
_handlers: A dict of handler functions for the handled events.
_ignore_wheel_event: Whether to ignore the next wheelEvent.
_check_insertmode_on_release: Whether an insertmode check should be
done when the mouse is released.
"""
def __init__(self, tab, *, parent=None):
super().__init__(parent)
self._tab = tab
self._handlers = {
QEvent.MouseButtonPress: self._handle_mouse_press,
QEvent.MouseButtonRelease: self._handle_mouse_release,
QEvent.Wheel: self._handle_wheel,
QEvent.ContextMenu: self._handle_context_menu,
}
self._ignore_wheel_event = False
self._check_insertmode_on_release = False
def _handle_mouse_press(self, e):
"""Handle pressing of a mouse button."""
is_rocker_gesture = (config.val.input.rocker_gestures and
e.buttons() == Qt.LeftButton | Qt.RightButton)
if e.button() in [Qt.XButton1, Qt.XButton2] or is_rocker_gesture:
self._mousepress_backforward(e)
return True
self._ignore_wheel_event = True
if e.button() != Qt.NoButton:
self._tab.elements.find_at_pos(e.pos(),
self._mousepress_insertmode_cb)
return False
def _handle_mouse_release(self, _e):
"""Handle releasing of a mouse button."""
# We want to make sure we check the focus element after the WebView is
# updated completely.
QTimer.singleShot(0, self._mouserelease_insertmode)
return False
def _handle_wheel(self, e):
"""Zoom on Ctrl-Mousewheel.
Args:
e: The QWheelEvent.
"""
if self._ignore_wheel_event:
# See https://github.com/qutebrowser/qutebrowser/issues/395
self._ignore_wheel_event = False
return True
if e.modifiers() & Qt.ControlModifier:
divider = config.val.zoom.mouse_divider
if divider == 0:
return False
factor = self._tab.zoom.factor() + (e.angleDelta().y() / divider)
if factor < 0:
return False
perc = int(100 * factor)
message.info("Zoom level: {}%".format(perc), replace=True)
self._tab.zoom.set_factor(factor)
elif e.modifiers() & Qt.ShiftModifier:
if e.angleDelta().y() > 0:
self._tab.scroller.left()
else:
self._tab.scroller.right()
return True
return False
def _handle_context_menu(self, _e):
"""Suppress context menus if rocker gestures are turned on."""
return config.val.input.rocker_gestures
def _mousepress_insertmode_cb(self, elem):
"""Check if the clicked element is editable."""
if elem is None:
# Something didn't work out, let's find the focus element after
# a mouse release.
log.mouse.debug("Got None element, scheduling check on "
"mouse release")
self._check_insertmode_on_release = True
return
if elem.is_editable():
log.mouse.debug("Clicked editable element!")
if config.val.input.insert_mode.auto_enter:
modeman.enter(self._tab.win_id, usertypes.KeyMode.insert,
'click', only_if_normal=True)
else:
log.mouse.debug("Clicked non-editable element!")
if config.val.input.insert_mode.auto_leave:
modeman.leave(self._tab.win_id, usertypes.KeyMode.insert,
'click', maybe=True)
def _mouserelease_insertmode(self):
"""If we have an insertmode check scheduled, handle it."""
if not self._check_insertmode_on_release:
return
self._check_insertmode_on_release = False
def mouserelease_insertmode_cb(elem):
"""Callback which gets called from JS."""
if elem is None:
log.mouse.debug("Element vanished!")
return
if elem.is_editable():
log.mouse.debug("Clicked editable element (delayed)!")
modeman.enter(self._tab.win_id, usertypes.KeyMode.insert,
'click-delayed', only_if_normal=True)
else:
log.mouse.debug("Clicked non-editable element (delayed)!")
if config.val.input.insert_mode.auto_leave:
modeman.leave(self._tab.win_id, usertypes.KeyMode.insert,
'click-delayed', maybe=True)
self._tab.elements.find_focused(mouserelease_insertmode_cb)
def _mousepress_backforward(self, e):
"""Handle back/forward mouse button presses.
Args:
e: The QMouseEvent.
"""
if e.button() in [Qt.XButton1, Qt.LeftButton]:
# Back button on mice which have it, or rocker gesture
if self._tab.history.can_go_back():
self._tab.history.back()
else:
message.error("At beginning of history.")
elif e.button() in [Qt.XButton2, Qt.RightButton]:
# Forward button on mice which have it, or rocker gesture
if self._tab.history.can_go_forward():
self._tab.history.forward()
else:
message.error("At end of history.")
def eventFilter(self, obj, event):
"""Filter events going to a QWeb(Engine)View."""
evtype = event.type()
if evtype not in self._handlers:
return False
if obj is not self._tab.event_target():
log.mouse.debug("Ignoring {} to {}".format(
event.__class__.__name__, obj))
return False
return self._handlers[evtype](event)
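# Wiring sketch (hypothetical; the actual installation happens elsewhere in
# qutebrowser's browsertab code, not in this file): the filter is meant to be
# installed on the tab's event target, e.g.
#   mouse_filter = MouseEventFilter(tab, parent=tab)
#   tab.event_target().installEventFilter(mouse_filter)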
|
airodactyl/qutebrowser
|
qutebrowser/browser/mouse.py
|
Python
|
gpl-3.0
| 9,296
|
# test cigar strings
#############################################################################
#############################################################################
# #
# Copyright (C) 2013 - 2014 Genome Research Ltd. #
# #
# Author: Hannes Ponstingl (hp3@sanger.ac.uk) #
# #
# This file is part of SMALT. #
# #
# SMALT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# This program is distributed in the hope that it will be useful, but #
# WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
#############################################################################
PROGNAM = "../src/smalt"
FNAM_REF = "cigar_ref.fa.gz"
FNAM_READ1 = "cigar_read1.fq"
FNAM_READ2 = "cigar_read2.fq"
TMPFIL_PREFIX = "TMPcig"
KMER = 13
NSKIP = 2
def smalt_index(df,index_name, fasta_name, kmer, nskip):
from sys import exit
from subprocess import call
tup = (PROGNAM, 'index',
'-k', '%i' % (int(kmer)),
'-s', '%i' % (int(nskip)),
index_name,
fasta_name)
df.call(tup, "when indexing")
def smalt_map(df, oufilnam, indexnam, readfil, matefil, typ="fastq", flags=[]):
from sys import exit
from subprocess import call
tup = [PROGNAM, 'map']
if len(flags) > 0:
tup.extend(flags)
tup.extend([
'-f', typ,
'-o', oufilnam,
indexnam,
readfil, matefil])
df.call(tup, "when mapping")
if __name__ == '__main__':
from testdata import DataFiles
df = DataFiles()
refnam = df.joinData(FNAM_REF)
readnamA = df.joinData(FNAM_READ1)
readnamB = df.joinData(FNAM_READ2)
indexnam = df.addIndex(TMPFIL_PREFIX)
oufilnam = df.addTMP(TMPFIL_PREFIX + ".sam")
smalt_index(df,indexnam, refnam, KMER, NSKIP)
smalt_map(df,oufilnam, indexnam, readnamA, readnamB, "sam", ["-x"])
#print "Test ok."
df.cleanup()
exit()
|
rcallahan/smalt
|
test/cigar_test.py
|
Python
|
gpl-3.0
| 3,208
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014, David Poulter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import click
from . import configurator
from ..commands import remove_mediafile
from ..models import MediaFile
@click.command("rm", short_help="Remove mediafiles from a database.")
@click.argument("files", nargs=-1)
@configurator
def command(config, files):
if len(files) == 1 and files[0] == "all":
files = [mediafile.id for mediafile in
config.session.query(MediaFile).all()]
for param in files:
mediafile = MediaFile.by_id_or_name(config.session, param)
if not mediafile:
click.secho("MediaFile {} not found".format(param), fg="red")
continue
remove_mediafile(config.session, mediafile)
if "sqlite://" in config.database:
config.session.execute("VACUUM")
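# CLI sketch (the top-level entry point name "consyn" is assumed here):
#   consyn rm all           # remove every MediaFile in the database
#   consyn rm <id-or-name>  # remove a single MediaFile by id or name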
|
davebrent/consyn
|
consyn/cli/rm.py
|
Python
|
gpl-3.0
| 1,493
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
UF = [
('AC', 'Acre'),
('AL', 'Alagoas'),
('AP', 'Amapá'),
('AM', 'Amazonas'),
('BA', 'Bahia'),
('CE', 'Ceará'),
('DF', 'Distrito Federal'),
('ES', 'Espírito Santo'),
('GO', 'Goiás'),
('MA', 'Maranhão'),
('MT', 'Mato Grosso'),
('MS', 'Mato Grosso do Sul'),
('MG', 'Minas Gerais'),
('PR', 'Paraná'),
('PB', 'Paraíba'),
('PA', 'Pará'),
('PE', 'Pernambuco'),
('PI', 'Piauí'),
('RJ', 'Rio de Janeiro'),
('RN', 'Rio Grande do Norte'),
('RS', 'Rio Grande do Sul'),
('RO', 'Rondônia'),
('RR', 'Roraima'),
('SC', 'Santa Catarina'),
('SE', 'Sergipe'),
('SP', 'São Paulo'),
('TO', 'Tocantins'),
('EX', 'Exterior'),
]
YES_NO_CHOICES = [(None, _('----')), (False, _('Não')), (True, _('Sim'))]
def str2bool(v):
return v in ('Sim', 'True')
SEXO_CHOICES = [('M', _('Masculino')), ('F', _('Feminino'))]
def from_to(start, end):
return list(range(start, end + 1))
def make_pagination(index, num_pages):
PAGINATION_LENGTH = 10
if num_pages <= PAGINATION_LENGTH:
return from_to(1, num_pages)
else:
if index - 1 <= 5:
tail = [num_pages - 1, num_pages]
head = from_to(1, PAGINATION_LENGTH - 3)
else:
if index + 1 >= num_pages - 3:
tail = from_to(index - 1, num_pages)
else:
tail = [index - 1, index, index + 1,
None, num_pages - 1, num_pages]
head = from_to(1, PAGINATION_LENGTH - len(tail) - 1)
return head + [None] + tail
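# Illustration only (None marks an ellipsis slot in the pagination widget):
#   make_pagination(3, 20)  -> [1, 2, 3, 4, 5, 6, 7, None, 19, 20]
#   make_pagination(12, 30) -> [1, 2, 3, None, 11, 12, 13, None, 29, 30]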
|
interlegis/atendimento
|
atendimento/utils.py
|
Python
|
gpl-3.0
| 1,685
|
# -*- coding: utf-8 -*-
from rest_framework import serializers
from .models import (
Offer,
Questionnaire,
Submission)
class OfferSerializer(serializers.ModelSerializer):
class Meta:
model = Offer
fields = (
'pk',
'created',
'modified',
'rotation_started',
'rotation_ended',
'offer_name',
'offer_type',
'score_min',
'score_max',
'bank',
)
class QuestionnaireSerializer(serializers.ModelSerializer):
class Meta:
model = Questionnaire
fields = (
'pk',
'created',
'modified',
'name',
'birthday',
'phone',
'passport',
'score',
)
class SubmissionSerializer(serializers.ModelSerializer):
offer = OfferSerializer(read_only=True)
questionnaire = QuestionnaireSerializer(read_only=True)
class Meta:
model = Submission
fields = (
'pk',
'created',
'submitted',
'offer',
'questionnaire',
'status',
)
class SubmissionSerializerPost(serializers.ModelSerializer):
class Meta:
model = Submission
fields = (
'pk',
'created',
'submitted',
'offer',
'questionnaire',
'status',
)
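# Illustrative use (comment only; standard DRF patterns, not taken from this repo):
#   OfferSerializer(Offer.objects.all(), many=True).data
# SubmissionSerializerPost accepts plain primary keys for 'offer' and
# 'questionnaire', whereas SubmissionSerializer nests the full serialized objects.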
|
mxmaslin/Test-tasks
|
django_test_tasks/old_django_test_tasks/apps/loans/serializers.py
|
Python
|
gpl-3.0
| 1,466
|
#!/usr/bin/env python
# Copyright 2013 National Renewable Energy Laboratory, Golden CO, USA
# This file is part of NREL MatDB.
#
# NREL MatDB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NREL MatDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NREL MatDB. If not, see <http://www.gnu.org/licenses/>.
import datetime, re, sys, traceback, os.path
import xml.etree.cElementTree as etree
import numpy as np
#====================================================================
def badparms( msg):
print '\nError: %s' % (msg,)
print 'Parms:'
print ' -bugLev <int> debug level'
print ' -inFile <string> input file'
print ' -maxLev <int> max xml print level'
print ''
sys.exit(1)
#====================================================================
def main():
'''
Test driver: Extracts info from a VASP vasprun.xml file.
Command line parameters:
================ ========= ==============================================
Parameter Type Description
================ ========= ==============================================
**-bugLev** integer Debug level. Normally 0.
**-inFile** string Input file
  **-maxLev**      integer   Max xml print level
================ ========= ==============================================
'''
bugLev = 0
inFile = None
maxLev = 0
if len(sys.argv) % 2 != 1:
badparms('Parms must be key/value pairs')
for iarg in range( 1, len(sys.argv), 2):
key = sys.argv[iarg]
val = sys.argv[iarg+1]
if key == '-bugLev': bugLev = int( val)
elif key == '-inFile': inFile = val
elif key == '-maxLev': maxLev = int( val)
else: badparms('unknown key: "%s"' % (key,))
if bugLev == None: badparms('parm not specified: -bugLev')
if inFile == None: badparms('parm not specified: -inFile')
if maxLev == None: badparms('parm not specified: -maxLev')
resObj = ResClass()
parseXml( bugLev, inFile, maxLev, resObj)
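# Example invocation (a sketch; the script name is illustrative):
#   python parse_vasprun.py -bugLev 5 -inFile vasprun.xml -maxLev 2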
#====================================================================
#====================================================================
class ResClass:
'''
An empty class used as a data container for results.
'''
def __str__(self):
keys = self.__dict__.keys()
keys.sort()
msg = ''
for key in keys:
val = self.__dict__[key]
stg = str( val)
if stg.find('\n') >= 0: sep = '\n'
else: sep = ' '
msg += ' %s: type: %s val:%s%s\n' % (key, type(val), sep, val,)
return msg
#====================================================================
#====================================================================
# Fills resObj.
def parseXml( bugLev, inFile, maxLev, resObj):
'''
Extracts info from the vasprun.xml file from a VASP run,
using the Python xml.etree.cElementTree API.
**Parameters**:
* bugLev (int): Debug level. Normally 0.
* inFile (str): Path of the input vasprun.xml file.
* resObj (class ResClass): data object: we set attributes here.
**Returns**:
* None
'''
try:
tree = etree.parse( inFile)
except Exception, exc:
throwerr(('parseXml: invalid xml in file: "%s"\n'
+ ' Msg: %s\n') % (inFile, repr(exc),))
root = tree.getroot()
if bugLev >= 1: printNode( root, 0, maxLev) # node, curLev, maxLev
if bugLev >= 5: print '\n===== program, version, date etc =====\n'
# xxx program, version, subversion, etc
# PyLada: vasp/extract/base.py: datetime()
  # OUTCAR: use the 1 occurrence of:
# executed on LinuxIFC date 2013.03.11 09:32:24
dtStg = getString( root, 'generator/i[@name=\'date\']')
tmStg = getString( root, 'generator/i[@name=\'time\']')
dateFmtIn = '%Y %m %d %H:%M:%S'
dateFmtOut = '%Y-%m-%d %H:%M:%S'
resObj.runDate = datetime.datetime.strptime(
'%s %s' % (dtStg, tmStg), dateFmtIn)
if bugLev >= 5: print 'runDate: %s' % (resObj.runDate.strftime( dateFmtOut),)
# iterTimes
  # Each node has cpuTime, wallTime:
# <time name='totalsc'>22.49 24.43</time>
nodes = root.findall('calculation/time[@name=\'totalsc\']')
iterCpuTimes = []
iterRealTimes = []
for node in nodes:
txt = node.text
toks = txt.split()
if len(toks) == 1:
# Kluge: sometimes the two time fields run together:
# <time name="totalsc">18560.1718566.89</time>
# should be:
# <time name="totalsc">18560.17 18566.89</time>
# In this case, try to split it,
# or just use the first time for both values.
tok = toks[0]
ix = tok.find('.')
if ix < 0: throwerr('invalid times: %s' % (etree.tostring(node),))
iy = tok.find('.', ix + 1)
if iy < 0 or iy != len(tok) - 3:
throwerr('invalid times: %s' % (etree.tostring(node),))
tmStga = tok[:ix+3]
tmStgb = tok[ix+3:]
elif len(toks) == 2:
tmStga = toks[0]
tmStgb = toks[1]
else: throwerr('invalid times: %s' % (etree.tostring(node),))
iterCpuTimes.append( float( tmStga))
iterRealTimes.append( float( tmStgb))
resObj.iterCpuTimes = iterCpuTimes
resObj.iterRealTimes = iterRealTimes
resObj.iterTotalTime = np.sum( iterRealTimes)
if bugLev >= 5:
print 'iterCpuTimes: %s' % (resObj.iterCpuTimes,)
print 'iterRealTimes: %s' % (resObj.iterRealTimes,)
print 'iterTotalTime: %s' % (resObj.iterTotalTime,)
if bugLev >= 5: print '\n===== incar parameters =====\n'
# algo
# PyLada: vasp/extract/base.py: algo()
  # OUTCAR: use the 1 occurrence of:
# ALGO = Fast
resObj.algo = getString( root, 'incar/i[@name=\'ALGO\']')
if bugLev >= 5: print 'algo: "%s"' % (resObj.algo,)
ediff = getScalar( root, 'incar/i[@name=\'EDIFF\']', float)
resObj.ediff = ediff
if bugLev >= 5: print 'ediff: %g' % (ediff,)
# encut
# PyLada: vasp/extract/base.py: encut()
  # OUTCAR: use the first occurrence of:
# ENCUT = 252.0 eV 18.52 Ry 4.30 a.u. 4.08 4.08 15.92*2*pi/ulx,y,z
# ENCUT = 252.0
resObj.encut_ev = getScalar( root, 'incar/i[@name=\'ENCUT\']', float)
if bugLev >= 5: print 'encut_ev: %g' % (resObj.encut_ev,)
resObj.isif = getScalar( root, 'incar/i[@name=\'ISIF\']', int)
if bugLev >= 5: print 'isif: %g' % (resObj.isif,)
# ldauType
# PyLada: vasp/extract/base.py: LDAUType()
  # OUTCAR: use the first occurrence of:
# LDA+U is selected, type is set to LDAUTYPE = 2
# LDAUTYPE = 2
#rawLdauType = getScalar( root, 'incar/v[@name=\'LDAUTYPE\']', int)
#if rawLdauType == 1: resObj.ldauType = 'liechtenstein'
#elif rawLdauType == 2: resObj.ldauType = 'dudarev'
#else: throwerr('unknown rawLdauType: %d' % (rawLdauType,))
#if bugLev >= 5:
# print 'rawLdauType: %d ldauType: %s' % (rawLdauType, resObj.ldauType,)
resObj.systemName = getString( root, 'incar/i[@name=\'SYSTEM\']')
if bugLev >= 5: print 'systemName: "%s"' % (resObj.systemName,)
if bugLev >= 5: print '\n===== general parameters =====\n'
resObj.generalName = getString(
root, 'parameters/separator[@name=\'general\']/i[@name=\'SYSTEM\']')
if bugLev >= 5: print 'generalName: "%s"' % (resObj.generalName,)
if bugLev >= 5: print '\n===== electronic parameters =====\n'
lst = root.findall('parameters/separator[@name=\'electronic\']')
if len(lst) != 1: throwerr('electronic parameters not found')
elecNode = lst[0]
# ialgo
  # PyLada: use the 1 occurrence of:
# Electronic relaxation 2 (details)
# IALGO = 68 algorithm
resObj.ialgo = getScalar( elecNode, 'i[@name=\'IALGO\']', int)
if bugLev >= 5: print 'ialgo: %d' % (resObj.ialgo,)
# numBand = nbands
# Caution: in some cases NBANDS != eigenMrr['eigene'].shape[2]
# So we use the eigene dimension instead.
# See further below.
prmNumBand = getScalar( elecNode, 'i[@name=\'NBANDS\']', int)
if bugLev >= 5: print 'prmNumBand: %d' % (prmNumBand,)
# numElectron = nelect
# PyLada: vasp/extract/base.py: nelect()
  # OUTCAR: use the 1 occurrence of:
# NELECT = 48.0000 total number of electrons
resObj.numElectron = getScalar( elecNode, 'i[@name=\'NELECT\']', float)
if bugLev >= 5: print 'numElectron: %d' % (resObj.numElectron,)
# icharg
resObj.icharg = getScalar(
elecNode,
'separator[@name=\'electronic startup\']/i[@name=\'ICHARG\']',
int)
if bugLev >= 5: print 'icharg: %g' % (resObj.icharg,)
# numSpin == ispin
resObj.numSpin = getScalar(
elecNode, 'separator[@name=\'electronic spin\']/i[@name=\'ISPIN\']', int)
if bugLev >= 5: print 'numSpin: %g' % (resObj.numSpin,)
if bugLev >= 5: print '\n===== ionic parameters =====\n'
# Some parameters like IBRION are also found in INCAR, sometimes.
# But apparently they are always in the parameters section.
lst = root.findall('parameters/separator[@name=\'ionic\']')
if len(lst) != 1: throwerr('ionic parameters not found')
ionNode = lst[0]
resObj.ibrion = getScalar( ionNode, 'i[@name=\'IBRION\']', int)
if bugLev >= 5: print 'ibrion: %g' % (resObj.ibrion,)
if bugLev >= 5: print '\n===== atom info =====\n'
# atomTypeMrr = map containing array. Example (some whitespace omitted):
# _dimLens: [2]
# _dimNames: ['type']
# _fieldNames: ['atomspertype' 'element' 'mass' 'valence' 'pseudopotential']
# _fieldTypes: ['i' 's' 'f' 'f' 's']
# atomspertype: [1 4]
# element: ['C ' 'Fe']
# mass: [ 12.011 55.847]
# valence: [ 4. 8.]
# pseudopotential: [' PAW_PBE C_s 06Sep2000 ' ' PAW_PBE Fe 06Sep2000 ']
atomTypeMrr = getArrayByPath(
bugLev, root, 'atominfo/array[@name=\'atomtypes\']')
resObj.typeNames = atomTypeMrr['element']
resObj.typeNums = atomTypeMrr['atomspertype']
resObj.typeMasses_amu = atomTypeMrr['mass']
resObj.typeValences = atomTypeMrr['valence']
resObj.typePseudos = atomTypeMrr['pseudopotential']
if bugLev >= 5:
print '\natomTypeMrr:'
printMrr( atomTypeMrr)
print '\nunsorted atomTypes:'
print 'typeNames: %s' % ( resObj.typeNames,)
print 'typeNums: %s' % ( resObj.typeNums,)
print 'typeMasses_amu: %s' % ( resObj.typeMasses_amu,)
print 'typeValences: %s' % ( resObj.typeValences,)
print 'typePseudos: %s' % ( resObj.typePseudos,)
# Sort parallel arrays typeNames, typeNums, etc,
# by typeNames alphabetic order,
# using an index sort with tpIxs.
# In rare cases like icsd_024360.cif/hs-ferro
# the names are out of order.
# Sort to set tpIxs[newIx] == oldIx.
ntype = len( resObj.typeNames)
tpIxs = range( ntype)
def sortFunc( ia, ib):
return cmp( resObj.typeNames[ia], resObj.typeNames[ib])
tpIxs.sort( sortFunc)
if bugLev >= 5:
print 'tpIxs: %s' % (tpIxs,)
resObj.typeNames = [resObj.typeNames[ix] for ix in tpIxs]
resObj.typeNums = [resObj.typeNums[ix] for ix in tpIxs]
resObj.typeMasses_amu = [resObj.typeMasses_amu[ix] for ix in tpIxs]
resObj.typeValences = [resObj.typeValences[ix] for ix in tpIxs]
resObj.typePseudos = [resObj.typePseudos[ix] for ix in tpIxs]
if bugLev >= 5:
print '\nsorted atomTypes:'
print 'typeNames: %s' % ( resObj.typeNames,)
print 'typeNums: %s' % ( resObj.typeNums,)
print 'typeMasses_amu: %s' % ( resObj.typeMasses_amu,)
print 'typeValences: %s' % ( resObj.typeValences,)
print 'typePseudos: %s' % ( resObj.typePseudos,)
# totalValence = sum( count[i] * valence[i])
# PyLada calls this valence.
resObj.totalValence = np.dot( resObj.typeNums, resObj.typeValences)
if bugLev >= 5: print 'totalValence: %g' % (resObj.totalValence,)
if resObj.numElectron != resObj.totalValence:
throwerr('%g == numElectron != totalValence == %g' \
% (resObj.numElectron, resObj.totalValence,))
# atomMrr = map containing array. Example:
# _dimLens: [5]
# _dimNames: ['ion']
# _fieldNames: ['element' 'atomtype']
# _fieldTypes: ['s' 'i']
# element: ['C ' 'Fe' 'Fe' 'Fe' 'Fe']
# atomtype: [1 2 2 2 2]
atomMrr = getArrayByPath(
bugLev, root, 'atominfo/array[@name=\'atoms\']')
atomNames = atomMrr['element']
atomTypes = [ix - 1 for ix in atomMrr['atomtype']] # change to origin 0
natom = len( atomTypes)
if bugLev >= 5:
print '\natomMrr:'
printMrr( atomMrr)
print '\nunsorted atoms:'
print 'atomNames: %s' % ( atomNames,)
print 'atomTypes: %s' % ( atomTypes,)
# The permutation array tpIxs maps tpIxs[newIx] = oldIx.
# Invert it to get tpIxInvs[oldIx] = newIx.
tpIxInvs = ntype * [0]
for ii in range( ntype):
tpIxInvs[ tpIxs[ii]] = ii
if bugLev >= 5:
print 'tpIxInvs: %s' % (tpIxInvs,)
# Sort atomNames, atomTypes by tpIxInvs[atomtype] so they
# are in the same order as typenames, typenums, etc, above.
# Currently atomType[i] = old index num into atomTypes.
# We want to sort by new index num into atomTypes.
atomIxs = range( natom)
def sortFunc( ia, ib):
return cmp( tpIxInvs[ atomTypes[ ia]], tpIxInvs[ atomTypes[ ib]])
atomIxs.sort( sortFunc)
atomNames = [atomNames[ix] for ix in atomIxs]
atomTypes = [tpIxInvs[ atomTypes[ix]] for ix in atomIxs]
if bugLev >= 5:
print '\natomIxs: %s' % (atomIxs,)
print '\nsorted atoms:'
print 'atomNames: %s' % ( atomNames,)
print 'atomTypes: %s' % ( atomTypes,)
resObj.atomNames = atomNames
resObj.atomTypes = atomTypes
resObj.atomMasses_amu = natom * [None]
resObj.atomValences = natom * [None]
resObj.atomPseudos = natom * [None]
for ii in range( natom):
ix = atomTypes[ii]
if resObj.atomNames[ii] != resObj.typeNames[ix]:
throwerr('name mismatch')
resObj.atomMasses_amu[ii] = resObj.typeMasses_amu[ix]
resObj.atomValences[ii] = resObj.typeValences[ix]
resObj.atomPseudos[ii] = resObj.typePseudos[ix]
if bugLev >= 5:
print 'atomNames: %s' % ( resObj.atomNames,)
print 'atomTypes: %s' % ( resObj.atomTypes,)
print 'atomMasses_amu: %s' % ( resObj.atomMasses_amu,)
print 'atomValences: %s' % ( resObj.atomValences,)
print 'atomPseudos: %s' % ( resObj.atomPseudos,)
# Make sure typenames are in alphabetic order
for ii in range(len(resObj.typeNames) - 1):
if resObj.typeNames[ii] > resObj.typeNames[ii+1]:
throwerr('typeNames not in order')
# Make sure atomnames are in alphabetic order
for ii in range(len(resObj.atomNames) - 1):
if resObj.atomNames[ii] > resObj.atomNames[ii+1]:
throwerr('atomNames not in order')
if bugLev >= 5: print '\n===== initial structure =====\n'
# Initial structure
# PyLada: vasp/extract/base.py: initial_structure()
# OUTCAR: uses the appended INITIAL STRUCTURE section.
lst = root.findall(
'structure[@name=\'initialpos\']/crystal/varray[@name=\'basis\']/v')
if bugLev >= 5: print 'len(lst) a:', len(lst)
# initial_structure
# POSCAR specifies each basis vector as one row.
# So does vasprun.xml.
# But PyLada's structure.cell is the transpose: each basis vec is a column.
resObj.initialBasisMat = getRawArray(
root,
'structure[@name=\'initialpos\']/crystal/varray[@name=\'basis\']/v',
3, 3, float)
resObj.initialRecipBasisMat = getRawArray(
root,
'structure[@name=\'initialpos\']/crystal/varray[@name=\'rec_basis\']/v',
3, 3, float)
resObj.initialFracPosMat = getRawArray(
root,
'structure[@name=\'initialpos\']/varray[@name=\'positions\']/v',
0, 3, float) # xxx nrow should be natom
resObj.initialCartPosMat = np.dot(
resObj.initialFracPosMat, resObj.initialBasisMat)
# xxx mult by scale factor?
if bugLev >= 5:
print 'initialBasisMat:\n%s' % (repr(resObj.initialBasisMat),)
print 'initialRecipBasisMat:\n%s' % (repr(resObj.initialRecipBasisMat),)
print 'initialFracPosMat:\n%s' % (repr(resObj.initialFracPosMat),)
print 'initialCartPosMat:\n%s' % (repr(resObj.initialCartPosMat),)
if bugLev >= 5: print '\n===== final structure =====\n'
# structure == final pos
# POSCAR and OUTCAR specify each basis vector as one row.
# So does vasprun.xml.
# But PyLada's structure.cell is the transpose: each basis vec is a column.
#
# In vasprun.xml and OUTCAR, the basis vectors are rows.
resObj.finalBasisMat = getRawArray(
root,
'structure[@name=\'finalpos\']/crystal/varray[@name=\'basis\']/v',
3, 3, float)
resObj.finalRecipBasisMat = getRawArray(
root,
'structure[@name=\'finalpos\']/crystal/varray[@name=\'rec_basis\']/v',
3, 3, float)
resObj.finalFracPosMat = getRawArray(
root,
'structure[@name=\'finalpos\']/varray[@name=\'positions\']/v',
0, 3, float) # xxx nrow should be natom
resObj.finalCartPosMat = np.dot(
resObj.finalFracPosMat, resObj.finalBasisMat)
# xxx mult by scale factor?
if bugLev >= 5:
print 'finalBasisMat:\n%s' % (repr(resObj.finalBasisMat),)
print 'finalRecipBasisMat:\n%s' % (repr(resObj.finalRecipBasisMat),)
print 'finalFracPosMat:\n%s' % (repr(resObj.finalFracPosMat),)
print 'finalCartPosMat:\n%s' % (repr(resObj.finalCartPosMat),)
if bugLev >= 5: print '\n===== kpoints =====\n'
# kpoint coordinates.
# Not in PyLada?
resObj.kpointFracMat = getRawArray(
root, 'kpoints/varray[@name=\'kpointlist\']/v',
0, 3, float)
resObj.numKpoint = resObj.kpointFracMat.shape[0]
resObj.kpointCartMat \
= np.dot( resObj.kpointFracMat, resObj.initialRecipBasisMat)
if bugLev >= 5:
print 'numKpoint: %g' % (resObj.numKpoint,)
print 'kpointFracMat:\n%s' % (repr(resObj.kpointFracMat),)
print 'kpointCartMat:\n%s' % (repr(resObj.kpointCartMat),)
# This is what PyLada calls multiplicity.
# The only diff is the scaling.
# sum( Pylada multiplicity) = numKpoint
# sum( our kpointWeights) = 1.0
resObj.kpointWeights = getRawArray(
root, 'kpoints/varray[@name=\'weights\']/v',
0, 1, float)
resObj.kpointWeights = resObj.kpointWeights[:,0] # Only 1 col in 2d array
if resObj.kpointWeights.shape[0] != resObj.numKpoint:
throwerr('numKpoint mismatch')
if bugLev >= 5:
print 'kpointWeights:\n%s' % (repr(resObj.kpointWeights),)
print 'kpointWeights sum: %g' % (sum(resObj.kpointWeights),)
if bugLev >= 5: print '\n===== final volume and density =====\n'
# volume, Angstrom^3
# The scale is hard coded as 1.0 in PyLada crystal/read.py,
# in both icsd_cif_a and icsd_cif_b.
volScale = 1.0
resObj.finalVolumeCalc_ang3 = abs( np.linalg.det(
volScale * resObj.finalBasisMat))
if bugLev >= 5:
print 'finalVolumeCalc_ang3: %g' % (resObj.finalVolumeCalc_ang3,)
resObj.finalVolume_ang3 = getScalar(
root, 'structure[@name=\'finalpos\']/crystal/i[@name=\'volume\']', float)
if bugLev >= 5:
print 'finalVolume_ang3: %g' % (resObj.finalVolume_ang3,)
# reciprocal space volume, * (2*pi)**3
# As in PyLada.
invMat = np.linalg.inv( volScale * resObj.finalBasisMat)
resObj.recipVolume = abs( np.linalg.det( invMat)) * (2 * np.pi)**3
if bugLev >= 5:
print 'recipVolume: origMat:\n%s' \
% (repr(volScale * resObj.finalBasisMat),)
print 'recipVolume: invMat:\n%s' % (repr(invMat),)
print 'recipVolume: det:\n%s' % (repr(np.linalg.det( invMat)),)
print 'recipVolume: %g' % (resObj.recipVolume,)
# Density
# xxx better: get atomic weights from periodic table
volCm = resObj.finalVolumeCalc_ang3 / (1.e8)**3 # 10**8 Angstrom per cm
totMass = np.dot( atomTypeMrr['atomspertype'], atomTypeMrr['mass'])
totMassGm = totMass * 1.660538921e-24 # 1.660538921e-24 g / amu
resObj.finalDensity_g_cm3 = totMassGm / volCm
if bugLev >= 5:
print 'volCm: %g' % (volCm,)
print 'totMassGm: %g' % (totMassGm,)
print 'finalDensity_g_cm3: %g' % (resObj.finalDensity_g_cm3,)
if bugLev >= 5: print '\n===== last calc forces =====\n'
resObj.finalForceMat_ev_ang = getRawArray(
root, 'calculation[last()]/varray[@name=\'forces\']/v',
0, 3, float)
if bugLev >= 5:
print 'finalForceMat_ev_ang:\n%s' % (repr(resObj.finalForceMat_ev_ang),)
# Get stress
resObj.finalStressMat_kbar = getRawArray(
root, 'calculation[last()]/varray[@name=\'stress\']/v',
3, 3, float)
if bugLev >= 5:
print 'finalStressMat_kbar:\n%s' % (repr(resObj.finalStressMat_kbar),)
# Calc pressure
# xxx Caution: we do not include the non-diag terms in:
# VASP: force.F: FORCE_AND_STRESS: line 1410:
# PRESS=(TSIF(1,1)+TSIF(2,2)+TSIF(3,3))/3._q &
# & -DYN%PSTRESS/(EVTOJ*1E22_q)*LATT_CUR%OMEGA
diag = [resObj.finalStressMat_kbar[ii][ii] for ii in range(3)]
resObj.finalPressure_kbar = sum( diag) / 3.0
if bugLev >= 5: print 'finalPressure_kbar: %g' % (resObj.finalPressure_kbar,)
if bugLev >= 5: print '\n===== eigenvalues and occupancies =====\n'
# PyLada: eigenvalues
eigenMrr = getArrayByPath(
bugLev, root, 'calculation[last()]/eigenvalues/array')
if bugLev >= 5:
print '\neigenMrr beg =====:'
printMrr( eigenMrr)
print '\neigenMrr end =====:'
for isp in range( resObj.numSpin):
print '\neigenMrr: eigene[isp=%d][0]\n%s' \
% (isp, repr(eigenMrr['eigene'][isp][0]),)
print '\neigenMrr: occ[isp=%d][0]\n%s' \
% (isp, repr(eigenMrr['occ'][isp][0]),)
shp = eigenMrr['_dimLens']
if shp[0] != resObj.numSpin: throwerr('numSpin mismatch')
if shp[1] != resObj.numKpoint: throwerr('numKpoint mismatch')
if shp[2] != prmNumBand: # see caution at prmNumBand, above
print('numBand mismatch: prm: %d shape: %d inFile: %s' \
% (prmNumBand, shp[2], inFile,))
resObj.numBand = shp[2]
resObj.eigenMat = eigenMrr['eigene']
# Caution: for non-magnetic (numSpin==1),
# OUTCAR has occupMat values = 2, while vasprun.xml has values = 1.
# For magnetic (numSpin==2), both OUTCAR and vasprun.xml have 1.
resObj.occupMat = eigenMrr['occ']
if resObj.numSpin == 1: resObj.occupMat *= 2
if bugLev >= 5:
print 'resObj.eigenMat.shape: ', resObj.eigenMat.shape
print 'resObj.occupMat.shape: ', resObj.occupMat.shape
# Compare projected and standard eigenvalues
getProjected = False
if getProjected:
for isp in range( resObj.numSpin):
projEigenMrr = getArrayByPath(
bugLev, root, 'calculation[last()]/projected/eigenvalues/array')
# eigs and projected eigs are identical
      eigs = eigenMrr['eigene'][isp]
      peigs = projEigenMrr['eigene'][isp]
      if bugLev >= 5:
        print 'Compare eigs, peigs for isp: %d' % (isp,)
print ' eigs.shape: ', eigs.shape
print ' peigs.shape: ', peigs.shape
print ' eigs[0,:]: ', eigs[0,:]
print ' peigs[0,:]: ', peigs[0,:]
print ' Diff projeigs - eigs: max maxabs: %g' \
% (max( map( max, abs(peigs - eigs))),)
# occs and projected occs are identical
      occs = eigenMrr['occ'][isp]
poccs = projEigenMrr['occ'][isp]
if bugLev >= 5:
print 'Compare occs, poccs for isp: %d' % (isp,)
print ' occs.shape: ', occs.shape
print ' poccs.shape: ', poccs.shape
print ' occs[0,:]: ', occs[0,:]
print ' poccs[0,:]: ', poccs[0,:]
print ' Diff projoccs - occs: max maxabs: %g' \
% (max( map( max, abs(poccs - occs))),)
if bugLev >= 5: print '\n===== misc junk =====\n'
# PyLada: vasp/extract/base.py: is_gw()
resObj.isGw = False
if resObj.algo in ['gw', 'gw0', 'chi', 'scgw', 'scgw0']: resObj.isGw = True
if bugLev >= 5: print 'isGw: %s' % (resObj.isGw,)
# PyLada: vasp/extract/base.py: is_dft()
resObj.isDft = not resObj.isGw
if bugLev >= 5: print 'isDft: %s' % (resObj.isDft,)
# functional: comes from appended FUNCTIONAL.
# success: look for final section
# General timing and accounting informations for this job:
# xxx skip: Hubbard / NLEP
if bugLev >= 5: print '\n===== energy, efermi0 =====\n'
resObj.energyNoEntrp = getScalar(
root, 'calculation[last()]/energy/i[@name=\'e_wo_entrp\']', float)
# efermi0
# PyLada uses an algorithm to compare the sum of occupancies
# to the valence.
# We get it from the xml file here.
# PyLada: 5.8574
# XML: 5.93253
resObj.efermi0 = getScalar(
root, 'calculation[last()]/dos/i[@name=\'efermi\']', float)
if bugLev >= 5: print 'efermi0: %g' % (resObj.efermi0,)
if bugLev >= 5: print '\n===== cbMin, vbMax, bandgap =====\n'
# Find cbm = min of eigs > efermi0
# Find vbm = max of eigs <= efermi0
cbms = resObj.numSpin * [np.inf]
vbms = resObj.numSpin * [-np.inf]
cbmKpis = resObj.numSpin * [None]
vbmKpis = resObj.numSpin * [None]
for isp in range( resObj.numSpin):
eigs = resObj.eigenMat[isp]
for ikp in range( resObj.numKpoint):
for iband in range( resObj.numBand):
val = eigs[ikp][iband]
if val > resObj.efermi0:
cbms[isp] = min( cbms[isp], val)
cbmKpis[isp] = ikp
if val <= resObj.efermi0:
vbms[isp] = max( vbms[isp], val)
vbmKpis[isp] = ikp
cbms = map( float, cbms) # change type from numpy.float64 to float
vbms = map( float, vbms) # change type from numpy.float64 to float
resObj.cbms = cbms
resObj.vbms = vbms
resObj.cbmKpis = cbmKpis
resObj.vbmKpis = vbmKpis
resObj.cbMin = min( cbms) # This is PyLada's cbm
resObj.vbMax = max( vbms) # This is PyLada's vbm
resObj.bandgaps = [ (cbms[ii] - vbms[ii]) for ii in range( resObj.numSpin)]
resObj.bandgapa = min( resObj.bandgaps)
resObj.bandgap = resObj.cbMin - resObj.vbMax # This is PyLada version
if bugLev >= 5:
print 'cbmKpis: %s cbms: %s' % (cbmKpis, cbms,)
print 'vbmKpis: %s vbms: %s' % (vbmKpis, vbms,)
print 'cbMin: %g' % (resObj.cbMin,)
print 'vbMax: %g' % (resObj.vbMax,)
print 'bandgaps: %s' % (resObj.bandgaps,)
print 'bandgapa: %g' % (resObj.bandgapa,)
print 'bandgap: %g' % (resObj.bandgap,)
# xxx
# delta between cbmIndex, vbmIndex
# print kpoints coords. which is gamma, etc?
# is any of frasier med exp?
return
########################### End of parseXml ###############################
# The following code was used for initial testing,
# and who knows, someday may be useful again.
#print '\n'
#print '\ntesta:'
#lst = root.findall('kpoints/generation/v[@name=\'genvec2\']')
#amat = []
#for ele in lst:
# text = ele.text
# print ' ele.text: %s' % (text,)
# toks = text.split()
# vals = map( float, toks)
# amat.append( vals)
#print 'amat: %s' % (amat,)
#amat = np.array( amat, dtype=float)
#print 'amat:\n%s' % (amat,)
#vec = getVec( root, 'kpoints/generation/v[@name=\'genvec2\']', 0, 0, float)
#print 'vec:\n%s' % (vec,)
#amat = getRawArray( root, 'kpoints/generation/v', 0, 0, float)
#print 'amat:\n%s' % (amat,)
#calcNodes = root.findall('calculation')
#print '\nlen(calcNodes): %d' % (len(calcNodes,))
## pairs: (itot, en_wo_entrp) for the energy of each scstep
#scstep_withouts = []
## pairs: (itot, en_wo_entrp) for the last energy of each calculation step
#calcstep_withouts = []
#basisMats = []
#recipBasisMats = []
#posMats = []
#forceMats = []
#stressMats = []
#itot = 0 # index all scsteps, across calculations
#ncalc = len( calcNodes)
#for icalc in range( ncalc):
# cnode = calcNodes[icalc]
# forceMat = getRawArray( cnode, 'varray[@name=\'forces\']/v', 0, 0, float)
# print '\nforceMat for calcNodes[%d]:\n%s' % (icalc, forceMat,)
# scNodes = cnode.findall('scstep')
# print ' len(scNodes): %d' % (len(scNodes,))
# for isc in range(len(scNodes)):
# snode = scNodes[isc]
# sc_e_fr = getScalar( snode, 'energy/i[@name=\'e_fr_energy\']', float)
# sc_e_wo = getScalar( snode, 'energy/i[@name=\'e_wo_entrp\']', float)
# sc_e_0 = getScalar( snode, 'energy/i[@name=\'e_0_energy\']', float)
# print ' scNodes[%d]: sc_e_fr: %g sc_e_wo: %g sc_e_0: %g' \
# % (isc, sc_e_fr, sc_e_wo, sc_e_0,)
# scstep_withouts.append( (itot, sc_e_wo,))
# itot += 1
# # Structure for this calculation step
# strucNodes = cnode.findall('structure')
# if len(strucNodes) != 1: throwerr('calc structure not found')
# snode = strucNodes[0]
# basisMat = getRawArray(
# snode, 'crystal/varray[@name=\'basis\']/v', 3, 3, float)
# recipBasisMat = getRawArray(
# snode, 'crystal/varray[@name=\'rec_basis\']/v', 3, 3, float)
# # xxx should be nrow = num atoms
# posMat = getRawArray(
# snode, 'varray[@name=\'positions\']/v', 0, 3, float)
# print ' Calc final: basisMat:\n%s' % (basisMat,)
# print ' Calc final: recipBasisMat:\n%s' % (recipBasisMat,)
# print ' Calc final: posMat:\n%s' % (posMat,)
# basisMats.append( basisMat)
# recipBasisMats.append( recipBasisMat)
# posMats.append( posMat)
# # Forces for this calculation step
# forceNodes = cnode.findall('varray[@name=\'forces\']')
# if len(forceNodes) != 1: throwerr('calc forces not found')
# forceMat = getRawArray( forceNodes[0], 'v', 0, 3, float)
# print ' Calc final: forceMat:\n%s' % (forceMat,)
# forceMats.append( forceMat)
# # Stress for this calculation step
# stressNodes = cnode.findall('varray[@name=\'stress\']')
# if len(stressNodes) != 1: throwerr('calc stresses not found')
# stressMat = getRawArray( stressNodes[0], 'v', 3, 3, float)
# print ' Calc final: stressMat:\n%s' % (stressMat,)
# stressMats.append( stressMat)
# # Final energy for this calculation step
# enNodes = cnode.findall('energy')
# if len(enNodes) != 1: throwerr('calc energy not found')
# enode = enNodes[0]
# c_e_fr = getScalar( enode, 'i[@name=\'e_fr_energy\']', float)
# c_e_wo = getScalar( enode, 'i[@name=\'e_wo_entrp\']', float)
# c_e_0 = getScalar( enode, 'i[@name=\'e_0_energy\']', float)
# print ' Calc final: c_e_fr: %g c_e_wo: %g c_e_0: %g' \
# % (c_e_fr, c_e_wo, c_e_0,)
# calcstep_withouts.append( (itot - 1, c_e_wo,))
#print ''
#print 'scstep_withouts: %s' % (scstep_withouts,)
#print ''
#print 'calcstep_withouts: %s' % (calcstep_withouts,)
#scmat = np.array( scstep_withouts, dtype=float)
#print ''
#print 'scmat:\n%s' % (scmat,)
#calcmat = np.array( calcstep_withouts, dtype=float)
#print ''
#print 'calcmat:\n%s' % (calcmat,)
#print ''
#print 'Investigate DOS'
#icals = len(calcNodes) - 1
#cnode = calcNodes[icalc]
#setNodes = cnode.findall('dos/total/array/set/set[@comment=\'spin 1\']')
#print ' len(total setNodes): %d' % (len(setNodes),)
#print ' setNodes[0]: %s' % (setNodes[0],)
#if len(setNodes) != 1: throwerr('dos/total not found')
#dosTotalMat = getRawArray( setNodes[0], 'r', 0, 0, float)
#print ''
#print 'type(dosTotalMat): ', type(dosTotalMat)
#print 'dosTotalMat.shape: ', dosTotalMat.shape
#print ''
#print 'dosTotalMat:\n%s' % (dosTotalMat,)
#dosPartialMats = []
#partialSetNodes = cnode.findall('dos/partial/array/set')
#print ' len(partialSetNodes): %d' % (len(partialSetNodes),)
#if len(partialSetNodes) != 1: throwerr('dos/partial not found')
#partialSet = partialSetNodes[0]
#ionNodes = partialSet.findall('set')
#print ' len(ionNodes): %d' % (len(ionNodes),)
## xxx should be nrow = num atoms
#for ii in range(len(ionNodes)):
# dosPartialMat = getRawArray(
# ionNodes[ii], 'set[@comment=\'spin 1\']/r', 0, 0, float)
# print ''
# print 'dosPartialMat %d:' % (ii,)
# print 'type(dosPartialMat): ', type(dosPartialMat)
# print 'dosPartialMat.shape: ', dosPartialMat.shape
# print ''
# print 'dosPartialMat:\n%s' % (dosPartialMat,)
# dosPartialMats.append( dosPartialMat)
#print 'len(dosPartialMats): %d' % (len(dosPartialMats),)
#print '\nbasisMats: len: %d' % (len(basisMats),)
#for mat in basisMats: print '%s' % (mat,)
#print '\nrecipBasisMats: len: %d' % (len(recipBasisMats),)
#for mat in recipBasisMats: print '%s' % (mat,)
#print '\nposMats: len: %d' % (len(posMats),)
#for mat in posMats: print '%s' % (mat,)
#print '\nforceMats: len: %d' % (len(forceMats),)
#for mat in forceMats: print '%s' % (mat,)
#print '\nstressMats: len: %d' % (len(stressMats),)
#for mat in stressMats: print '%s' % (mat,)
#basisDeltas = calcMatDeltas( basisMats)
#recipBasisDeltas = calcMatDeltas( recipBasisMats)
#posDeltas = calcMatDeltas( posMats)
#forceDeltas = calcMatDeltas( forceMats)
#stressDeltas = calcMatDeltas( stressMats)
#print 'basisDeltas: %s' % ( basisDeltas,)
#print 'recipBasisDeltas: %s' % ( recipBasisDeltas,)
#print 'posDeltas: %s' % ( posDeltas,)
#print 'forceDeltas: %s' % ( forceDeltas,)
#print 'stressDeltas: %s' % ( stressDeltas,)
#import matplotlib
#matplotlib.use('tkagg')
#import matplotlib.pyplot as plt
#fig, axes = plt.subplots( 1, 1)
###ax00 = axes[0,0]
#ax00 = axes
#ax00.plot( dosTotalMat[:,0], dosTotalMat[:,1], color='r', linestyle='-',
# marker=None)
#ax00.set_xlabel('Energy, eV')
#ax00.set_ylabel('Number of states per unit cell')
#ax00.set_title('Density of states')
#ax00.xaxis.grid(color='lightblue', linestyle='solid')
#ax00.yaxis.grid(color='lightblue', linestyle='solid')
##plt.show()
##fig, ax = plt.subplots()
##
##ax.plot( scmat[:,0], scmat[:,1], 'b-')
##ax.plot( calcmat[:,0], calcmat[:,1], 'bo')
##ax.set_ylim( calcmat[-1,1] - 5, calcmat[-1,1] + 5)
##ax.xaxis.grid(color='lightblue', linestyle='solid')
##ax.yaxis.grid(color='lightblue', linestyle='solid')
##
##savefig('tempa.png', dpi=100, orientation='landscape', papertype='letter')
##
##plt.show()
#tnodes = root.findall('calculation[last()]')
#printNode( tnodes[0], 0, 1)
#tnodes = root.findall('calculation[last()]/eigenvalues')
#printNode( tnodes[0], 0, 1)
#tnodes = root.findall('calculation[last()]/eigenvalues/array')
#printNode( tnodes[0], 0, 1)
#res = getArrayByPath(
# bugLev, root, 'calculation[last()]/eigenvalues/array')
#print '\ncalculation[last()]/eigenvalues:\n%s' % (res,)
#print '\n'
#====================================================================
def printNode( node, curLev, maxLev):
'''
Recursively prints an XML tree, given an xml.etree.cElementTree node.
**Parameters**:
* node (xml.etree.ElementTree.Element): The root of the XML tree.
* curLev (int): The current recursion level. Starts at 0 and
is incremented for each recursive call.
* maxLev (int): The max number of levels to print
**Returns**:
* None
'''
if curLev <= maxLev:
if node.tail == None: tail = 'None'
else: tail = '"%s"' % (node.tail.strip(),)
if node.text == None: text = 'None'
else: text = '"%s"' % (node.text.strip(),)
print '%stag: %s attrib: %s tail: %s text: %s' \
% (curLev * ' ', node.tag, node.attrib, tail, text,)
for kid in node:
printNode( kid, curLev + 1, maxLev)
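# For example (comment only), parseXml above dumps the top of the tree with:
#   printNode( root, 0, maxLev)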
#====================================================================
def parseText( path, nmin, nmax, dtype, text):
'''
Splits ``text`` into tokens, and converts each token to ``dtype``.
Called by getVec, getRawArray.
**Parameters**:
* path (str): the XML tree path to the current node, for error msgs.
* nmin (int): the minimum num tokens. If fewer are found, throwerr.
* nmax (int): the maximum num tokens. If more are found, throwerr.
* dtype (python type): Either int, float, or str: the tokens
are converted to dtype.
* text (str): the text string to be split.
**Returns**:
* list of tokens each having type = dtype.
'''
toks = text.split()
ntok = len( toks)
if ntok < nmin:
throwerr('ntok < nmin for path: "%s" text: "%s"' % (path, text,))
if nmax > 0 and ntok > nmax:
throwerr('ntok > nmax for path: "%s" text: "%s"' % (path, text,))
vals = ntok * [None]
for ii in range(ntok):
tok = toks[ii]
if dtype == int:
try: val = int( tok)
except ValueError, exc:
throwerr('invalid int in path: "%s" text: "%s"' % (path, text,))
elif dtype == float:
try: val = float( tok)
except ValueError, exc:
throwerr('invalid float in path: "%s" text: "%s"' % (path, text,))
elif dtype == str: val = tok
else: throwerr('unknown dtype for path: "%s"' % (path,))
vals[ii] = val
return vals
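# Illustrative behaviour (comment only):
#   parseText( 'some/path', 1, 3, float, ' 1.0 2.5 ')  -> [1.0, 2.5]
#   parseText( 'some/path', 3, 3, float, ' 1.0 2.5 ')  -> throwerr (too few tokens)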
#====================================================================
def getVec( root, path, nmin, nmax, dtype):
'''
  Gets text at the specified XML path, splits it, and converts the tokens to ``dtype``.
**Parameters**:
* root (xml.etree.ElementTree.Element): The current XML node.
* path (str): the XML path from the current node.
* nmin (int): the minimum num tokens. If fewer are found, throwerr.
* nmax (int): the maximum num tokens. If more are found, throwerr.
* dtype (python type): Either int, float, or str: the tokens
are converted to dtype.
**Returns**:
* list of tokens each having type = dtype.
'''
text = getString( root, path)
vals = parseText( path, nmin, nmax, dtype, text)
return vals
#====================================================================
# Return stripped string
def getString( root, path):
'''
  Gets text at the specified XML path, ensures there's just 1, and returns it.
**Parameters**:
* root (xml.etree.ElementTree.Element): The current XML node.
* path (str): the XML path from the current node.
**Returns**:
* stripped string.
'''
lst = root.findall( path)
if len(lst) == 0:
throwerr('path not found: "%s"' % (path,))
if len(lst) > 1:
throwerr('multiple matches for path: "%s"' % (path,))
ele = lst[0]
text = ele.text
return text.strip()
#====================================================================
def getScalar( root, path, dtype):
'''
Gets text at the specified XML path, and converts it to ``dtype``.
**Parameters**:
* root (xml.etree.ElementTree.Element): The current XML node.
* path (str): the XML path from the current node.
* dtype (python type): Either int, float, or str: the token
is converted to dtype.
**Returns**:
* item having type = dtype.
'''
lst = getVec( root, path, 1, 1, dtype)
return lst[0]
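# For example (comment only), ENCUT is read elsewhere in this file as:
#   resObj.encut_ev = getScalar( root, 'incar/i[@name=\'ENCUT\']', float)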
#====================================================================
def getRawArray( root, path, nrow, ncol, dtype):
'''
Gets text at the specified XML path, and converts to a
2D numpy array of ``dtype``.
The text must be organized as one text element per row.
**Parameters**:
* root (xml.etree.ElementTree.Element): The current XML node.
* path (str): the XML path from the current node.
* nrow (int): the number of rows. If 0, allow any number.
* ncol (int): the number of columns. If 0, allow any number.
* dtype (python type): Either int, float, or str: the tokens
are converted to dtype.
**Returns**:
* A regular 2-dimensional numpy array of dtype.
'''
lst = root.findall( path)
nlst = len( lst)
if nlst == 0: throwerr('path not found: "%s"' % (path,))
if nrow > 0 and nlst != nrow:
throwerr('nrow mismatch for path: "%s". expected: %d found: %d' \
% (path, nrow, nlst,))
rows = []
for ii in range(nlst):
ele = lst[ii]
text = ele.text
vals = parseText( path, 0, 0, dtype, text)
if len(rows) == 0: ncolActual = len( vals)
if len(vals) != ncolActual:
throwerr('irregular array for path: "%s"' % (path,))
if ncol > 0 and ncolActual != ncol:
throwerr('ncol mismatch path: "%s"' % (path,))
rows.append( vals)
if dtype == int:
amat = np.array( rows, dtype=int)
elif dtype == float:
amat = np.array( rows, dtype=float)
else: throwerr('unknown dtype for path: "%s"' % (path,))
return amat
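# For example (comment only), the 3x3 final basis matrix is read elsewhere as:
#   resObj.finalBasisMat = getRawArray( root,
#     'structure[@name=\'finalpos\']/crystal/varray[@name=\'basis\']/v',
#     3, 3, float)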
#====================================================================
def getArrayByPath( bugLev, baseNode, path):
'''
Converts an XML ``<array>`` element in vasprun.xml
to a map with an array.
See :func:`getArrayByNode` for details.
**Parameters**:
* bugLev (int): Debug level. Normally 0.
* baseNode (xml.etree.ElementTree.Element): current XML node
* path (str): XML path from baseNode for the ``<array>`` element.
**Returns**:
* A Python array
'''
arrNodes = baseNode.findall( path)
if len(arrNodes) != 1: throwerr('path not found')
res = getArrayByNode( bugLev, arrNodes[0])
return res
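# For example (comment only), the per-atom table is read elsewhere in this file as:
#   atomMrr = getArrayByPath( bugLev, root, 'atominfo/array[@name=\'atoms\']')
#   atomMrr['element']   # numpy array of element symbols, one entry per ion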
#====================================================================
# Returns Mrr == map containing array, like:
# atomMrr:
# _dimLens: [6]
# _dimNames: ['ion']
# _fieldNames: ['element', 'atomtype']
# _fieldTypes: ['s', 'i']
# element: ['Mo' 'Mo' 'S' 'S' 'S' 'S']
# atomtype: [1 1 2 2 2 2]
def getArrayByNode( bugLev, arrNode):
'''
Converts an XML ``<array>`` element in vasprun.xml
to a map with an array.
Calls getArraySub to extract each field.
The output Python map has the following structure:
============= ========================================================
key value
============= ========================================================
_dimLens numpy vec of dimension lengths.
len( dimLens) == n == numDimensions.
_dimNames numpy vec of dimension names.
                len( dimNames) == n == numDimensions.
_fieldNames numpy vec of field names in the parallel arrays.
len( fieldNames) == numVariables.
_fieldTypes numpy vec of field types in the parallel arrays.
len( fieldTypes) == numVariables.
The types are: 'i': int, 'f': float, 's': str
<fieldName> numpy n-dimensional array of the field <fieldName>
<fieldName> numpy n-dimensional array of the field <fieldName>
<fieldName> numpy n-dimensional array of the field <fieldName>
...
============= ========================================================
Example XML for a 1-dimensional array with 2 fields: ::
<array name="atoms" >
<dimension dim="1">ion</dimension>
<field type="string">element</field>
<field type="int">atomtype</field>
<set>
<rc><c>C </c><c> 1</c></rc>
<rc><c>Fe</c><c> 2</c></rc>
<rc><c>Fe</c><c> 2</c></rc>
<rc><c>Fe</c><c> 2</c></rc>
<rc><c>Fe</c><c> 2</c></rc>
</set>
</array>
Example resulting map: ::
_dimLens: [5]
_dimNames: ['ion']
_fieldNames: ['element' 'atomtype']
_fieldTypes: ['s' 'i']
element: ['C' 'Fe' 'Fe' 'Fe' 'Fe']
atomtype: [1 2 2 2 2]
Multiple dimension arrays also are supported.
The vasprun.xml handling of dimensions is unusual.
What they claim is ``dim="1"`` actually is the least
significant dimension and varies fastest, both
in the XML data and in our resulting Python array.
So the XML ``<dimension dim="1">band</dimension>``
becomes the last dimension in the resulting Python array.
Example XML for a 3 dimensional array with 2 fields: ::
<array>
<dimension dim="1">band</dimension>
<dimension dim="2">kpoint</dimension>
<dimension dim="3">spin</dimension>
<field>eigene</field>
<field>occ</field>
<set>
<set comment="spin 1">
<set comment="kpoint 1">
<r> -6.5058 1.0000 </r>
<r> 0.2537 1.0000 </r>
<r> 0.7101 1.0000 </r>
...
<r> 8.1390 0.0000 </r>
</set>
<set comment="kpoint 2">
<r> -6.3718 1.0000 </r>
<r> -0.0841 1.0000 </r>
<r> 0.7508 1.0000 </r>
...
</set>
<set comment="kpoint 101">
<r> -5.8567 1.0000 </r>
<r> -0.0854 1.0000 </r>
<r> 0.9602 1.0000 </r>
<r> 7.7174 0.0000 </r>
<r> 7.8556 0.0000 </r>
</set>
</set>
</set>
</array>
Example resulting map: ::
_dimLens: [ 1 101 22]
_dimNames: ['spin' 'kpoint' 'band']
_fieldNames: ['eigene' 'occ']
_fieldTypes: ['f' 'f']
eigene: [[[-6.5058 0.2537 0.7101 ..., 7.6096 7.8817 8.139 ]
[-6.3718 -0.0841 0.7508 ..., 7.481 7.8491 7.9595]
[-6.1332 -0.611 1.0672 ..., 7.0857 7.8655 7.9314]
...,
[-5.8462 0.3687 0.9498 ..., 7.1721 7.4739 7.6631]
[-5.8016 0.5503 0.5886 ..., 7.4113 7.5794 7.7332]
[-5.8567 -0.0854 0.9602 ..., 7.2729 7.7174 7.8556]]]
occ: [[[ 1. 1. 1. ..., 0. 0. 0. ]
[ 1. 1. 1. ..., 0. 0. 0. ]
[ 1. 1. 1. ..., 1. 0. 0. ]
...,
[ 1. 1. 1. ..., 1. 0. 0. ]
[ 1. 1. 1. ..., 0. 0. 0. ]
[ 1. 1. 1. ..., 0.9751 0. 0. ]]]
**Parameters**:
* bugLev (int): Debug level. Normally 0.
* node (xml.etree.ElementTree.Element):
The XML node for the ``<array>`` element.
**Returns**:
* A Python array
'''
dimNodes = arrNode.findall('dimension')
ndim = len( dimNodes)
if ndim == 0: throwerr('no dimensions found')
dimNames = [nd.text for nd in dimNodes]
dimNames.reverse() # dimNames are in reverse order in XML
dimNames = np.array( dimNames, dtype=str)
dimLens = np.zeros( [ndim], dtype=int)
fieldNodes = arrNode.findall('field')
nfield = len( fieldNodes)
if nfield == 0: throwerr('no fields found')
fieldNames = [nd.text for nd in fieldNodes]
fieldNames = np.array( fieldNames, dtype=str)
# We set fieldTypes[ifield] to max( all found types for ifield)
# Types are: 0:int, 1:float, 2:string
fieldTypes = nfield * [0]
setNodes = arrNode.findall('set')
if len(setNodes) != 1: throwerr('wrong len for primary set')
setNode = setNodes[0]
resList = nfield * [None]
for ifield in range( nfield):
amat = getArraySub(
bugLev,
setNode,
ifield,
fieldTypes,
0, # idim
dimLens)
# Convert all elements of each field ifield to fieldTypes[ifield].
if fieldTypes[ifield] == 0: amat = np.array( amat, dtype=int)
elif fieldTypes[ifield] == 1: amat = np.array( amat, dtype=float)
elif fieldTypes[ifield] == 2: amat = np.array( amat, dtype=str)
else: throwerr('unknown fieldType')
resList[ifield] = amat
# Convert fieldTypes from 0,1,2 to 'i', 'f', 's'
fldMap = { 0:'i', 1:'f', 2:'s'}
fieldTypeStgs = map( lambda x: fldMap[x], fieldTypes)
fieldTypeStgs = np.array( fieldTypeStgs, dtype=str)
resMap = {
'_dimNames': dimNames,
'_dimLens': dimLens,
'_fieldNames': fieldNames,
'_fieldTypes': fieldTypeStgs,
}
for ii in range(len(fieldNames)):
ar = resList[ii]
if not all(ar.shape == np.array(dimLens)): throwerr('dimLens mismatch')
resMap[fieldNames[ii]] = ar
return resMap
#====================================================================
def getArraySub(
bugLev,
setNode,
ifield,
fieldTypes,
idim,
dimLens):
'''
Decodes the XML for one field (one variable) for an
``<array>``.
Called by getArrayByNode. See :func:`getArrayByNode` for details.
**Parameters**:
* bugLev (int): Debug level. Normally 0.
* setNode (xml.etree.ElementTree.Element): the element for ``<set>``.
* ifield (int): the index number of the field.
* fieldTypes (int[]): the numeric field types so far.
The numeric types are: 0: int, 1: float, 2: str.
We take the max of the field types.
* idim (int): dimension number == recursion level == array nest level.
0 on the first call, 1 for the next level array, etc.
* dimLens (int[]): list of dimension lengths. Updated.
**Returns**:
* A Python array with elements of type str.
The caller converts them to the correct type.
'''
nfield = len(fieldTypes)
ndim = len(dimLens)
# If we're at the last dimension, decode the element values.
if idim == ndim - 1:
# Try long form:
# <set>
# <rc>
# <c>2</c>
# <c>Mo</c>
rcNodes = setNode.findall('rc') # long form: <rc> <c>
# Try short form:
# <set comment='spin 1'>
# <set comment='kpoint 1'>
# <r>-30.3711 1.0000</r>
# <r>-30.3709 1.0000</r>
rNodes = setNode.findall('r') # short form: <r>
nval = max( len( rcNodes), len( rNodes))
if dimLens[idim] == 0: dimLens[idim] = nval
if nval != dimLens[idim]: throwerr('irregular array')
resVec = nval * [None]
if len(rcNodes) > 0: # long form: <rc> <c>
for ival in range( nval):
cNodes = rcNodes[ival].findall('c')
if len(cNodes) != nfield: throwerr('wrong num fields')
stg = cNodes[ifield].text
resVec[ival] = stg
elif len(rNodes) > 0: # short form: <r>
for ival in range( nval):
txt = rNodes[ival].text
toks = txt.split()
if len(toks) != nfield: throwerr('wrong num fields')
resVec[ival] = toks[ifield]
else: throwerr('unknown array structure')
# Strip all strings.
# Set fieldTypes[ifield] to max( current type, all found types)
# Types are: 0:int, 1:float, 2:string
for ival in range( nval):
resVec[ival] = resVec[ival].strip()
stg = resVec[ival]
ftype = 2 # assume worst case: string
try:
float( stg)
ftype = 1
except ValueError: pass
try:
int( stg)
ftype = 0
except ValueError: pass
fieldTypes[ifield] = max( fieldTypes[ifield], ftype)
else: # else idim < ndim - 1. Recursion.
setNodes = setNode.findall('set')
nset = len( setNodes)
if dimLens[idim] == 0: dimLens[idim] = nset
if nset != dimLens[idim]: throwerr('irregular array')
resVec = nset * [None]
for iset in range(nset):
resVec[iset] = getArraySub( # recursion
bugLev,
setNodes[iset],
ifield,
fieldTypes,
idim + 1,
dimLens)
return resVec
#====================================================================
# Not used
def convertTypesUnused( tp, vec):
'''
Recursively converts the elements of an array ``vec``
from str to the specified type.
**Parameters**:
* tp (Python type): The desired type.
* vec (str[] or str[][] or ...): the array to be converted.
**Returns**:
* A Python array with elements of type ``tp``.
'''
if isinstance( vec[0], str):
for ii in range(len(vec)):
vec[ii] = tp( vec[ii])
elif isinstance( vec[0], list):
for subVec in vec:
convertTypesUnused( tp, subVec) # recursion
else: throwerr('unknown array structure')
#====================================================================
def maxAbsDiff( mata, matb):
'''
Returns the max abs diff between two 2D numpy matrices.
**Parameters**:
* mata (numpy 2D array): Array to be compared.
* matb (numpy 2D array): Array to be compared.
**Returns**:
* float scalar: max_i( max_j( abs( mata[i][j] - matb[i][j])))
'''
(nrow,ncol) = mata.shape
if matb.shape != mata.shape: throwerr('maxAbsDiff: shape mismatch')
diffMat = abs( matb - mata)
res = max( map( max, diffMat))
return res
#====================================================================
def calcMatDeltas( mats):
'''
Returns the max abs diffs between adjacent pairs of a
list of 2D numpy matrices.
**Parameters**:
* mats (list of 2D numpy matrices)
**Returns**:
* deltas (float[]): deltas[k] = maxAbsDiff( mats[k-1], mats[k])
'''
nmat = len( mats)
deltas = []
for ii in range( 1, nmat):
delta = maxAbsDiff( mats[ii-1], mats[ii])
deltas.append( delta)
return deltas
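# A small illustrative check of maxAbsDiff and calcMatDeltas on toy
# matrices (not part of the original module):
#
#   a = np.zeros((2, 2))
#   b = np.ones((2, 2))
#   maxAbsDiff(a, b)          # -> 1.0
#   calcMatDeltas([a, b, b])  # -> [1.0, 0.0]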
#====================================================================
def printMrr( vmap):
'''
Prints the Mrr map returned by getArrayByPath or getArrayByNode.
**Parameters**:
* vmap (map): the MRR map
**Returns**:
* None
'''
keys = vmap.keys()
keys.sort()
for key in keys:
if key.startswith('_'):
val = vmap[key]
print ' %s: %s' % (key, val,)
for key in vmap['_fieldNames']:
val = vmap[key]
print ' %s: %s' % (key, val,)
print ''
#====================================================================
def throwerr( msg):
'''
Prints an error message and raises Exception.
**Parameters**:
* msg (str): Error message.
**Returns**
* (Never returns)
**Raises**
* Exception
'''
print msg
print >> sys.stderr, msg
raise Exception( msg)
#====================================================================
if __name__ == '__main__': main()
#====================================================================
|
ssullivangh/nrelmat
|
nrelmat/ScanXml.py
|
Python
|
gpl-3.0
| 52,847
|
#! /usr/bin/python
import sys
sys.path.append('../')
from toolbox.hreaders import token_readers as reader
from toolbox.hmappers import simple_mapper as mapper
_map = mapper.Simple_mapper(1,[0,3])
_reader = reader.Token_reader()
for line in sys.stdin:
words = _reader.read_all(line)
print '{}\t{}'.format(_map.get_key(words), _map.get_value(words))
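# Hedged usage sketch (not part of the original script): as a Hadoop
# Streaming mapper this would typically be invoked along the lines of
#
#   cat input.txt | ./mapper.py | sort | ./reducer.py
#
# Presumably Simple_mapper(1, [0, 3]) emits the token at index 1 as the key
# and the tokens at indices 0 and 3 as the value, but that depends on the
# toolbox package, which is not shown here.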
|
xavi783/u-tad
|
Modulo4/ejercicio3/mapper.py
|
Python
|
gpl-3.0
| 358
|
from __future__ import print_function
import datetime
import errno
import os
import sys
import core
from core import logger, nzbToMediaDB
from core.nzbToMediaUtil import extractFiles, CharReplace, convert_to_ascii, get_nzoid
from core.nzbToMediaUtil import flatten, listMediaFiles, import_subs
from core.transcoder import transcoder
from libs.six import text_type
class Filechecker(object):
def checkfiles(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None):
cfg = dict(core.CFG[section][inputCategory])
# auto-detect correct fork
#fork, fork_params = autoFork(section, inputCategory)
nzbExtractionBy = cfg.get("nzbExtractionBy", "Downloader")
status = int(failed)
if status > 0 and core.NOEXTRACTFAILED:
extract = 0
else:
extract = int(cfg.get("extract", 0))
if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name.
dirName = os.path.split(os.path.normpath(dirName))[0]
SpecificPath = os.path.join(dirName, str(inputName))
cleanName = os.path.splitext(SpecificPath)
if cleanName[1] == ".nzb":
SpecificPath = cleanName[0]
if os.path.isdir(SpecificPath):
dirName = SpecificPath
# Attempt to create the directory if it doesn't exist and ignore any
# error stating that it already exists. This fixes a bug where SickRage
# won't process the directory because it doesn't exist.
try:
os.makedirs(dirName) # Attempt to create the directory
except OSError as e:
# Re-raise the error if it wasn't about the directory not existing
if e.errno != errno.EEXIST:
raise
#if 'process_method' not in fork_params or (clientAgent in ['nzbget', 'sabnzbd'] and nzbExtractionBy != "Destination"):
#if inputName:
# process_all_exceptions(inputName, dirName)
# inputName, dirName = convert_to_ascii(inputName, dirName)
# Now check if tv files exist in destination.
if not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False):
if listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract:
logger.debug('Checking for archives to extract in directory: {0}'.format(dirName))
core.extractFiles(dirName)
inputName, dirName = convert_to_ascii(inputName, dirName)
if listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): # Check that a video exists. if not, assume failed.
flatten(dirName)
# Check video files for corruption
good_files = 0
num_files = 0
for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False):
num_files += 1
if transcoder.isVideoGood(video, status):
good_files += 1
import_subs(video)
if num_files > 0:
if good_files == num_files and not status == 0:
logger.info('Found Valid Videos. Setting status Success')
status = 0
failed = 0
if good_files < num_files and status == 0:
logger.info('Found corrupt videos. Setting status Failed')
status = 1
failed = 1
if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
print('[NZB] MARK=BAD')
if failureLink:
failureLink += '&corrupt=true'
elif nzbExtractionBy == "Destination":
logger.info("Check for media files ignored because nzbExtractionBy is set to Destination.")
if int(failed) == 0:
logger.info("Setting Status Success.")
status = 0
failed = 0
else:
logger.info("Downloader reported an error during download or verification. Processing this as a failed download.")
status = 1
failed = 1
else:
logger.warning("No media files found in directory {0}. Processing this as a failed download".format(dirName), section)
status = 1
failed = 1
return status
#definitions
# post-processing
def startproc(inputDirectory, inputName=None, status=0, clientAgent='manual', download_id=None, inputCategory=None, failureLink=None):
if core.SAFE_MODE and inputDirectory == core.NZB_DEFAULTDIR:
logger.error(
'The input directory:[{0}] is the Default Download Directory. Please configure category directories to prevent processing of other media.'.format(
inputDirectory))
return [-1, ""]
if not download_id and clientAgent == 'sabnzbd':
download_id = get_nzoid(inputName)
if clientAgent != 'manual' and not core.DOWNLOADINFO:
logger.debug('Adding NZB download info for directory {0} to database'.format(inputDirectory))
myDB = nzbToMediaDB.DBConnection()
inputDirectory1 = inputDirectory
inputName1 = inputName
try:
encoded, inputDirectory1 = CharReplace(inputDirectory)
encoded, inputName1 = CharReplace(inputName)
except:
pass
controlValueDict = {"input_directory": text_type(inputDirectory1)}
newValueDict = {"input_name": text_type(inputName1),
"input_hash": text_type(download_id),
"input_id": text_type(download_id),
"client_agent": text_type(clientAgent),
"status": 0,
"last_update": datetime.date.today().toordinal()
}
myDB.upsert("downloads", newValueDict, controlValueDict)
# auto-detect section
if inputCategory is None:
inputCategory = 'UNCAT'
usercat = inputCategory
section = core.CFG.findsection(inputCategory).isenabled()
if section is None:
section = core.CFG.findsection("ALL").isenabled()
if section is None:
logger.error(
'Category:[{0}] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.'.format(
inputCategory))
return [-1, ""]
else:
usercat = "ALL"
if len(section) > 1:
logger.error(
'Category:[{0}] is not unique, {1} are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.'.format(
inputCategory, section.keys()))
return [-1, ""]
if section:
sectionName = section.keys()[0]
logger.info('Auto-detected SECTION:{0}'.format(sectionName))
else:
logger.error(
"Unable to locate a section with subsection:{0} enabled in your autoProcessMedia.cfg, exiting!".format(
inputCategory))
return [-1, ""]
cfg = dict(core.CFG[sectionName][usercat])
extract = int(cfg.get("extract", 0))
try:
if int(cfg.get("remote_path")) and not core.REMOTEPATHS:
logger.error(
'Remote Path is enabled for {0}:{1} but no Network mount points are defined. Please check your autoProcessMedia.cfg, exiting!'.format(
sectionName, inputCategory))
return [-1, ""]
except:
logger.error(
'Remote Path {0} is not valid for {1}:{2} Please set this to either 0 to disable or 1 to enable!'.format(
cfg.get("remote_path"), sectionName, inputCategory))
inputName, inputDirectory = convert_to_ascii(inputName, inputDirectory)
if extract == 1:
logger.debug('Checking for archives to extract in directory: {0}'.format(inputDirectory))
extractFiles(inputDirectory)
logger.info("Calling Checker to check the Files and Convert them to mp4")
result = Filechecker().checkfiles(sectionName, inputDirectory, inputName, status, clientAgent, download_id, inputCategory, failureLink)
return result
def main():
#START
# Initialize the config
section = None
args = sys.argv
core.initialize(section)
logger.info("#########################################################")
logger.info("## ..::[{0}]::.. ##".format(os.path.basename(__file__)))
logger.info("#########################################################")
# debug command line options
logger.debug("Options passed into nzbToMedia: {0}".format(args))
# Post-Processing Result
result = [0, ""]
status = 0
#set arguments to variable args, because this is used as a default variable later
#split the arguments passed in by SABnzbd
# SABnzbd Pre 0.7.17
if len(args) == core.SABNZB_NO_OF_ARGUMENTS:
# SABnzbd argv:
# 1 The final directory of the job (full path)
# 2 The original name of the NZB file
# 3 Clean version of the job name (no path info and ".nzb" removed)
# 4 Indexer's report number (if supported)
# 5 User-defined category
# 6 Group that the NZB was posted in e.g. alt.binaries.x
# 7 Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2
clientAgent = 'sabnzbd'
logger.info("Script triggered from SABnzbd")
result = startproc(args[1], inputName=args[2], status=args[7], inputCategory=args[5], clientAgent=clientAgent,
download_id='')
# SABnzbd 0.7.17+
elif len(args) >= core.SABNZB_0717_NO_OF_ARGUMENTS:
# SABnzbd argv:
# 1 The final directory of the job (full path)
# 2 The original name of the NZB file
# 3 Clean version of the job name (no path info and ".nzb" removed)
# 4 Indexer's report number (if supported)
# 5 User-defined category
# 6 Group that the NZB was posted in e.g. alt.binaries.x
# 7 Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2
# 8 Failure URL
clientAgent = 'sabnzbd'
logger.info("Script triggered from SABnzbd 0.7.17+")
result = startproc(args[1], inputName=args[2], status=args[7], inputCategory=args[5], clientAgent=clientAgent, download_id='', failureLink=''.join(args[8:]))
##########here
sys.exit(result)
if __name__ == '__main__':
exit(main())
|
Filechaser/nzbToMedia
|
checkfilesinfolder.py
|
Python
|
gpl-3.0
| 10,729
|
# -*- encoding: utf-8 -*-
# pylint: disable=no-self-use
"""Test class for Organization CLI"""
import random
from fauxfactory import gen_string
from random import randint
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.factory import (
make_domain, make_hostgroup, make_lifecycle_environment,
make_medium, make_org, make_proxy, make_subnet, make_template, make_user,
make_compute_resource,)
from robottelo.cli.lifecycleenvironment import LifecycleEnvironment
from robottelo.cli.org import Org
from robottelo.config import settings
from robottelo.constants import FOREMAN_PROVIDERS
from robottelo.decorators import (
run_only_on,
skip_if_bug_open,
stubbed,
tier1,
tier2,
)
from robottelo.test import CLITestCase
def valid_names():
"""Random data for positive creation
Note: The maximum allowed length of org name is 242 only. This is an
intended behavior (Also note that 255 is the standard across other
entities.)
"""
return (
{'name': gen_string("latin1")},
{'name': gen_string("utf8")},
{'name': gen_string("alpha", randint(1, 242))},
{'name': gen_string("alphanumeric", randint(1, 242))},
{'name': gen_string("numeric", randint(1, 242))},
{'name': gen_string("html")},
)
def valid_name_label_combo():
"""Random simpler data for positive creation
Use this when name and label must match. Labels cannot contain the same
data type as names, so this is a bit limited compared to other tests.
Label cannot contain characters other than ascii alpha numerals, '_', '-'.
"""
return (
{'name': gen_string("alpha")},
{'name': gen_string("alphanumeric")},
{'name': gen_string("numeric")},
{'name': '{0}-{1}'.format(gen_string("alpha", 5),
gen_string("alpha", 5))},
{'name': '{0}_{1}'.format(gen_string("alpha", 5),
gen_string("alpha", 5))},
)
def valid_names_simple():
"""Random data for alpha, numeric and alphanumeric
Note: The maximum allowed length of org name is 242 only. This is an
intended behavior (Also note that 255 is the standard across other
entities.)
"""
return(
gen_string('alpha', randint(1, 242)),
gen_string('numeric', randint(1, 242)),
gen_string('alphanumeric', randint(1, 242))
)
def valid_names_simple_all():
"""Random data for alpha, numeric and alphanumeric
Note: The maximum allowed length of org name is 242 only. This is an
intended behavior (Also note that 255 is the standard across other
entities.)
"""
return(
gen_string('alpha', randint(1, 242)),
gen_string('alphanumeric', randint(1, 242)),
gen_string('numeric', randint(1, 242)),
gen_string('latin1', 15),
gen_string('utf8', 15),
)
def valid_name_label():
"""Random data for Label tests
Label cannot contain characters other than ascii alpha numerals, '_', '-'.
Note: The maximum allowed length of org name is 242 only. This is an
intended behavior (Also note that 255 is the standard across other
entities.)
"""
return (
{'name': gen_string("latin1"),
'label': gen_string("alpha")},
{'name': gen_string("utf8"),
'label': gen_string("alpha")},
{'name': gen_string("alpha", randint(1, 242)),
'label': gen_string("alpha")},
{'name': gen_string("alphanumeric", randint(1, 242)),
'label': gen_string("alpha")},
{'name': gen_string("numeric", randint(1, 242)),
'label': gen_string("alpha")},
{'name': gen_string("html"),
'label': gen_string("alpha")},
)
def valid_name_desc():
"""Random data for Descriptions tests
Note: The maximum allowed length of org name is 242 only. This is an
intended behavior (Also note that 255 is the standard across other
entities.)
"""
return (
{'name': gen_string("latin1"),
'description': gen_string("latin1")},
{'name': gen_string("utf8"),
'description': gen_string("utf8")},
{'name': gen_string("alpha", randint(1, 242)),
'description': gen_string("alpha")},
{'name': gen_string("alphanumeric", randint(1, 242)),
'description': gen_string("alphanumeric")},
{'name': gen_string("numeric", randint(1, 242)),
'description': gen_string("numeric")},
{'name': gen_string("html"),
'description': gen_string("html")},
)
def valid_name_desc_label():
"""Random data for Labels and Description
Note: The maximum allowed length of org name is 242 only. This is an
intended behavior (Also note that 255 is the standard across other
entities.)
"""
return (
{'name': gen_string("alpha", randint(1, 242)),
'description': gen_string("alpha"),
'label': gen_string("alpha")},
{'name': gen_string("alphanumeric", randint(1, 242)),
'description': gen_string("alphanumeric"),
'label': gen_string("alpha")},
{'name': gen_string("numeric", randint(1, 242)),
'description': gen_string("numeric"),
'label': gen_string("alpha")},
{'name': gen_string("html"),
'description': gen_string("numeric"),
'label': gen_string("alpha")},
)
def invalid_name_label():
"""Random invalid name and label data"""
return(
{'label': gen_string('alpha'),
'name': gen_string('alpha', 300)},
{'label': gen_string('alpha'),
'name': gen_string('numeric', 300)},
{'label': gen_string('alpha'),
'name': gen_string('alphanumeric', 300)},
{'label': gen_string('alpha'),
'name': gen_string('utf8', 300)},
{'label': gen_string('alpha'),
'name': gen_string('latin1', 300)},
{'label': gen_string('alpha'),
'name': gen_string('html', 300)},
)
def positive_desc_data():
"""Random valid data for description"""
return(
{'description': gen_string("latin1")},
{'description': gen_string("utf8")},
{'description': gen_string("alpha")},
{'description': gen_string("alphanumeric")},
{'description': gen_string("numeric")},
{'description': gen_string("html")},
)
def invalid_name_data():
"""Random invalid name data"""
return(
{'name': ' '},
{'name': gen_string('alpha', 300)},
{'name': gen_string('numeric', 300)},
{'name': gen_string('alphanumeric', 300)},
{'name': gen_string('utf8', 300)},
{'name': gen_string('latin1', 300)},
{'name': gen_string('html', 300)}
)
class OrganizationTestCase(CLITestCase):
"""Tests for Organizations via Hammer CLI"""
# Tests for issues
# This Bugzilla bug is private. It is impossible to fetch info about it.
@tier1
def test_verify_bugzilla_1078866(self):
"""@Test: hammer organization <info,list> --help types information
doubled
@Feature: Organization
@Assert: no duplicated lines in usage message
"""
# org list --help:
result = Org.list({'help': True})
# get list of lines and check they all are unique
lines = [line['message'] for line in result]
self.assertEqual(len(set(lines)), len(lines))
# org info --help: info returns more lines (obviously); check they are unique as well
result = Org.info({'help': True})
# get list of lines and check they all are unique
lines = [line for line in result['options']]
self.assertEqual(len(set(lines)), len(lines))
# CRUD
@tier1
def test_positive_create_with_name(self):
"""@test: Create organization with valid name only
@feature: Organization
@assert: organization is created, label is auto-generated
"""
for test_data in valid_names():
with self.subTest(test_data):
org = make_org(test_data)
self.assertEqual(org['name'], test_data['name'])
@tier1
def test_positive_create_with_matching_name_label(self):
"""@test: Create organization with valid matching name and label only
@feature: Organization
@assert: organization is created, label matches name
"""
for test_data in valid_name_label_combo():
with self.subTest(test_data):
test_data['label'] = test_data['name']
org = make_org(test_data)
self.assertEqual(org['name'], org['label'])
@skip_if_bug_open('bugzilla', 1142821)
@tier1
def test_positive_create_with_unmatched_name_label(self):
"""@test: Create organization with valid unmatching name and label only
@feature: Organization
@assert: organization is created, label does not match name
@bz: 1142821
"""
for test_data in valid_name_label():
with self.subTest(test_data):
org = make_org(test_data)
self.assertNotEqual(org['name'], org['label'])
self.assertEqual(org['name'], test_data['name'])
self.assertEqual(org['label'], test_data['label'])
@tier1
def test_positive_create_with_name_description(self):
"""@test: Create organization with valid name and description only
@feature: Organization
@assert: organization is created, label is auto-generated
"""
for test_data in valid_name_desc():
with self.subTest(test_data):
org = make_org(test_data)
self.assertNotEqual(org['name'], org['description'])
self.assertEqual(org['name'], test_data['name'])
self.assertEqual(org['description'], test_data['description'])
@skip_if_bug_open('bugzilla', 1142821)
@tier1
def test_positive_create_with_name_label_description(self):
"""@test: Create organization with valid name, label and description
@feature: Organization
@assert: organization is created
@bz: 1142821
"""
for test_data in valid_name_desc():
with self.subTest(test_data):
test_data['label'] = gen_string('alpha')
org = make_org(test_data)
self.assertEqual(org['name'], test_data['name'])
self.assertEqual(org['description'], test_data['description'])
self.assertEqual(org['label'], test_data['label'])
@tier1
@stubbed('Needs to be improved')
def test_positive_list(self):
"""@Test: Check if Org can be listed
@Feature: Organization
@Assert: Org is listed
"""
Org.list()
@run_only_on('sat')
@tier2
def test_positive_add_subnet_by_name(self):
"""@Test: Add a subnet by its name
@Feature: Organization
@Assert: Subnet is added to the org
"""
for name in (gen_string('alpha'), gen_string('numeric'),
gen_string('alphanumeric'), gen_string('utf8'),
gen_string('latin1')):
with self.subTest(name):
org = make_org()
new_subnet = make_subnet({'name': name})
Org.add_subnet({
'name': org['name'],
'subnet': new_subnet['name'],
})
org = Org.info({'id': org['id']})
self.assertIn(name, org['subnets'][0])
@run_only_on('sat')
@tier2
@stubbed()
def test_positive_add_subnet_by_id(self):
"""@test: Add a subnet by its ID
@feature: Organization
@assert: Subnet is added to the org
"""
for name in (gen_string('alpha'), gen_string('numeric'),
gen_string('alphanumeric'), gen_string('utf8'),
gen_string('latin1')):
with self.subTest(name):
org = make_org()
new_subnet = make_subnet({'name': name})
Org.add_subnet({
'name': org['name'],
'subnet-id': new_subnet['id'],
})
org = Org.info({'id': org['id']})
self.assertIn(name, org['subnets'][0])
@run_only_on('sat')
@tier2
def test_positive_remove_subnet_by_name(self):
"""@Test: Add a subnet and then remove it by its name
@Feature: Organization
@Assert: Subnet is removed from the org
"""
org = make_org()
subnet = make_subnet()
Org.add_subnet({
'name': org['name'],
'subnet': subnet['name'],
})
org = Org.info({'id': org['id']})
self.assertEqual(len(org['subnets']), 1)
self.assertIn(subnet['name'], org['subnets'][0])
Org.remove_subnet({
'name': org['name'],
'subnet': subnet['name'],
})
org = Org.info({'id': org['id']})
self.assertEqual(len(org['subnets']), 0)
@run_only_on('sat')
@tier2
def test_positive_remove_subnet_by_id(self):
"""@Test: Remove a subnet and then remove it by its ID
@Feature: Organization
@Assert: Subnet is removed from the org
"""
org = make_org()
subnet = make_subnet()
Org.add_subnet({
'name': org['name'],
'subnet': subnet['name'],
})
org = Org.info({'id': org['id']})
self.assertEqual(len(org['subnets']), 1)
self.assertIn(subnet['name'], org['subnets'][0])
Org.remove_subnet({
'name': org['name'],
'subnet-id': subnet['id'],
})
org = Org.info({'id': org['id']})
self.assertEqual(len(org['subnets']), 0)
@tier2
def test_positive_add_user_by_name(self):
"""@Test: Add an user by its name
@Feature: Organization
@Assert: User is added to the org
"""
org = make_org()
user = make_user()
Org.add_user({
'name': org['name'],
'user': user['login'],
})
@tier2
def test_positive_add_user_by_id(self):
"""@Test: Add an user by its ID
@Feature: Organization
@Assert: User is added to the org
"""
org = make_org()
user = make_user()
Org.add_user({
'name': org['name'],
'user-id': user['id'],
})
@tier2
def test_positive_remove_user_by_id(self):
"""@Test: Check if a User can be removed from an Org
@Feature: Organization
@Assert: User is removed from the org
"""
org = make_org()
user = make_user()
Org.add_user({
'name': org['name'],
'user-id': user['id'],
})
Org.remove_user({
'name': org['name'],
'user-id': user['id'],
})
@tier2
@stubbed()
def test_positive_remove_user_by_name(self):
"""@test: Create different types of users then add/remove user
by using the organization name
@feature: Organization
@assert: The user is added then removed from the organization
@status: manual
"""
@tier2
@stubbed()
def test_positive_remove_admin_user_by_name(self):
"""@test: Create admin users then add user and remove it
by using the organization name
@feature: Organization
@assert: The user is added then removed from the organization
@status: manual
"""
@run_only_on('sat')
@tier2
def test_positive_add_hostgroup_by_name(self):
"""@Test: Add a hostgroup by its name
@Feature: Organization
@Assert: Hostgroup is added to the org
"""
org = make_org()
hostgroup = make_hostgroup()
Org.add_hostgroup({
'hostgroup': hostgroup['name'],
'name': org['name'],
})
@run_only_on('sat')
@tier2
def test_positive_remove_hostgroup_by_name(self):
"""@Test: Add a hostgroup and then remove it by its name
@Feature: Organization
@Assert: Hostgroup is removed from the org
"""
org = make_org()
hostgroup = make_hostgroup()
Org.add_hostgroup({
'hostgroup': hostgroup['name'],
'name': org['name'],
})
Org.remove_hostgroup({
'hostgroup': hostgroup['name'],
'name': org['name'],
})
@run_only_on('sat')
@tier2
@stubbed()
def test_positive_remove_hostgroup_by_id(self):
"""@test: Add a hostgroup and remove it by its ID
@feature: Organization
@assert: hostgroup is added to organization then removed
@status: manual
"""
@run_only_on('sat')
@tier2
def test_positive_add_compresource_by_name(self):
"""@Test: Add a compute resource by its name
@Feature: Organization
@Assert: Compute Resource is added to the org
"""
org = make_org()
compute_res = make_compute_resource({
'provider': FOREMAN_PROVIDERS['libvirt'],
'url': "qemu+tcp://%s:16509/system" % settings.server.hostname
})
Org.add_compute_resource({
'compute-resource': compute_res['name'],
'name': org['name'],
})
org = Org.info({'id': org['id']})
self.assertEqual(org['compute-resources'][0], compute_res['name'])
@tier2
def test_positive_add_compresource_by_id(self):
"""@Test: Add a compute resource by its ID
@Feature: Organization
@Assert: Compute Resource is added to the org
"""
compute_res = make_compute_resource()
org = make_org({'compute-resource-ids': compute_res['id']})
self.assertEqual(len(org['compute-resources']), 1)
self.assertEqual(org['compute-resources'][0], compute_res['name'])
@tier2
def test_positive_add_compresources_by_id(self):
"""@Test: Add multiple compute resources by their IDs
resources
@Feature: Organization
@Assert: All compute resources are added to the org
"""
cr_amount = random.randint(3, 5)
resources = [make_compute_resource() for _ in range(cr_amount)]
org = make_org({
'compute-resource-ids':
[resource['id'] for resource in resources],
})
self.assertEqual(len(org['compute-resources']), cr_amount)
for resource in resources:
self.assertIn(resource['name'], org['compute-resources'])
@run_only_on('sat')
@stubbed()
def test_positive_remove_compresource_by_id(self):
"""@Test: Add a compute resource and then remove it by its ID
@Feature: Organization
@Assert: Compute resource is removed from the org
@status: manual
"""
@run_only_on('sat')
@tier2
def test_positive_add_medium_by_name(self):
"""@Test: Add a medium by its name
@Feature: Organization
@Assert: Medium is added to the org
"""
org = make_org()
medium = make_medium()
Org.add_medium({
'name': org['name'],
'medium': medium['name'],
})
org = Org.info({'id': org['id']})
self.assertIn(medium['name'], org['installation-media'])
@run_only_on('sat')
@tier2
def test_positive_remove_medium_by_id(self):
"""@Test: Add a compute resource and then remove it by its ID
@Feature: Organization
@Assert: Medium is removed from the org
"""
org = make_org()
medium = make_medium()
Org.add_medium({
'name': org['name'],
'medium': medium['name']
})
Org.remove_medium({
'name': org['name'],
'medium': medium['name']
})
org = Org.info({'id': org['id']})
self.assertNotIn(medium['name'], org['installation-media'])
@run_only_on('sat')
@tier2
def test_positive_add_template_by_name(self):
"""@Test: Add a provisioning template by its name
@Feature: Organization
@Assert: Template is added to the org
"""
for name in valid_names_simple_all():
with self.subTest(name):
org = make_org()
template = make_template({
'content': gen_string('alpha'),
'name': name,
})
Org.add_config_template({
'config-template': template['name'],
'name': org['name'],
})
org = Org.info({'id': org['id']})
self.assertIn(
u'{0} ({1})'. format(template['name'], template['type']),
org['templates']
)
@tier2
def test_positive_add_template_by_id(self):
"""@Test: Add a provisioning template by its ID
@Feature: Organization
@Assert: Template is added to the org
"""
conf_templ = make_template()
org = make_org({'config-template-ids': conf_templ['id']})
self.assertIn(
u'{0} ({1})'.format(conf_templ['name'], conf_templ['type']),
org['templates']
)
@tier2
def test_positive_add_templates_by_id(self):
"""@Test: Add multiple provisioning templates by their IDs
@Feature: Organization
@Assert: All provisioning templates are added to the org
"""
templates_amount = random.randint(3, 5)
templates = [make_template() for _ in range(templates_amount)]
org = make_org({
'config-template-ids':
[template['id'] for template in templates],
})
self.assertGreaterEqual(len(org['templates']), templates_amount)
for template in templates:
self.assertIn(
u'{0} ({1})'.format(template['name'], template['type']),
org['templates']
)
@run_only_on('sat')
@tier2
def test_positive_remove_template_by_name(self):
"""@Test: Add a provisioning template and then remove it by its name
@Feature: Organization
@Assert: Template is removed from the org
"""
for name in valid_names_simple_all():
with self.subTest(name):
org = make_org()
template = make_template({
'content': gen_string('alpha'),
'name': name,
})
# Add config-template
Org.add_config_template({
'name': org['name'],
'config-template': template['name']
})
result = Org.info({'id': org['id']})
self.assertIn(
u'{0} ({1})'. format(template['name'], template['type']),
result['templates'],
)
# Remove config-template
Org.remove_config_template({
'config-template': template['name'],
'name': org['name'],
})
result = Org.info({'id': org['id']})
self.assertNotIn(
u'{0} ({1})'. format(template['name'], template['type']),
result['templates'],
)
@run_only_on('sat')
@tier2
def test_positive_add_domain_by_name(self):
"""@test: Add a domain by its name
@Feature: Organization
@assert: Domain is added to organization
"""
org = make_org()
domain = make_domain()
Org.add_domain({
'domain': domain['name'],
'name': org['name'],
})
result = Org.info({'id': org['id']})
self.assertEqual(len(result['domains']), 1)
self.assertIn(domain['name'], result['domains'])
@run_only_on('sat')
@tier2
def test_positive_add_domain_by_id(self):
"""@test: Add a domain by its ID
@feature: Organization
@assert: Domain is added to organization
"""
org = make_org()
domain = make_domain()
Org.add_domain({
'domain-id': domain['id'],
'name': org['name'],
})
result = Org.info({'id': org['id']})
self.assertEqual(len(result['domains']), 1)
self.assertIn(domain['name'], result['domains'])
@run_only_on('sat')
@tier2
def test_positive_remove_domain_by_name(self):
"""@Test: Add a domain and then remove it by its name
@Feature: Organization
@Assert: Domain is removed from the org
"""
org = make_org()
domain = make_domain()
Org.add_domain({
'domain': domain['name'],
'name': org['name'],
})
result = Org.info({'id': org['id']})
self.assertEqual(len(result['domains']), 1)
self.assertIn(domain['name'], result['domains'])
Org.remove_domain({
'domain': domain['name'],
'name': org['name'],
})
result = Org.info({'id': org['id']})
self.assertEqual(len(result['domains']), 0)
@run_only_on('sat')
@tier2
def test_positive_remove_domain_by_id(self):
"""@test: Add a domain and then remove it by its ID
@feature: Organization
@assert: Domain is removed from the organization
"""
org = make_org()
domain = make_domain()
Org.add_domain({
'domain-id': domain['id'],
'name': org['name'],
})
result = Org.info({'id': org['id']})
self.assertEqual(len(result['domains']), 1)
self.assertIn(domain['name'], result['domains'])
Org.remove_domain({
'domain-id': domain['id'],
'id': org['id'],
})
result = Org.info({'id': org['id']})
self.assertEqual(len(result['domains']), 0)
@run_only_on('sat')
@tier2
def test_positive_add_lce(self):
"""@Test: Add a lifecycle environment
@Feature: Organization
@Assert: Lifecycle environment is added to the org
"""
# Create a lifecycle environment.
org_id = make_org()['id']
lc_env_name = make_lifecycle_environment(
{'organization-id': org_id})['name']
# Read back information about the lifecycle environment. Verify the
# sanity of that information.
response = LifecycleEnvironment.list({
'name': lc_env_name,
'organization-id': org_id,
})
self.assertEqual(response[0]['name'], lc_env_name)
@run_only_on('sat')
@tier2
def test_positive_remove_lce(self):
"""@Test: Add a lifecycle environment and then remove it
@Feature: Organization
@Assert: Lifecycle environment is removed from the org
"""
# Create a lifecycle environment.
org_id = make_org()['id']
lc_env_name = make_lifecycle_environment(
{'organization-id': org_id})['name']
lc_env_attrs = {
'name': lc_env_name,
'organization-id': org_id,
}
# Read back information about the lifecycle environment. Verify the
# sanity of that information.
response = LifecycleEnvironment.list(lc_env_attrs)
self.assertEqual(response[0]['name'], lc_env_name)
# Delete it.
LifecycleEnvironment.delete(lc_env_attrs)
# We should get a zero-length response when searching for the LC env.
response = LifecycleEnvironment.list(lc_env_attrs)
self.assertEqual(len(response), 0)
@run_only_on('sat')
@tier2
@stubbed("Needs to be re-worked!")
def test_positive_add_capsule_by_name(self):
"""@Test: Add a capsule by its name
@Feature: Organization
@Assert: Capsule is added to the org
"""
org = make_org()
proxy = make_proxy()
Org.add_smart_proxy({
'name': org['name'],
'smart-proxy': proxy['name'],
})
@run_only_on('sat')
@tier2
@stubbed()
def test_positive_add_capsule_by_id(self):
"""@test: Add a capsule by its ID
@feature: Organization
@assert: Capsule is added to the org
@status: manual
"""
@run_only_on('sat')
@stubbed("Needs to be re-worked!")
def test_positive_remove_capsule_by_name(self):
"""@Test: Add a capsule and then remove it by its name
@Feature: Organization
@Assert: Capsule is removed from the org
"""
org = make_org()
proxy = make_proxy()
Org.add_smart_proxy({
'name': org['name'],
'smart-proxy': proxy['name'],
})
Org.remove_smart_proxy({
'name': org['name'],
'smart-proxy': proxy['name'],
})
# Negative Create
@tier1
def test_negative_create_name_long(self):
"""@test: Create organization with valid label and description, name is
too long
@feature: Organization
@assert: organization is not created
"""
for test_data in invalid_name_label():
with self.subTest(test_data):
with self.assertRaises(CLIReturnCodeError):
Org.create({
'description': test_data['label'],
'label': test_data['label'],
'name': test_data['name'],
})
@tier1
def test_negative_create_name_blank(self):
"""@test: Create organization with valid label and description, name is
blank
@feature: Organization
@assert: organization is not created
"""
for test_data in valid_names_simple():
with self.subTest(test_data):
with self.assertRaises(CLIReturnCodeError):
Org.create({
'description': test_data,
'label': test_data,
'name': '',
})
@tier1
def test_negative_create_name_spaces(self):
"""@test: Create organization with valid label and description, name is
whitespace
@feature: Organization
@assert: organization is not created
"""
for test_data in valid_names_simple():
with self.subTest(test_data):
with self.assertRaises(CLIReturnCodeError):
Org.create({
'description': test_data,
'label': test_data,
'name': ' \t',
})
@tier1
def test_negative_create_same_name(self):
"""@test: Create organization with valid values, then create a new one
with same values.
@feature: Organization
@assert: organization is not created
"""
for test_data in valid_names_simple():
with self.subTest(test_data):
Org.create({
'description': test_data,
'label': test_data,
'name': test_data,
})
with self.assertRaises(CLIReturnCodeError):
Org.create({
'description': test_data,
'label': test_data,
'name': test_data,
})
# Positive Delete
@tier1
def test_positive_delete_by_id(self):
"""@test: Create organization with valid values then delete it
by ID
@feature: Organization
@assert: organization is deleted
"""
for test_data in valid_name_desc_label():
with self.subTest(test_data):
org = make_org(test_data)
Org.delete({'id': org['id']})
# Can we find the object?
with self.assertRaises(CLIReturnCodeError):
Org.info({'id': org['id']})
@tier1
def test_positive_delete_by_label(self):
"""@test: Create organization with valid values then delete it
by label
@feature: Organization
@assert: organization is deleted
"""
for test_data in valid_name_desc_label():
with self.subTest(test_data):
org = make_org(test_data)
Org.delete({'label': org['label']})
# Can we find the object?
with self.assertRaises(CLIReturnCodeError):
Org.info({'id': org['id']})
@tier1
def test_positive_delete_by_name(self):
"""@test: Create organization with valid values then delete it
by name
@feature: Organization
@assert: organization is deleted
"""
for test_data in valid_name_desc_label():
with self.subTest(test_data):
org = make_org(test_data)
Org.delete({'name': org['name']})
# Can we find the object?
with self.assertRaises(CLIReturnCodeError):
Org.info({'id': org['id']})
@tier1
def test_positive_update_name(self):
"""@test: Create organization with valid values then update its name
@feature: Organization
@assert: organization name is updated
"""
for test_data in valid_names():
with self.subTest(test_data):
org = make_org()
# Update the org name
Org.update({
'id': org['id'],
'new-name': test_data['name'],
})
# Fetch the org again
org = Org.info({'id': org['id']})
self.assertEqual(org['name'], test_data['name'])
@tier1
def test_positive_update_description(self):
"""@test: Create organization with valid values then update its
description
@feature: Organization
@assert: organization description is updated
"""
for test_data in positive_desc_data():
with self.subTest(test_data):
org = make_org()
# Update the org name
Org.update({
'description': test_data['description'],
'id': org['id'],
})
# Fetch the org again
org = Org.info({'id': org['id']})
self.assertEqual(org['description'], test_data['description'])
@tier1
def test_positive_update_name_description(self):
"""@test: Create organization with valid values then update its name
and description
@feature: Organization
@assert: organization name and description are updated
"""
for test_data in valid_name_desc():
with self.subTest(test_data):
org = make_org()
# Update the org name
Org.update({
'description': test_data['description'],
'id': org['id'],
'new-name': test_data['name'],
})
# Fetch the org again
org = Org.info({'id': org['id']})
self.assertEqual(org['description'], test_data['description'])
self.assertEqual(org['name'], test_data['name'])
# Negative Update
@tier1
def test_negative_update_name(self):
"""@test: Create organization then fail to update its name
@feature: Organization
@assert: organization name is not updated
"""
for test_data in invalid_name_data():
with self.subTest(test_data):
org = make_org()
# Update the org name
with self.assertRaises(CLIReturnCodeError):
Org.update({
'id': org['id'],
'new-name': test_data['name'],
})
# This test also covers the redmine bug 4443
@tier1
def test_positive_search_by_name(self):
"""@test: Can search for an organization by name
@feature: Organization
@assert: organization is created and can be searched by name
"""
for test_data in valid_names():
with self.subTest(test_data):
org = make_org(test_data)
# Can we find the new object?
result = Org.exists(search=('name', org['name']))
self.assertEqual(org['name'], result['name'])
@tier1
def test_positive_search_by_label(self):
"""@test: Can search for an organization by name
@feature: Organization
@assert: organization is created and can be searched by label
"""
for test_data in valid_names():
with self.subTest(test_data):
org = make_org(test_data)
# Can we find the new object?
result = Org.exists(search=('label', org['label']))
self.assertEqual(org['name'], result['name'])
@tier1
def test_positive_info_by_label(self):
"""@Test: Get org information by its label
@Feature: Organization
@Assert: Organization is created and info can be obtained by its label
"""
org = make_org()
result = Org.info({'label': org['label']})
self.assertEqual(org['id'], result['id'])
@tier1
def test_positive_info_by_name(self):
"""@Test: Get org information by its name
@Feature: Organization
@Assert: Organization is created and info can be obtained by its name
"""
org = make_org()
result = Org.info({'name': org['name']})
self.assertEqual(org['id'], result['id'])
|
tkolhar/robottelo
|
tests/foreman/cli/test_organization.py
|
Python
|
gpl-3.0
| 38,207
|
from argparse import ArgumentParser
import os.path
class CliHelper(object):
@staticmethod
def readable_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return open(arg, 'r') # return an open file handle
class CliArgumentParser(object):
def __init__(self):
self._parser = self.setup_parser()
def parse(self, argv):
return self._parser.parse_args(argv)
@staticmethod
def setup_parser():
parser = ArgumentParser()
parser.add_argument(
"-t",
dest="target_hosts",
required=True,
help="Set a target range of addresses to target. Ex 10.11.1.1-255")
parser.add_argument(
"-o",
dest="output_directory",
required=True,
help="Set the output directory. Ex /root/Documents/labs/")
parser.add_argument(
"-w",
dest="wordlist",
required=False,
help="Set the wordlist to use for generated commands."
" Ex /usr/share/wordlist.txt",
default=False)
parser.add_argument(
"-p",
dest="port",
required=False,
help="Set the port to use. Leave blank to use discovered ports. "
"Useful to force virtual host "
"scanning on non-standard webserver ports.",
default=80)
parser.add_argument(
"--pingsweep",
dest="ping_sweep",
action="store_true",
help="Write a new target.txt by performing "
"a ping sweep and discovering live hosts.",
default=False)
parser.add_argument("--dns", "--dnssweep",
dest="find_dns_servers",
action="store_true",
help="Find DNS servers from a list of targets.",
default=False)
parser.add_argument("--services",
dest="perform_service_scan",
action="store_true",
help="Perform service scan over targets.",
default=False)
parser.add_argument(
"--hostnames",
dest="hostname_scan",
action="store_true",
help="Attempt to discover target hostnames and "
"write to 0-name.txt and hostnames.txt.",
default=False)
parser.add_argument("--snmp",
dest="perform_snmp_walk",
action="store_true",
help="Perform service scan over targets.",
default=False)
parser.add_argument(
"--quick",
dest="quick",
action="store_true",
required=False,
help="Move to the next target after "
"performing a quick scan and writing "
"first-round recommendations.",
default=False)
parser.add_argument(
"--virtualhosts",
dest="virtualhosts",
action="store_true",
required=False,
help="Attempt to discover virtual hosts "
"using the specified wordlist.",
default=False)
parser.add_argument(
'--ignore-http-codes',
dest='ignore_http_codes',
type=str,
help='Comma separated list of http '
'codes to ignore with virtual host scans.',
default='404')
parser.add_argument(
'--ignore-content-length',
dest='ignore_content_length',
type=int,
help='Ignore content lengths of the specified amount. '
'This may become useful when a server returns a static page on '
'every virtual host guess.',
default=0)
parser.add_argument(
"--quiet",
dest="quiet",
action="store_true",
help="Supress banner and headers to limit "
"to comma dilimeted results only.",
default=False)
parser.add_argument("--no-udp",
dest="no_udp_service_scan",
action="store_true",
help="Disable UDP services scan over targets.",
default=False)
return parser
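# A minimal usage sketch (illustrative only; the target range and output
# directory below are made-up values):
#
#   args = CliArgumentParser().parse(
#       ["-t", "10.11.1.1-255", "-o", "/tmp/recon", "--services"])
#   print(args.target_hosts, args.output_directory, args.perform_service_scan)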
|
codingo/Reconnoitre
|
Reconnoitre/lib/core/input.py
|
Python
|
gpl-3.0
| 4,515
|
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
import logging
from enum import Enum
from lxml import etree
from osgeo import ogr
from ribxlib import models
logger = logging.getLogger(__name__)
# NAMESPACES
NS = {
"gml": "http://www.opengis.net/gml",
}
class Mode(Enum):
PREINSPECTION = 1 # Ordering party -> contractor.
INSPECTION = 2 # Contractor -> ordering party.
def parse(f, mode):
"""Parse a GWSW.Ribx / GWSW.Ribx-A document.
GWSW.Ribx and GWSW.Ribx-A are immature standards. Their current versions
have arguable deficiencies: incorrect use of namespaces, gml:point, etc.
In the absence of useful schemas, no attempt is made here to validate
documents. Only the information needed by uploadserver-site is extracted
and checked.
Args:
f (string): Full path to the file to be parsed.
mode (Enum): See ribx.parsers.Mode.
Returns:
A (ribx, log) tuple. The ribxlib.models.Ribx instance carries
the pipes, manholes and drains (to be) inspected/cleaned.
Log is a list that contains all parsing errors.
"""
parser = etree.XMLParser()
try:
tree = etree.parse(f, parser)
except etree.XMLSyntaxError as e:
logger.error(e)
return models.Ribx(), _log(parser)
# At this point, the document is well formed.
# Even if no exception was raised, the error log might not be empty: it may
# contain warnings, for example. TODO: should these be returned as well?
error_log = _log(parser)
ribx = models.Ribx()
inspection_pipe_parser = TreeParser(
tree, models.InspectionPipe, mode, error_log)
cleaning_pipe_parser = TreeParser(
tree, models.CleaningPipe, mode, error_log)
drain_parser = TreeParser(
tree, models.Drain, mode, error_log)
inspection_manhole_parser = TreeParser(
tree, models.InspectionManhole, mode, error_log)
cleaning_manhole_parser = TreeParser(
tree, models.CleaningManhole, mode, error_log)
ribx.inspection_pipes = inspection_pipe_parser.elements()
ribx.cleaning_pipes = cleaning_pipe_parser.elements()
ribx.inspection_manholes = inspection_manhole_parser.elements()
ribx.cleaning_manholes = cleaning_manhole_parser.elements()
ribx.drains = drain_parser.elements()
return ribx, error_log
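# A minimal usage sketch (illustrative only; the file path is made up and the
# attribute names follow the models referenced above):
#
#   ribx, error_log = parse('/tmp/example.ribxA', Mode.INSPECTION)
#   for pipe in ribx.inspection_pipes:
#       print(pipe.ref, pipe.inspection_date)
#   for error in error_log:
#       print(error['line'], error['message'])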
def _log(parser, level=etree.ErrorLevels.FATAL):
"""Return a list of parser errors.
"""
return [{
'column': error.column,
'level': error.level_name,
'line': error.line,
'message': error.message,
} for error in parser.error_log.filter_from_level(level)]
def _log2(node, expr, e, error_log):
"""Append to a list of parser errors.
"""
message = "Element {} has problems with {}: {}".format(node.tag, expr, e)
error_log.append({'line': node.sourceline, 'message': message})
logger.error(message)
class TreeParser(object):
"""Parser for any kind of thing (Pipe / Manhole / Drain); all the tags
work very similarly, except for different prefixes.
"""
def __init__(self, tree, model, mode, error_log):
self.tree = tree
self.model = model
self.mode = mode
self.error_log = error_log
def elements(self):
"""Return all SewerElement model instances that are in the tree."""
elements = []
nodes = self.tree.xpath('//{}'.format(self.model.tag), namespaces=NS)
for node in nodes:
element_parser = ElementParser(node, self.model, self.mode)
try:
instance = element_parser.parse()
if instance:
elements.append(instance)
except Exception as e:
_log2(node, element_parser.expr, e, self.error_log)
return elements
class ElementParser(object):
"""Parse an individual node."""
def __init__(self, node, model, mode):
self.node = node
self.model = model
self.mode = mode
self.expr = '' # Keep it around so we can log it in case of error
def xpath(self, expr):
self.expr = expr
return self.node.xpath(expr, namespaces=NS)
def tag(self, name):
return self.model.tag[-1] + name
def parse(self):
# ?AA: reference
item_ref, item_sourceline = self.tag_value('AA', complain=True)
instance = self.model(item_ref)
instance.sourceline = item_sourceline
instance.inspection_date = self.get_inspection_date()
if issubclass(self.model, models.Pipe):
# We need two manholes and two sets of coordinates.
manhole1_ref, manhole1_sourceline = self.tag_value(
'AD', complain=True)
instance.manhole1 = models.Manhole(manhole1_ref)
instance.manhole1.sourceline = manhole1_sourceline
instance.manhole1.geom = self.tag_point('AE')
manhole2_ref, manhole2_sourceline = self.tag_value(
'AF', complain=True)
instance.manhole2 = models.Manhole(manhole2_ref)
instance.manhole2.sourceline = manhole2_sourceline
instance.manhole2.geom = self.tag_point('AG')
if issubclass(self.model, models.InspectionPipe):
if self.mode == Mode.INSPECTION:
instance.manhole_start = self.get_manhole_start(instance)
value, sourceline = self.tag_value('BQ')
if value is not None:
instance.expected_inspection_length = float(value)
value2, sourceline = self.tag_value('CG')
if value2 is not None:
instance.segment_length = float(value2)
else:
# ?AB holds coordinates
instance.geom = self.tag_point('AB')
# ?AQ: Ownership
instance.owner = self.tag_value('AQ')[0]
if self.model.has_video:
instance.media.update(self.get_video())
# Maybe inspection / cleaning wasn't possible
instance.work_impossible = self.get_work_impossible()
# If a *XC tag exists, this element was new, not planned
# *XC = "Ontbreekt in opdracht" (Dutch: missing from the work order)
if self.xpath(self.tag('XC')):
instance.new = True
# ZC nodes
for observation in self.get_observations():
instance.media.update(observation.media())
if issubclass(self.model, models.InspectionPipe):
for observation in self.get_observations():
instance.observations.append(observation)
# All well...
return instance
def tag_value(self, name, complain=False):
items = self.xpath(self.tag(name))
if not items:
if complain:
raise models.ParseException(
"Expected {} record".format(self.tag(name)))
else:
return None, None
item = items[0]
return item.text.strip(), item.sourceline
def tag_attribute(self, name, attribute):
item = self.xpath('{}/@{}'.format(self.tag(name), self.tag(attribute)))
if item:
return item[0]
def tag_point(self, name):
"""Interpret tag contents as gml:Point and return geom"""
node_set = self.xpath('{}/gml:Point/gml:pos'.format(self.tag(name)))
if node_set:
coordinates = map(float, node_set[0].text.split())
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(*coordinates)
return point
def get_manhole_start(self, instance):
"""Return a manhole ref that references the starting manhole of
a Pipe inspection, which corresponds to either manhole1 or manhole2 of
the pipe."""
manhole_start_ref, manhole_start_sourceline = self.tag_value('AB')
if (manhole_start_ref and manhole_start_ref not in
[instance.manhole1.ref, instance.manhole2.ref]):
raise Exception(
"manhole_start {} doesn't correspond to either manhole1 {} or "
"manhole2 {} of the pipe.".format(manhole_start_ref,
instance.manhole1.ref,
instance.manhole2.ref))
if not manhole_start_ref:
raise Exception("Inspection start node for pipes must be present. "
"Current mode: {}".format(self.mode))
return manhole_start_ref
def get_work_impossible(self):
xd, sourceline = self.tag_value('XD')
if xd:
xd_explanation = {
'A': 'Voertuig/obstakel op toegang',
'B': 'Straat niet toegankelijk voor het voertuig',
'C': 'Groen blokkeert de toegang',
'D': 'Niet aangetroffen',
'E': 'Deksel vast',
'Z': 'Andere reden.'
}.get(xd, None)
if xd_explanation is None:
raise Exception('Onbekende {}XD code "{}"'.format(
self.tag('XD'), xd))
attr_explanation = self.tag_attribute('XD', 'DE') or ''
if xd == 'Z' and not attr_explanation:
raise Exception(
'Expected explanation for Z code in {} tag'
.format(self.tag('DE')))
elif xd != 'Z' and attr_explanation:
raise Exception(
'Explanation in {} tag not allowed without Z code.'
.format(self.tag('DE')))
tag_explanation, sourceline = self.tag_value('DE')
explanation = "{} ({})\n{}\n{}".format(
xd_explanation, xd, attr_explanation,
tag_explanation).strip()
return explanation
def get_inspection_date_as_string(self):
"""?BF: inspection date
In inspection mode, skip everything without an inspection date.
?BF must be present for something considered to be inspected!
Occurrence: 0 for pre-inspection
Occurrence: 1 for inspection
"""
node_set = self.xpath(self.tag('BF'))
if self.mode == Mode.PREINSPECTION and len(node_set) != 0:
msg = "maxOccurs = 0 in {}".format(self.mode)
raise Exception(msg)
if self.mode == Mode.INSPECTION and len(node_set) < 1:
msg = "minOccurs = 1 in {}".format(self.mode)
raise Exception(msg)
if self.mode == Mode.INSPECTION and len(node_set) > 1:
msg = "maxOccurs = 1 in {}".format(self.mode)
raise Exception(msg)
if self.mode == Mode.INSPECTION:
return node_set[0].text.strip()
else:
return None
def get_inspection_time_as_string(self):
"""?BG: inspection date including the time.
?BG is always an optional field, while the date (?BF) is required in
INSPECTION mode. This method will combine both ?BF and ?BG into one
single datetime when a ?BG tag is found (there is a bit of
redundancy here).
Occurrence: 0 for pre-inspection
Occurrence: 0..1 for inspection
"""
node_set = self.xpath(self.tag('BG'))
if self.mode == Mode.PREINSPECTION and len(node_set) != 0:
msg = "maxOccurs = 0 in {}".format(self.mode)
raise Exception(msg)
if self.mode == Mode.INSPECTION and len(node_set) > 0:
return node_set[0].text.strip()
return None
def get_inspection_date(self):
"""PREINSPECTION/INSPECTION checks are delegated to
``get_inspection_time_as_string`` and
``get_inspection_date_as_string``."""
datestr = self.get_inspection_date_as_string()
timestr = self.get_inspection_time_as_string()
if timestr and datestr:
return datetime.strptime(
'{} {}'.format(datestr, timestr), "%Y-%m-%d %H:%M:%S")
if datestr:
return datetime.strptime(datestr, "%Y-%m-%d")
return None
def get_video(self):
# ?BS: file name of video
# Occurrence: 0 for pre-inspection
# Occurrence: 0..1 for inspection
node_set = self.xpath(self.tag('BS'))
if self.mode == Mode.PREINSPECTION and len(node_set) != 0:
msg = "maxOccurs = 0 in {}".format(self.mode)
raise Exception(msg)
if self.mode == Mode.INSPECTION and len(node_set) > 1:
msg = "maxOccurs = 1 in {}".format(self.mode)
raise Exception(msg)
if node_set:
video = node_set[0].text.strip()
models._check_filename(video)
return set([video])
else:
return set([])
def get_observations(self):
# ZC: observation
# Occurrence: 0 for pre-inspection
# Occurrence: * for inspection
node_set = self.xpath('ZC')
if self.mode == Mode.PREINSPECTION and len(node_set) != 0:
msg = "maxOccurs = 0 in {}".format(self.mode)
raise Exception(msg)
for zc_node in node_set:
yield models.Observation(zc_node)
|
nens/ribxlib
|
ribxlib/parsers.py
|
Python
|
gpl-3.0
| 13,369
|
#!/usr/bin/env python
"""
mtpy/mtpy/analysis/distortion.py
Contains functions for the determination of (galvanic) distortion of impedance
tensors.
The methods used follow Bibby et al 2005.
As it has been pointed out in that paper, there are various possibilities for
constraining the solution, esp. in the 2D case.
Here we just implement the 'most basic' variety for the calculation of the
distortion tensor.
Other methods can be implemented, but since the optimal assumptions and
constraints depend on the application, the actual place for further functions
is in an independent, personalised module.
Functions:
@UofA, 2013
(LK)
"""
#=================================================================
import numpy as np
import mtpy.core.z as MTz
import mtpy.analysis.geometry as MTge
import mtpy.utils.exceptions as MTex
import mtpy.utils.calculator as MTcc
#reload(MTex)
#reload(MTz)
#reload(MTcc)
#reload(MTge)
#=================================================================
#Finding the distortion of a Z array. Using the phase tensor
#(so, Z arrays are transformed into PTs first), following Bibby et al. 2005.
#
# First, try to find periods that indicate 1D. From them determine D incl.
# the g-factor by calculating a weighted mean. The g is assumed in order to
# cater for the missing unknown in the system; it is here set to det(X)^0.5.
# After that is found, the function no_distortion from the Z module can be
# called to obtain the unperturbed regional impedance tensor.
#Second, if there are no 1D sections: Find the strike angle, then rotate the
# Z to the principal axis. In order to do that, use the rotate(-strike) method
# of the Z module. Then take the real part of the rotated Z. As in the 1D case,
# we need an assumption to get rid of the (2) unknowns:
# set det(D) = P and trace(D) = T, where P,T can be chosen. Common choice is to
# set one of P,T to an arbitrary value (e.g. 1). Then check, for which values
# of the other parameter S^2 = T^2+4*P*X_12*X_21/det(X) > 0 holds.
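# A minimal usage sketch of the functions below (illustrative only;
# ``my_z_array`` is an assumed (n_freq, 2, 2) complex impedance array and is
# not defined in this module):
#
#     import mtpy.core.z as MTz
#     import mtpy.analysis.distortion as MTdi
#     z_obj = MTz.Z(z_array=my_z_array)
#     dis, diserr = MTdi.find_distortion(z_obj, g='det')
#     distortion, corrected_z_obj = MTdi.remove_distortion(z_object=z_obj)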
def find_distortion(z_object, g = 'det', lo_dims = None):
"""
find optimal distortion tensor from z object
automatically determine the dimensionality over all frequencies, then find
the appropriate distortion tensor D
"""
z_obj = z_object
if lo_dims is None :
lo_dims = MTge.dimensionality(z_object = z_obj)
try:
if len(lo_dims) != len(z_obj.z):
lo_dims = MTge.dimensionality(z_object = z_obj)
except:
pass
#dictionary of identity-matrix values (i.e. no distortion) to fall back on
#in case distortion cannot be calculated for that component
dis_dict = {(0,0):1, (0,1):0, (1,0):0, (1,1):1}
lo_dis = []
lo_diserr = []
if 1 in lo_dims:
idx_1 = np.where(np.array(lo_dims) == 1)[0]
for idx in idx_1:
realz = np.real(z_obj.z[idx])
imagz = np.imag(z_obj.z[idx])
mat1 = np.matrix([[0, -1],[1, 0]])
if g in ['01','10']:
gr = np.abs(realz[int(g[0]),int(g[1])])
gi = np.abs(imagz[int(g[0]),int(g[1])])
else:
gr = np.sqrt(np.linalg.det(realz))
gi = np.sqrt(np.linalg.det(imagz))
lo_dis.append(1./gr*np.dot(realz,mat1))
lo_dis.append(1./gi*np.dot(imagz,mat1))
if z_obj.zerr is not None:
#find errors of entries for calculating weights
lo_diserr.append(1./gr*\
np.array([[np.abs(z_obj.zerr[idx][0,1]),
np.abs(z_obj.zerr[idx][0,0])],
[np.abs(z_obj.zerr[idx][1,1]),
np.abs(z_obj.zerr[idx][1,0])]]))
lo_diserr.append(1./gi*\
np.array([[np.abs(z_obj.zerr[idx][0,1]),
np.abs(z_obj.zerr[idx][0,0])],
[np.abs(z_obj.zerr[idx][1,1]),
np.abs(z_obj.zerr[idx][1,0])]]))
else:
#otherwise go for evenly weighted average
lo_diserr.append(np.ones((2, 2)))
lo_diserr.append(np.ones((2, 2)))
dis = np.identity(2)
diserr = np.identity(2)
for i in range(2):
for j in range(2):
try:
dis[i,j], dummy = np.average(np.array([k[i, j]
for k in lo_dis]),
weights=np.array([1./(k[i,j])**2
for k in lo_diserr]),
returned=True)
diserr[i,j] = np.sqrt(1./dummy)
#if the distortion came out as nan set it to an appropriate
#value
if np.nan_to_num(dis[i,j]) == 0:
dis[i, j] = dis_dict[i, j]
diserr[i, j] = dis_dict[i, j]
except ZeroDivisionError:
print ('Could not get distortion for dis[{0}, {1}]'.format(
i, j)+' setting value to {0}'.format(dis_dict[i,j]))
dis[i, j] = dis_dict[i, j]
diserr[i, j] = dis_dict[i, j]*1e-6
return dis, diserr
if 2 in lo_dims:
idx_2 = np.where(np.array(lo_dims) == 2)[0]
#follow bibby et al. 2005 first alternative: P = 1
P = 1
lo_strikes = MTge.strike_angle(z_object = z_obj)
lo_tetms = []
lo_t = []
lo_tetm_errs =[]
for idx in idx_2:
mat = z_obj.z[idx]
ang = -lo_strikes[idx][0]
if np.isnan(ang):
ang = 0.
errmat = None
if z_obj.zerr is not None:
errmat = z_obj.zerr[idx]
tetm_mat, tetm_err = MTcc.rotatematrix_incl_errors(mat,
ang,
inmatrix_err=errmat)
lo_tetms.append(tetm_mat)
lo_tetm_errs.append(tetm_err)
realz = np.real(tetm_mat)
imagz = np.imag(tetm_mat)
lo_t.append(-4*P*realz[0,1]*realz[1,0]/np.linalg.det(realz) )
lo_t.append(-4*P*imagz[0,1]*imagz[1,0]/np.linalg.det(imagz) )
#since a different value of T does not yield a 'wrong' solution, no
#error is given/calculated for T !
try:
#add a small offset (0.001) to avoid numerical issues in the square roots
#later on
T = np.sqrt(max(lo_t))+0.001
except:
T = 2
for idx in range(len(lo_tetms)):
realz = np.real(lo_tetms[idx])
imagz = np.imag(lo_tetms[idx])
errmat = lo_tetm_errs[idx]
sr = np.sqrt(T**2+4*P*realz[0, 1]*realz[1, 0]/np.linalg.det(realz))
si = np.sqrt(T**2+4*P*imagz[0, 1]*imagz[1, 0]/np.linalg.det(imagz))
par_r = 2*realz[0, 1]/(T-sr)
orth_r = 2*realz[1, 0]/(T+sr)
par_i = 2*imagz[0, 1]/(T-si)
orth_i = 2*imagz[1, 0]/(T+si)
mat2_r = np.matrix([[0, 1./orth_r], [1./par_r, 0]])
mat2_i = np.matrix([[0, 1./orth_i], [1./par_i ,0]])
lo_dis.append(np.dot(realz,mat2_r))
lo_dis.append(np.dot(imagz,mat2_i))
if z_obj.zerr is not None:
#find errors of entries for calculating weights
sigma_sr = np.sqrt((-(2*P*realz[0,1]*realz[1,0]*\
realz[1,1]*errmat[0,0])/\
(np.linalg.det(realz)**2*sr))**2+\
((2*P*realz[0,0]*realz[1,0]*\
realz[1,1]*errmat[0,1])/\
(np.linalg.det(realz)**2*sr))**2+\
((2*P*realz[0,0]* realz[0,1]*\
realz[1,1]*errmat[1,0])/\
(np.linalg.det(realz)**2*sr))**2 +\
(-(2*P*realz[0,1]* realz[1,0]*\
realz[0,0]*errmat[1,1])/\
(np.linalg.det(realz)**2*sr))**2)
sigma_dr_11 = 0.5*sigma_sr
sigma_dr_22 = 0.5*sigma_sr
sigma_dr_12 = np.sqrt((mat2_r[0,1]/realz[0,0]*errmat[0,0])**2+\
(mat2_r[0,1]/realz[1,0]*errmat[1,0])**2+\
(0.5*realz[0,0]/realz[1,0]*sigma_sr)**2)
sigma_dr_21 = np.sqrt((mat2_r[1,0]/realz[1,1]*errmat[1,1])**2+\
(mat2_r[1,0]/realz[0,1]*errmat[0,1])**2+\
(0.5*realz[1,1]/realz[0,1]*sigma_sr)**2)
lo_diserr.append(np.array([[sigma_dr_11, sigma_dr_12],
[sigma_dr_21, sigma_dr_22]]))
#note: the denominators use si, the imaginary-part discriminant
sigma_si = np.sqrt((-(2*P*imagz[0,1]*imagz[1,0]*\
imagz[1,1]*errmat[0,0])/\
(np.linalg.det(imagz)**2*si))**2+\
((2*P*imagz[0,0]*imagz[1,0]*\
imagz[1,1]*errmat[0,1])/\
(np.linalg.det(imagz)**2*si))**2+\
((2*P*imagz[0,0]*imagz[0,1]*\
imagz[1,1]*errmat[1,0])/\
(np.linalg.det(imagz)**2*si))**2+\
(-(2*P*imagz[0,1]*imagz[1,0]*\
imagz[0,0]*errmat[1,1])/\
(np.linalg.det(imagz)**2*si))**2)
sigma_di_11 = 0.5*sigma_si
sigma_di_22 = 0.5*sigma_si
sigma_di_12 = np.sqrt((mat2_i[0,1]/imagz[0,0]*errmat[0,0])**2+\
(mat2_i[0,1]/imagz[1,0]*errmat[1,0])**2+\
(0.5*imagz[0,0]/imagz[1,0]*sigma_si)**2)
sigma_di_21 = np.sqrt((mat2_i[1,0]/imagz[1,1]*errmat[1,1])**2+\
(mat2_i[1,0]/imagz[0,1]*errmat[0,1])**2+\
(0.5*imagz[1,1]/imagz[0,1]*sigma_si)**2)
lo_diserr.append(np.array([[sigma_di_11, sigma_di_12],
[sigma_di_21, sigma_di_22]]))
else:
#otherwise go for evenly weighted average
lo_diserr.append(np.ones((2, 2)))
lo_diserr.append(np.ones((2, 2)))
dis = np.zeros((2, 2))
diserr = np.zeros((2, 2))
for i in range(2):
for j in range(2):
dis[i, j], dummy = np.average(np.array([k[i, j]
for k in lo_dis]),
weights=np.array([1./(k[i,j])**2
for k in lo_diserr]),
returned=True )
diserr[i, j] = np.sqrt(1./dummy)
return dis, diserr
#if only 3D, use identity matrix - no distortion calculated
dis = np.identity(2)
diserr = np.zeros((2, 2))
return dis, diserr
def find_1d_distortion(z_object, include_non1d = False):
"""
find 1D distortion tensor from z object
Only use the 1D part of the Z to determine D.
Treat all frequencies as 1D, if "include_non1d = True".
"""
if not isinstance(z_object, MTz.Z):
raise MTex.MTpyError_inputarguments('first argument must be an '
'instance of the Z class')
z_obj = z_object
lo_dims = MTge.dimensionality(z_object=z_obj)
if include_non1d is True:
lo_dims = [1 for i in lo_dims]
if len(np.where(np.array(lo_dims) == 1)[0]) == 0:
raise MTex.MTpyError_inputarguments('Z object does not have '
'frequencies with spatial 1D characteristic')
print lo_dims
return find_distortion(z_obj, lo_dims = lo_dims)
def find_2d_distortion(z_object, include_non2d=False):
"""
find 2D distortion tensor from z object
Only use the 2D part of the Z to determine D.
Treat all frequencies as 2D, if "include_non2d = True".
"""
if not isinstance(z_object, MTz.Z):
raise MTex.MTpyError_inputarguments('first argument must be an '
'instance of the Z class')
z_obj = z_object
lo_dims = MTge.dimensionality(z_object = z_obj)
#avoid the (standard) 1D distortion call -> remove all 1
lo_dims = [ 4 if i == 1 else i for i in lo_dims ]
if include_non2d is True:
lo_dims = [2 for i in lo_dims]
if len(np.where(np.array(lo_dims) == 2)[0]) == 0:
raise MTex.MTpyError_inputarguments('Z object does not have'
' frequencies with spatial 2D characteristic')
return find_distortion(z_obj, lo_dims = lo_dims)
def remove_distortion(z_array=None, z_object=None):
if z_array is not None:
z_obj = MTz.Z(z_array=z_array)
elif z_object is not None:
z_obj = z_object
#0. generate a Z object
#1. find distortion via function above,
#2. remove distortion via method of z object
dis, diserr = find_distortion(z_obj)
try:
distortion_tensor, zd, zd_err = z_obj.no_distortion(dis,
distortion_err_tensor=diserr)
distortion_z_obj = z_obj
distortion_z_obj.z = zd
distortion_z_obj.zerr = zd_err
return distortion_tensor, distortion_z_obj
except MTex.MTpyError_Z:
print 'Could not compute distortion tensor'
return np.identity(2), z_obj
|
geophysics/mtpy
|
mtpy/analysis/distortion.py
|
Python
|
gpl-3.0
| 14,307
|
# The loose feed parser that interfaces with an SGML parsing library
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class _LooseFeedParser(object):
contentparams = None
def __init__(self, baseuri=None, baselang=None, encoding=None, entities=None):
self.baseuri = baseuri or ''
self.lang = baselang or None
self.encoding = encoding or 'utf-8' # character encoding
self.entities = entities or {}
super(_LooseFeedParser, self).__init__()
@staticmethod
def _normalize_attributes(kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
v = v.replace('&amp;', '&')
return k, v
def decode_entities(self, element, data):
data = data.replace('&#60;', '&lt;')
data = data.replace('&#x3c;', '&lt;')
data = data.replace('&#x3C;', '&lt;')
data = data.replace('&#62;', '&gt;')
data = data.replace('&#x3e;', '&gt;')
data = data.replace('&#x3E;', '&gt;')
data = data.replace('&#38;', '&amp;')
data = data.replace('&#x26;', '&amp;')
data = data.replace('&#34;', '&quot;')
data = data.replace('&#x22;', '&quot;')
data = data.replace('&#39;', '&apos;')
data = data.replace('&#x27;', '&apos;')
if not self.contentparams.get('type', 'xml').endswith('xml'):
data = data.replace('&lt;', '<')
data = data.replace('&gt;', '>')
data = data.replace('&amp;', '&')
data = data.replace('&quot;', '"')
data = data.replace('&apos;', "'")
data = data.replace('&#x2f;', '/')
data = data.replace('&#x2F;', '/')
return data
@staticmethod
def strattrs(attrs):
return ''.join(
' %s="%s"' % (n, v.replace('"', '&quot;'))
for n, v in attrs
)
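# Illustrative behaviour of decode_entities (example values assumed, not taken
# from a real feed): in xml-typed content, numeric character references are
# normalised to their named forms, e.g. '&#60;b&#62;' becomes '&lt;b&gt;'; in
# non-xml content the named entities are additionally resolved to the literal
# characters, giving '<b>'.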
|
rembo10/headphones
|
lib/feedparser/parsers/loose.py
|
Python
|
gpl-3.0
| 3,452
|
# This is an auto-generated file. Do not edit it.
from twisted.python import versions
version = versions.Version('twisted.web2', 8, 2, 0)
|
Donkyhotay/MoonPy
|
twisted/web2/_version.py
|
Python
|
gpl-3.0
| 138
|
#!/usr/bin/env python
# encoding: utf-8
"""
setup_py2exe_Search_BMO_Instances.py
Copyright (C) 2016 Stefan Braun
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from distutils.core import setup
import py2exe
import dms.dmspipe
import Tkinter
import ttk
# some help from
# http://stackoverflow.com/questions/5811960/is-there-a-way-to-specify-the-build-directory-for-py2exe
# http://www.py2exe.org/index.cgi/ListOfOptions
options = {'py2exe': {
'dist_dir': r'..\py2exe_output\Search_BMO_Instances'
}}
setup(console=[r'tools\Search_BMO_Instances.py'], options=options)
|
stefanbraun-private/pyVisiToolkit
|
py2exe_scripts/setup_py2exe_Search_BMO_Instances.py
|
Python
|
gpl-3.0
| 1,166
|
#!/usr/bin/python3
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""CherryMusic database definition, versioning and control.
To support schema changes that are not backward-compatible, databases can be
versioned.
"""
import logging as log
from cherrymusicserver.db import defs
from cherrymusicserver.db import sql
def require(dbname, version):
"""Make sure the database exists and has the given version."""
if not dbname:
raise ValueError('dbname must not be empty or None')
isversion = MultiUpdater.checkversion(dbname)
assert isversion == version, '{0!r}: bad version: {1!r} (want: {2!r})'.format(dbname, isversion, version)
def ensure_requirements(dbname=None, autoconsent=False, consent_callback=None):
'''Make sure all defined databases exist and are up to date.
Will connect to all these databases and try to update them, if
necessary, possibly asking the user for consent.
dbname : str
When given, only make sure of the database with that name.
autoconsent : bool
When ``True``, don't ask for consent, ever.
consent_callback: callable
Called when an update requires user consent; if the return value
does not evaluate to ``True``, don't run any updates and
return ``False``. If no callback is given or autoconsent == True,
the value of autoconsent will be used to decide if the update
should run.
Returns : bool
``True`` if requirements are met.
'''
if autoconsent or consent_callback is None:
consent_callback = lambda: autoconsent
assert callable(consent_callback), (type(consent_callback))
update = _create_updater(dbname)
if update.needed:
log.info('database definition out of date')
if update.requires_consent and not consent_callback():
return False
update.run()
log.info('database definition updated')
return True
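# A minimal usage sketch (assumes the cherrymusicserver package is importable
# and its database definitions are available; names as used in this module):
#
#     from cherrymusicserver import db
#     if db.ensure_requirements(autoconsent=True):
#         print('all databases are up to date')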
def resetdb(dbname):
'''Delete all content and defined data structures from a database.
Raises:
ValueError : If dbname is ``None`` or empty, or not a defined database name.
'''
if not dbname:
raise ValueError('dbname must not be empty or None')
updater = _create_updater(dbname)
updater.reset()
def _create_updater(*dbnames):
return MultiUpdater(tuple(n for n in dbnames if n is not None))
class MultiUpdater(object):
'''Manage the state of multiple databases at once.
defs : dict
Definitions of all databases to manage.
connector : :class:`.connect.AbstractConnector`
For connecting to the databases.
'''
def __init__(self, dbnames=()):
dbdefs = defs.getall() if not dbnames else dict((n, defs.get(n)) for n in dbnames)
self.updaters = tuple(sql.Updater(k, dbdefs[k]) for k in dbdefs)
def __iter__(self):
return iter(self.updaters)
@property
def needed(self):
"""``True`` if any database needs updating.
See :meth:`.sql.Updater.needed`.
"""
for u in self:
if u.needed:
return True
return False
@property
def requires_consent(self):
"""``True`` if any database update needs user consent.
See :meth:`.sql.Updater.requires_consent`.
"""
for u in self:
if u.requires_consent:
return True
return False
def run(self):
"""Update all databases with out of date versions.
See :meth:`.sql.Updater.run`.
"""
for u in self:
if u.needed:
u.run()
def reset(self):
"""Delete content and data structures of all included databases.
See :meth:`.sql.Updater.reset`.
"""
for u in self:
u.reset()
@classmethod
def checkversion(self, dbname):
"""Return the effective version of a database."""
return sql.Updater(dbname, defs.get(dbname))._version
|
cherrymusic-meta/cherrymusic-experimental
|
cherrymusicserver/db/__init__.py
|
Python
|
gpl-3.0
| 4,959
|
"""
Cross Origin Resource Sharing (CORS) headers must be present when using multiple backends.
This middleware automatically returns OPTIONS requests, and appends other requests with the correct headers.
"""
import asyncio
from aiohttp import hdrs, web, web_exceptions
from brewblox_service import brewblox_logger, strex
LOGGER = brewblox_logger(__name__)
def set_cors_headers(request, response):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] =\
request.headers.get('Access-Control-Request-Method', ','.join(hdrs.METH_ALL))
response.headers['Access-Control-Allow-Headers'] =\
request.headers.get('Access-Control-Request-Headers', '*')
response.headers['Access-Control-Allow-Credentials'] = 'true'
return response
@web.middleware
async def cors_middleware(request: web.Request, handler) -> web.Response:
# preflight requests
if request.method == 'OPTIONS':
return set_cors_headers(request, web.Response())
else:
try:
response = await handler(request)
except asyncio.CancelledError:
raise
except web_exceptions.HTTPError as ex:
response = ex
except Exception as ex:
response = web_exceptions.HTTPInternalServerError(reason=strex(ex))
return set_cors_headers(request, response)
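# A minimal registration sketch (illustrative; the brewblox service normally
# wires this middleware up itself during app creation):
#
#     from aiohttp import web
#     app = web.Application(middlewares=[cors_middleware])
#     # every handler response (and every OPTIONS preflight) now gets the
#     # CORS headers set above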
|
glibersat/brewpi-service
|
brewblox_service/cors.py
|
Python
|
gpl-3.0
| 1,385
|
from file_finder import find_a_file
def is_module_installed(mod_name, should_exit=False):
try:
__import__(mod_name)
print "Python module %s is already installed, huzzah!"% mod_name
except ImportError:
print "Darn it! Python was unable to import the module '%s'; please make sure it is installed correctly." % mod_name
if should_exit:
exit(0)
user_response=raw_input('Is X3DNA v2.1 installed on your computer? (y/n)')
if (user_response.upper()=='Y' or user_response.upper()=='YES' or user_response==''):
print "Setup will now attempt to find where X3DNA is installed."
print "Woo hoo! Found X3DNA file '%s' in %s" % ('find_pair',find_a_file("find_pair"))
else:
print 'Please download and install X3DNA (http://rutchem.rutgers.edu/~xiangjun/3DNA/) as well as the optional file "x3dna-dssr".'
exit(0)
user_response=raw_input("\nIs SARA v1.0.7 installed on this computer? (y/n)")
if (user_response.upper()=='Y' or user_response.upper()=='YES' or user_response==''):
print "Setup will now attempt to find where SARA is installed."
print "Awesome! Found SARA file '%s' in %s" % ('runsara.py',find_a_file("runsara.py"))
else:
print 'Please install SARA (http://structure.biofold.org/sara/download.html)'
exit(0)
print "\n\nChecking for required Python modules....\n"
is_module_installed("Bio")
is_module_installed("Tkinter")
is_module_installed("tkFileDialog")
is_module_installed("tkMessageBox")
is_module_installed("Tkinter")
is_module_installed("math")
is_module_installed("urllib")
is_module_installed("time")
is_module_installed("pylab")
is_module_installed("numpy")
print "\n\nCongratulations! You can now use RNA_TwiZe!\n\n"
exit(0)
|
willblev/RNA_TwiZe
|
check_requirements.py
|
Python
|
gpl-3.0
| 1,668
|
# Text file dumper
from subprocess import call
import re
address = raw_input("URL: ")
# Grabs the raw html of the page
def get_html(address):
call(["wget", address, "-O", "toread.txt"])
with open("toread.txt") as source:
html = source.read()
return html
# Searches for all associated text files and links to "go deeper"
def link_search(html):
endings = re.findall(r"HREF=\"(.+)\"", html)
endings = [ending for ending in endings if not (ending.endswith(".html") or ending.endswith(".com"))]
return endings
def dump(address, endings):
with open("wget_list.txt", "w+") as urllist:
links = [address + end for end in endings]
for link in links:
urllist.write(link + "\n")
call(["wget", "-i", "wget_list.txt"])
initalhtml = get_html(address)
initallinks = link_search(initalhtml)
for link in initallinks:
deephtml = get_html("http://www.textfiles.com/"+link)
deep_endings = link_search(deephtml)
dump("http://www.textfiles.com/" + link + "/", deep_endings)
print link
|
palkiakerr/Misc
|
python/TextGrabber/textgrabber.py
|
Python
|
gpl-3.0
| 1,053
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'checkin_complete_form.ui'
#
# Created: Thu Jul 29 19:23:25 2010
# by: PyQt4 UI code generator 4.7.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Checkin_Success_Form(object):
def setupUi(self, Checkin_Success_Form):
Checkin_Success_Form.setObjectName("Checkin_Success_Form")
Checkin_Success_Form.resize(357, 210)
Checkin_Success_Form.setLayoutDirection(QtCore.Qt.LeftToRight)
self.gridLayout = QtGui.QGridLayout(Checkin_Success_Form)
self.gridLayout.setObjectName("gridLayout")
self.groupBox = QtGui.QGroupBox(Checkin_Success_Form)
self.groupBox.setObjectName("groupBox")
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName("gridLayout_2")
self.frame_2 = QtGui.QFrame(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(8)
sizePolicy.setVerticalStretch(12)
sizePolicy.setHeightForWidth(self.frame_2.sizePolicy().hasHeightForWidth())
self.frame_2.setSizePolicy(sizePolicy)
self.frame_2.setObjectName("frame_2")
self.formLayout = QtGui.QFormLayout(self.frame_2)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.FieldsStayAtSizeHint)
self.formLayout.setObjectName("formLayout")
self.label_2 = QtGui.QLabel(self.frame_2)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
self.venue_name_label = QtGui.QLabel(self.frame_2)
self.venue_name_label.setObjectName("venue_name_label")
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.venue_name_label)
self.label_4 = QtGui.QLabel(self.frame_2)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_4)
self.venue_address_label = QtGui.QLabel(self.frame_2)
self.venue_address_label.setObjectName("venue_address_label")
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.venue_address_label)
self.label_6 = QtGui.QLabel(self.frame_2)
self.label_6.setObjectName("label_6")
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_6)
self.checkin_date_label = QtGui.QLabel(self.frame_2)
self.checkin_date_label.setObjectName("checkin_date_label")
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.checkin_date_label)
self.venue_icon_label = QtGui.QLabel(self.frame_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(9)
sizePolicy.setHeightForWidth(self.venue_icon_label.sizePolicy().hasHeightForWidth())
self.venue_icon_label.setSizePolicy(sizePolicy)
self.venue_icon_label.setObjectName("venue_icon_label")
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.venue_icon_label)
self.gridLayout_2.addWidget(self.frame_2, 0, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1)
self.ok_button = QtGui.QPushButton(Checkin_Success_Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.ok_button.sizePolicy().hasHeightForWidth())
self.ok_button.setSizePolicy(sizePolicy)
self.ok_button.setLayoutDirection(QtCore.Qt.RightToLeft)
self.ok_button.setObjectName("ok_button")
self.gridLayout.addWidget(self.ok_button, 1, 0, 1, 1)
self.retranslateUi(Checkin_Success_Form)
QtCore.QMetaObject.connectSlotsByName(Checkin_Success_Form)
def retranslateUi(self, Checkin_Success_Form):
Checkin_Success_Form.setWindowTitle(QtGui.QApplication.translate("Checkin_Success_Form", "check-in complete!", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("Checkin_Success_Form", "Success! You have been checked into:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Checkin_Success_Form", "Venue:", None, QtGui.QApplication.UnicodeUTF8))
self.venue_name_label.setText(QtGui.QApplication.translate("Checkin_Success_Form", "venue_name", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("Checkin_Success_Form", "Address:", None, QtGui.QApplication.UnicodeUTF8))
self.venue_address_label.setText(QtGui.QApplication.translate("Checkin_Success_Form", "venue-addr", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("Checkin_Success_Form", "date/time:", None, QtGui.QApplication.UnicodeUTF8))
self.checkin_date_label.setText(QtGui.QApplication.translate("Checkin_Success_Form", "date_time", None, QtGui.QApplication.UnicodeUTF8))
self.venue_icon_label.setText(QtGui.QApplication.translate("Checkin_Success_Form", "ImageLabel", None, QtGui.QApplication.UnicodeUTF8))
self.ok_button.setText(QtGui.QApplication.translate("Checkin_Success_Form", "OK", None, QtGui.QApplication.UnicodeUTF8))
|
nzaillian/Cheatsquare
|
checkin_complete_form.py
|
Python
|
gpl-3.0
| 5,493
|
'''
No doubt that fixed coordinates are the most flexible way of organizing elements in
an n-dimensional space; however, it is very time-consuming. Instead, Kivy provides a
good set of layouts instead, which facilitate the work of organizing widgets. A Layout
is a Widget subclass that implements different strategies to organize embedded
widgets. For example, one strategy could be organizing widgets in a grid ( GridLayout ).
Let's start with a simple FloatLayout example. It works very similarly to the way
we organize widgets directly inside another Widget, except that now we can use
proportional coordinates (proportions of the total size of the window) rather than
fixed coordinates (exact pixels). This means that we don't need the calculations we
did in the previous section with self and root.
FloatLayout
This layout organizes the widgets with proportional coordinates with
the size_hint and pos_hint properties. The values are numbers
between 0 and 1 indicating a proportion to the window size.
RelativeLayout:
This layout operates in the same way as FloatLayout does, but the
positioning properties (pos, x, center_x, right, y, center_y, and
top) are relative to the Layout size and not the window size.
GridLayout:
This layout organizes widgets in a grid. You have to specify at least one
of the two properties: cols (for columns) or rows (for rows).
BoxLayout:
This layout organizes widgets in one row or one column depending
whether the value of property orientation is horizontal or vertical.
StackLayout:
This layout is similar to BoxLayout but it goes to the next row or column
when it runs out of space. In this layout, there is more flexibility to set
the orientation. For example, 'rl-bt' organizes the widgets in right-
to-left and bottom-to-top order. Any combination of lr (left to right), rl
(right to left), tb (top to bottom), and bt (bottom to top) is allowed.
AnchorLayout:
This layout organizes the widgets to a border or to the center. The
anchor_x property indicates the x position (left, center or right),
whereas anchor_y indicates the y position (top, center or bottom)
ScatterLayout:
This layout works similarly to RelativeLayout
but it allows multitouch gesturing for rotating, scaling, and translating. It is
slightly different in its implementation so we will review it later. The Kivy API
( http://kivy.org/docs/api-kivy.html ) offers a detailed explanation and good
examples on each of them.
If we are using a Layout instance, can we force the use of fixed values? Yes, but
there can be conflicts if we are not careful with the properties we use. If we use any
Layout , then pos_hint and size_hint will have the priority.
If we want to use fixed positioning properties ( pos , x , center_x , right , y , center_y , and top ), we have to
ensure that we are not using the pos_hint property.
Secondly, if we want to use the size , height , or width properties, we need to give a None value to the size_hint
axis we want to use with the absolute values.
For example, size_hint: (None, .10) allows using the width property, but it keeps the height as 10 percent of the
window size.
'''
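# A minimal FloatLayout sketch of the proportional properties described above
# (illustrative only; not part of the EventApp defined below):
#
#     from kivy.uix.floatlayout import FloatLayout
#     from kivy.uix.button import Button
#
#     layout = FloatLayout()
#     # 30% of the window width, 10% of its height, near the top-left corner
#     layout.add_widget(Button(text='Hello',
#                              size_hint=(.3, .1),
#                              pos_hint={'x': .05, 'top': .95}))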
from kivy.app import App
from kivy.uix.anchorlayout import AnchorLayout
class EventApp(App):
pass
class EventForm(AnchorLayout):
def Print(self):
print("event")
if __name__=="__main__":
EventApp().run()
|
pd-Shah/kivy
|
6-event/event.py
|
Python
|
gpl-3.0
| 3,400
|
class CityModel:
def __init__(self, world):
self.world = world
self.cities = {}
def add_hex_to_city(self, hid, cid):
if self.world.diplomacy.is_neutral(hid):
self.cities[cid] = hid
return True
else:
return False
|
connor-cash/nesonomics
|
NESCore/CityModel.py
|
Python
|
gpl-3.0
| 290
|
#!/bin/env python
""" Benchmarking for the index external service running on couchdb """
import os, sys, httplib, urllib, socket
import random
import couchdb.client
from db import couchdb_server, port
import time
rand = random.Random()
def runTime(doi):
db = couchdb_server['documents']
eqnID, searchTerm = rand.choice(db[doi]['source'].items())
try:
url = "http://localhost:%s/documents/_external/index?searchTerm=\"%s\"&searchTimeout=60&limit=10000" % (port, urllib.quote(searchTerm))
startTime = time.time()
resultsFile = urllib.urlopen(url)
endTime = time.time()
print endTime-startTime
except KeyboardInterrupt, e:
raise e
except Exception, e:
pass
def runTimes(n):
db = couchdb_server['documents']
dois = list(db)
for i in xrange(0,n):
doi = None
source = None
while not source:
try:
doi = rand.choice(dois)
source = db[doi]['source']
except socket.error:
pass # Connection refused, probably because someone restarted the server
runTime(doi)
sys.stdout.flush()
import getopt
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[1:], "", ["n="])
for opt, arg in opts:
if opt == "--n":
runTimes(int(arg))
print "Ok"
|
jamii/texsearch
|
scripts/times.py
|
Python
|
gpl-3.0
| 1,254
|
#! /usr/bin/env python
#
# spigot is a rate limiter for aggregating syndicated content to pump.io
#
# (c) 2011-2015 by Nathan D. Smith <nathan@smithfam.info>
# (c) 2014 Craig Maloney <craig@decafbad.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
# Standard library imports
import argparse
from datetime import datetime, timedelta
try:
import json
except ImportError:
import simplejson as json
import logging
import os
import re
import sqlite3
import sys
from time import mktime
# 3rd-party modules
import feedparser
from pypump import PyPump
from pypump import Client
SPIGOT_VERSION = "2.3.0"
def simple_verifier(url):
print 'Please follow the instructions at the following URL:'
print url
return raw_input("Verifier: ")
class SpigotConfig(dict):
"""Extends the built-in dict type to provide a configuration interface for
Spigot, keeping track of feeds polled and accounts configured for posting.
"""
def __init__(self, path="spigot.json"):
self.config_file = path
self.no_config = True
if os.path.exists(self.config_file):
self.no_config = False
def check_old_config(self):
"""Check existing configuration for pre-2.2 format and return True
if the config needs to be updated."""
formats = [self["feeds"][feed]["format"] for feed in self["feeds"]]
for format in formats:
if (("$t" in format) or ("$l" in format)):
logging.debug("Existing config reflects pre-2.2 format")
return True
else:
logging.debug("Existing config reflects post-2.2 format")
return False
def load(self):
"""Load the spigot json config file from the working directory
and import it into the SpigotConfig dict object."""
logging.debug("Loading %s" % self.config_file)
# Start with a clean configuration object
self.clear()
try:
self.update(json.loads(open(self.config_file, "r").read()))
except IOError:
logging.warning("Could not load configuration file")
def save(self):
"Convert the state of the SpigotConfig dict to json and save."
logging.debug("Saving %s" % self.config_file)
try:
open(self.config_file, "w").write(json.dumps(self, indent=4))
return True
except IOError:
logging.exception("Could not save configuration file")
sys.exit(2)
def add_feed(self):
"Add a feed, account, interval, and format to the configuration."
# TODO Add feature to specify to and cc for each feed
self.load()
account = None
interval = None
form = None
print "Adding feed..."
url = raw_input("Feed URL: ")
# Test feed for presence, validity
test_feed = None
try:
test_feed = feedparser.parse(url)
logging.debug("Successfully parsed feed %s" % url)
except:
logging.warning("Could not parse feed %s" % url)
account = raw_input("Account Webfinger ID (e.g. bob@example.com): ")
# Verify that the account is valid
# PyPump will authorize the account if necessary
client = Client(
webfinger=account,
name="Spigot",
type="native")
try:
pump = PyPump(client, verifier_callback=simple_verifier)
print pump.me
except:
logging.exception("Could not verify account")
sys.exit(2)
# Obtain the posting interval
valid_interval = False
while not valid_interval:
try:
raw_inter = raw_input("Minimum time between posts (minutes): ")
interval = int(raw_inter)
valid_interval = True
except:
print "Invalid interval specified."
print """Spigot formats your outgoing posts based on fields in the feed
being scanned. Specify the field name surrounded by the '%'
character to have it replaced with the corresponding value for
the item (e.g. %title% or %link%)."""
if test_feed:
print """The following fields are present in an example item in
this feed:"""
for field in test_feed["items"][0].keys():
print field
print """Next you will be prompted for the message format and
optional title format for outgoing posts."""
form = raw_input("Message format: ")
title = raw_input("Title format (optional): ")
# Put it all together
feed = {}
feed["account"] = account
feed["interval"] = interval
feed["format"] = form
feed["title"] = title
if "feeds" in self:
self["feeds"][url] = feed
else:
feeds = {}
feeds[url] = feed
self["feeds"] = feeds
self.save()
def get_feeds(self):
"""Sets instance variable 'feeds' of feeds to check for new posts.
Formatted in a tuple in the form of (url, account, interval, format)
"""
feeds = self["feeds"]
feeds_to_poll = []
feeds_num = len(feeds)
logging.debug("Found %d feeds in configuration" % feeds_num)
for url in feeds.keys():
logging.debug("Processing feed %s" % url)
account = feeds[url]["account"]
logging.debug(" Account: %s" % account)
interval = feeds[url]["interval"]
logging.debug(" Interval: %s min" % interval)
form = feeds[url]["format"]
logging.debug(" Format: %s" % form)
feeds_to_poll.append((url, account, interval, form))
logging.debug(" Added to list of feeds to poll")
return feeds_to_poll
class SpigotDB():
"""Handle database calls for Spigot."""
def __init__(self, path="spigot.db"):
self.path = path
self._connect()
def _connect(self):
"""Establish the database connection for this instantiation."""
# Check first for a database file
new_db = False
if not os.path.exists(self.path):
new_db = True
logging.debug("Database file %s does not exist" % self.path)
det_types = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
try:
self._db = sqlite3.connect(self.path, detect_types=det_types)
except:
logging.exception("Could not connect to database %s" % self.path)
sys.exit(2)
if new_db:
self._init_db_tables()
def _init_db_tables(self):
"""Initialize the database if it is new"""
curs = self._db.cursor()
# Figure out db tables based on tricklepost
create_query = """create table items (feed text, link text,
message text, title text, date timestamp,
posted timestamp)"""
curs.execute(create_query)
self._db.commit()
logging.debug("Initialized database tables")
curs.close()
def check_old_db(self):
"""Inspect schema of existing sqlite3 database and return True if
the database needs to be upgraded to the post 2.2 schema."""
curs = self._db.cursor()
curs.execute("PRAGMA table_info(items);")
cols = curs.fetchall()
curs.close()
if "message" not in [col[1] for col in cols]:
logging.debug("Existing database lacks 'message' field")
return True
elif "title" not in [col[1] for col in cols]:
logging.debug("Existing database lacks 'title' field")
return True
else:
logging.debug("Existing database is up-to-date")
return False
def close(self):
"""Cleanup after the db is no longer needed."""
self._db.close()
logging.debug("Closed connection to database")
def check_link(self, item_link):
"""Returns true if the specified link is already in the database."""
curs = self._db.cursor()
curs.execute("select * from items where link=?", [item_link])
items = curs.fetchall()
curs.close()
if len(items) > 0:
return True
else:
return False
def add_item(self, feed_url, link, message, title, date):
"""Add an item to the database with the given parameters. Return True
if successful."""
curs = self._db.cursor()
curs.execute("insert into items(feed, link, message, title, date) \
values (?, ?, ?, ?, ?)", (feed_url, link, message, title, date))
logging.debug(" Added item %s to database" % link)
curs.close()
self._db.commit()
return True
def get_unposted_items(self, feed):
"Return a list of items in the database which have yet to be posted."
curs = self._db.cursor()
curs.execute("SELECT feed, link, message, title FROM items \
where (posted is NULL AND feed=?) \
ORDER BY date ASC", [feed])
unposted_items = curs.fetchall()
num_items = len(unposted_items)
logging.debug(" Found %d unposted items in %s" % (num_items, feed))
curs.close()
return unposted_items
def mark_posted(self, item_link, date=None):
"""Mark the given item posted by setting its posted datetime to now."""
if not date:
date = datetime.utcnow()
curs = self._db.cursor()
curs.execute("UPDATE items SET posted=? WHERE link=?",
(date, item_link))
logging.debug(" Updated posted time of item %s in database"
% item_link)
curs.close()
self._db.commit()
def get_latest_post(self, feed):
"""Return the datetime of the most recent item posted by spigot of the
specified feed. If none have been posted, return None"""
curs = self._db.cursor()
curs.execute("SELECT posted FROM items WHERE \
(feed=? AND posted is not NULL) ORDER BY posted DESC LIMIT 1",
[feed])
result = curs.fetchone()
curs.close()
if result:
logging.debug(" Latest post for feed %s is %s" % (feed,
result[0]))
return result[0]
else:
logging.debug(" No items from feed %s have been posted" % feed)
return None
class SpigotFeeds():
"""
Handle the polling the specified feeds for new posts. Add new posts to
database in preparation for posting to the specified Pump.io accounts.
"""
def __init__(self, db, config):
self._spigotdb = db
self._config = config
def format_element(self, feed, entry, element):
"""Returns an outgoing message for the given entry based on the given
feed's configured format."""
message = self._config["feeds"][feed][element]
# Store a list of tuples containing format string and value
replaces = []
field_re = re.compile("%\w+%")
fields = field_re.findall(message)
for raw_field in fields:
# Trim the % character from format
field = raw_field[1:-1]
if field in entry:
# Make a special exception for the content element, which in
# ATOM can appear multiple times per entry. Assume the element
# with index=0 is the desired value.
if field == "content":
logging.debug(" 'content' field in formatting string")
value = entry.content[0].value
else:
value = entry[field]
else:
value = ""
replaces.append((raw_field, value))
# Fill in the message format with actual values
for string, val in replaces:
message = message.replace(string, val)
return message
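# For example (illustrative values only): with the configured format
# "New post: %title% - %link%" and a feed entry whose title is "Hello" and
# whose link is "http://example.com/1", format_element returns
# "New post: Hello - http://example.com/1".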
def poll_feeds(self):
"""Check the configured feeds for new posts."""
feeds_to_poll = self._config.get_feeds()
for url, account, interval, form in feeds_to_poll:
self.scan_feed(url)
def scan_feed(self, url):
"""Poll the given feed and then update the database with new info"""
logging.debug("Polling feed %s for new items" % url)
# Allow for parsing of this feed to fail without raising an exception
try:
p = feedparser.parse(url)
except:
logging.error("Unable to parse feed %s" % url)
return None
# Get a list of items for the feed and compare it to the database
num_items = len(p.entries)
logging.debug("Found %d items in feed %s" % (num_items, url))
new_items = 0
for i in range(len(p.entries)):
logging.debug(" Processing item %d" % i)
title = p.entries[i].title
logging.debug(" Title: %s" % title)
link = p.entries[i].link
logging.debug(" Link: %s" % link)
# Check for existence of published_parsed, fail back to updated
if 'published_parsed' in p.entries[i]:
date = p.entries[i].published_parsed
else:
date = p.entries[i].updated_parsed
date_struct = datetime.fromtimestamp(mktime(date))
logging.debug(" Date: %s" % datetime.isoformat(date_struct))
# Craft the message based feed format string
message = self.format_element(url, p.entries[i], "format")
logging.debug(" Message: %s" % message)
note_title = self.format_element(url, p.entries[i], "title")
logging.debug(" Note Title: %s" % note_title)
# Check to see if item has already entered the database
if not self._spigotdb.check_link(link):
logging.debug(" Not in database")
self._spigotdb.add_item(url, link, message, note_title,
date_struct)
new_items += 1
else:
logging.debug(" Already in database")
logging.debug("Found %d new items in feed %s" % (new_items, url))
def feed_ok_to_post(self, feed):
"""Return True if the given feed is OK to post given its configured
interval."""
interval = int(self._config["feeds"][feed]["interval"])
delta = timedelta(minutes=interval)
posted = self._spigotdb.get_latest_post(feed)
if posted:
next = posted + delta
now = datetime.utcnow()
if now >= next:
# post it
logging.debug(" Feed %s is ready for a new post" % feed)
return True
else:
logging.debug(" Feed %s has been posted too recently" % feed)
logging.debug(" Next post at %s" % next.isoformat())
return False
else:
# Nothing has been posted for this feed, so it is OK to post
logging.debug(" Feed %s is ready for a new post" % feed)
return True
class SpigotPost():
"""Handle the posting of syndicated content stored in the SpigotDB to the
pump.io account.
"""
def __init__(self, db, spigot_config, spigot_feed):
self._spigotdb = db
self._config = spigot_config
self._spigotfeed = spigot_feed
def post_items(self):
"""Handle the posting of unposted items.
Iterate over each pollable feed and check to see if it is permissible
to post new items based on interval configuration. Loop while it is OK,
and terminate the loop when it becomes not OK. Presumably one or none
will be posted each time this method runs."""
for feed, account, interval, form in self._config.get_feeds():
logging.debug("Finding eligible posts in feed %s" % feed)
unposted_items = self._spigotdb.get_unposted_items(feed)
# Initialize Pump.IO connection here
client = Client(
webfinger=account,
type="native",
name="Spigot")
pump = PyPump(
client=client,
verifier_callback=simple_verifier)
while self._spigotfeed.feed_ok_to_post(feed):
try:
item = unposted_items.pop(0)
except:
# Escape the loop if there are no new posts waiting
break
feed = item[0]
link = item[1]
message = item[2]
# Optional title
title = None
if item[3]:
title = item[3]
try:
logging.info(" Posting item %s from %s to account %s"
% (link, feed, account))
new_note = pump.Note(message, title)
new_note.to = pump.Public
new_note.send()
self._spigotdb.mark_posted(link)
except:
logging.exception(" Unable to post item")
if __name__ == "__main__":
spigot_config = SpigotConfig()
parser = argparse.ArgumentParser()
parser.add_argument("--version", "-v", action="store_true")
parser.add_argument("--add-feed", "-f", action="store_true")
log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
parser.add_argument("--log-level", "-l", choices=log_levels,
default="WARNING")
args = parser.parse_args()
# Logging configuration
logging.basicConfig(level=args.log_level,
format='%(asctime)s %(levelname)s: %(message)s')
logging.debug("spigot startup")
# No configuration present, doing welcome wagon
if args.version:
print "Spigot %s" % SPIGOT_VERSION
sys.exit(0)
if spigot_config.no_config:
print "No configuration file found, running welcome wizard."
spigot_config.add_feed()
sys.exit(0)
if args.add_feed:
spigot_config.add_feed()
sys.exit(0)
# Normal operation
spigot_config.load()
# Check for pre-2.2 formatted spigot configuration file
if not spigot_config.no_config:
old_config = spigot_config.check_old_config()
if old_config:
logging.error("Config not upgraded for Spigot 2.2")
logging.error("Please upgrade the config using the \
utils/convert.py script found in the source repository.")
sys.exit(2)
spigot_db = SpigotDB()
# Test for pre-2.2 database structure
if spigot_db.check_old_db():
logging.error("Existing database not upgraded for the latest spigot")
logging.error("Please upgrade the database using the \
utils/convert.py script found in the source repository.")
sys.exit(2)
spigot_feed = SpigotFeeds(spigot_db, spigot_config)
spigot_feed.poll_feeds()
spigot_post = SpigotPost(spigot_db, spigot_config, spigot_feed)
spigot_post.post_items()
|
nathans/spigot
|
spigot.py
|
Python
|
gpl-3.0
| 19,916
|
import numpy as np
from pele.potentials import BasePotential
class WhamPotential(BasePotential):
"""
the idea behind this minimization procedure is as follows
from a simulation at temperature T you find the probability of finding energy
E is P(E,T). We know this can be compared to the density of states n(E) as
P(E,T) = n(E) exp(-E/T_i) / w_i
Where w_i is a constant that is not known. The density of
states is independent of temperature, so we can use it to find
P(E) at any other temperature, or Z(T), etc. But our estimate of n(E) from
one temperature is not very good. So we combine P(E,T) from multiple simulations
at different temperatures to get a better estimate of n(E).
Define R the log deviation for each bin from the estimate of the density of states
R(E,T_i) = log(n_F(E)) - log(w_i) - log( P(E,T_i) * exp(E/T_i) )
we want to make each R(E,T_i) as small as possible. Define an "energy" function
CHI2 = sum_E sum_i P(E,T_i) * |R(E,T_i)|^2
Where each R(E,T_i) contributes weight proportional to P(E,T_i) to the sum to
make sure those with better statistics are more heavily weighted. To solve
the problem we find the set of {n_F(E), w_i} which minimize CHI2
"""
def __init__(self, P, reduced_energy):
"""
To make it fit within existing minimization schemes, we need to view it as a linear problem
nrep: the number of replica variables, i.e. len(w_i)
nbins: the number of bins in the histogram, e.g. len(n_F(E))
P: = P(E,T_i) a.k.a. log(visits) a 2d array of shape( nreps, nbins).
reduced_energy: E/T_i a 2d array of shape( nreps, nbins) giving the
reduced energy of each bin
note: this works perfectly well for 2d histograms as well. In this case the 2d
histograms should be linearized
"""
self.nreps, self.nbins = P.shape
assert P.shape == reduced_energy.shape
self.P = P
if np.any(self.P < 0):
raise ValueError("P has negative values")
SMALL = 0. # this number is irrelevant, as long as it's not NaN
self.log_n_rE = np.where(self.P==0, SMALL,
np.log(self.P) + reduced_energy)
def getEnergy(self, X):
"""
X: is the array of unknowns of length nrep + nbins
X[0:nrep] = {w_i} : the replica unknowns
X[nrep:] = {log(n_F(E))} : the bin unknowns
R(E,T_i) = log(n_F(E)) - log(w_i) - log( P(E,T_i)*exp(E/T_i) )
energy = sum_E sum_i P(E,T_i)*|R(E,T_i)|^2
"""
wi = X[:self.nreps]
lognF = X[self.nreps:]
energy = np.sum( self.P * (lognF[np.newaxis,:] - wi[:,np.newaxis] - self.log_n_rE)**2 )
return energy
def getEnergyGradient(self, X):
"""
X: is the array of unknowns of length nrep + nbins
X[0:nrep] = {w_i} : the replica unknowns
X[nrep:] = {log(n_F(E))} : the bin unknowns
R(E,T_i) = log(n_F(E)) - log(w_i) - log( P(E,T_i)*exp(E/T_i) )
energy = sum_E sum_i P(E,T_i)*|R(E,T_i)|^2
"""
wi = X[:self.nreps]
lognF = X[self.nreps:]
R = lognF[np.newaxis,:] - wi[:,np.newaxis] - self.log_n_rE
energy = np.sum( self.P * (R)**2 )
gradient = np.zeros(len(X))
gradient[:self.nreps] = -2. * np.sum( self.P * R, axis=1 )
gradient[self.nreps:] = 2. * np.sum( self.P * R, axis=0 )
#print np.shape(gradient)
#print gradient
return energy, gradient
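# A minimal usage sketch (shapes and values are illustrative stand-ins for
# real histogram data collected from a set of replica simulations):
#
#     import numpy as np
#     nreps, nbins = 4, 100
#     P = np.random.rand(nreps, nbins) + 0.1          # fake visit histograms
#     reduced_energy = np.random.rand(nreps, nbins)   # fake E/T_i per bin
#     pot = WhamPotential(P, reduced_energy)
#     X0 = np.zeros(nreps + nbins)                    # {w_i} and {log n_F(E)}
#     energy, gradient = pot.getEnergyGradient(X0)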
|
marktoakley/LamarckiAnt
|
SCRIPTS/python/ptmc/histogram_reweighting/wham_potential.py
|
Python
|
gpl-3.0
| 3,705
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import operator
import sickbeard
from sickbeard import db
from sickbeard import exceptions
from sickbeard.exceptions import ex
from sickbeard import helpers, logger, show_name_helpers
from sickbeard import providers
from sickbeard import search
from sickbeard import history
from sickbeard.common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, Quality
from lib.tvdb_api import tvdb_api, tvdb_exceptions
from name_parser.parser import NameParser, InvalidNameException
class ProperFinder():
def __init__(self):
self.updateInterval = datetime.timedelta(hours=1)
def run(self):
if not sickbeard.DOWNLOAD_PROPERS:
return
# look for propers every night at 1 AM
updateTime = datetime.time(hour=1)
logger.log(u"Checking proper time", logger.DEBUG)
hourDiff = datetime.datetime.today().time().hour - updateTime.hour
# if it's less than an interval after the update time then do an update
if hourDiff >= 0 and hourDiff < self.updateInterval.seconds / 3600:
logger.log(u"Beginning the search for new propers")
else:
return
propers = self._getProperList()
self._downloadPropers(propers)
def _getProperList(self):
propers = {}
# for each provider get a list of the propers
for curProvider in providers.sortedProviderList():
if not curProvider.isActive():
continue
search_date = datetime.datetime.today() - datetime.timedelta(days=2)
logger.log(u"Searching for any new PROPER releases from " + curProvider.name)
try:
curPropers = curProvider.findPropers(search_date)
except exceptions.AuthException, e:
logger.log(u"Authentication error: " + ex(e), logger.ERROR)
continue
# if they haven't been added by a different provider than add the proper to the list
for x in curPropers:
name = self._genericName(x.name)
if not name in propers:
logger.log(u"Found new proper: " + x.name, logger.DEBUG)
x.provider = curProvider
propers[name] = x
# take the list of unique propers and sort it by date, newest first
sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
finalPropers = []
for curProper in sortedPropers:
# parse the file name
try:
myParser = NameParser(False)
parse_result = myParser.parse(curProper.name, True)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + curProper.name + " into a valid episode", logger.DEBUG)
continue
if not parse_result.episode_numbers:
logger.log(u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode", logger.DEBUG)
continue
# populate our Proper instance
if parse_result.air_by_date:
curProper.season = -1
curProper.episode = parse_result.air_date
else:
curProper.season = parse_result.season_number if parse_result.season_number != None else 1
curProper.episode = parse_result.episode_numbers[0]
curProper.quality = Quality.nameQuality(curProper.name)
# for each show in our list
for curShow in sickbeard.showList:
if not parse_result.series_name:
continue
genericName = self._genericName(parse_result.series_name)
# get the scene name masks
sceneNames = set(show_name_helpers.makeSceneShowSearchStrings(curShow))
# for each scene name mask
for curSceneName in sceneNames:
# if it matches
if genericName == self._genericName(curSceneName):
logger.log(u"Successful match! Result " + parse_result.series_name + " matched to show " + curShow.name, logger.DEBUG)
# set the tvdbid in the db to the show's tvdbid
curProper.tvdbid = curShow.tvdbid
# since we found it, break out
break
# if we found something in the inner for loop break out of this one
if curProper.tvdbid != -1:
break
if curProper.tvdbid == -1:
continue
if not show_name_helpers.filterBadReleases(curProper.name):
logger.log(u"Proper " + curProper.name + " isn't a valid scene release that we want, igoring it", logger.DEBUG)
continue
# if we have an air-by-date show then get the real season/episode numbers
if curProper.season == -1 and curProper.tvdbid:
showObj = helpers.findCertainShow(sickbeard.showList, curProper.tvdbid)
if not showObj:
logger.log(u"This should never have happened, post a bug about this!", logger.ERROR)
raise Exception("BAD STUFF HAPPENED")
tvdb_lang = showObj.lang
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if tvdb_lang and not tvdb_lang == 'en':
ltvdb_api_parms['language'] = tvdb_lang
try:
t = tvdb_api.Tvdb(**ltvdb_api_parms)
epObj = t[curProper.tvdbid].airedOn(curProper.episode)[0]
curProper.season = int(epObj["seasonnumber"])
curProper.episodes = [int(epObj["episodenumber"])]
except tvdb_exceptions.tvdb_episodenotfound:
logger.log(u"Unable to find episode with date " + str(curProper.episode) + " for show " + parse_result.series_name + ", skipping", logger.WARNING)
continue
# check if we actually want this proper (if it's the right quality)
sqlResults = db.DBConnection().select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", [curProper.tvdbid, curProper.season, curProper.episode])
if not sqlResults:
continue
oldStatus, oldQuality = Quality.splitCompositeStatus(int(sqlResults[0]["status"]))
# only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != curProper.quality:
continue
# if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
if curProper.tvdbid != -1 and (curProper.tvdbid, curProper.season, curProper.episode) not in map(operator.attrgetter('tvdbid', 'season', 'episode'), finalPropers):
logger.log(u"Found a proper that we need: " + str(curProper.name))
finalPropers.append(curProper)
return finalPropers
def _downloadPropers(self, properList):
for curProper in properList:
historyLimit = datetime.datetime.today() - datetime.timedelta(days=30)
# make sure the episode has been downloaded before
myDB = db.DBConnection()
historyResults = myDB.select(
"SELECT resource FROM history "
"WHERE showid = ? AND season = ? AND episode = ? AND quality = ? AND date >= ? "
"AND action IN (" + ",".join([str(x) for x in Quality.SNATCHED]) + ")",
[curProper.tvdbid, curProper.season, curProper.episode, curProper.quality, historyLimit.strftime(history.dateFormat)])
# if we didn't download this episode in the first place we don't know what quality to use for the proper so we can't do it
if len(historyResults) == 0:
logger.log(u"Unable to find an original history entry for proper " + curProper.name + " so I'm not downloading it.")
continue
else:
# make sure that none of the existing history downloads are the same proper we're trying to download
isSame = False
for curResult in historyResults:
# if the result exists in history already we need to skip it
if self._genericName(curResult["resource"]) == self._genericName(curProper.name):
isSame = True
break
if isSame:
logger.log(u"This proper is already in history, skipping it", logger.DEBUG)
continue
# get the episode object
showObj = helpers.findCertainShow(sickbeard.showList, curProper.tvdbid)
if showObj == None:
logger.log(u"Unable to find the show with tvdbid " + str(curProper.tvdbid) + " so unable to download the proper", logger.ERROR)
continue
epObj = showObj.getEpisode(curProper.season, curProper.episode)
# make the result object
result = curProper.provider.getResult([epObj])
result.url = curProper.url
result.name = curProper.name
result.quality = curProper.quality
# snatch it
downloadResult = search.snatchEpisode(result, SNATCHED_PROPER)
return downloadResult
def _genericName(self, name):
return name.replace(".", " ").replace("-", " ").replace("_", " ").lower()
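    # Illustration (hypothetical release name, not part of the original source):
    #   _genericName("Show.Name.S01E02.720p-GROUP") -> "show name s01e02 720p group"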
|
TRex22/Sick-Beard
|
sickbeard/properFinder.py
|
Python
|
gpl-3.0
| 10,916
|
import sys
class Node():
def __init__(self, data):
self.data = data
self.left = None
self.right = None
class Tree() :
def __init__(self):
self.root = Node(4)
self.printed_val = - sys.maxint -1
def bst_or_not(self, present_root) :
if present_root :
self.bst_or_not(present_root.left)
if not self.printed_val < present_root.data :
print "Given Tree is not a BST"
sys.exit(0)
self.printed_val = present_root.data
print present_root.data
self.bst_or_not(present_root.right)
else :
return
if __name__ == "__main__" :
tree_root = Tree()
tree_root.root.left = Node(2)
tree_root.root.left.left = Node(1)
tree_root.root.left.right = Node(3)
tree_root.root.right = Node(5)
tree_root.bst_or_not(tree_root.root)
print "Given tree is a BST"
|
srinivasanmit/all-in-all
|
amazon/bst_or_not.py
|
Python
|
gpl-3.0
| 932
|
#!/usr/bin/python
#
# currentTest.py TEST CURRENT SENSOR
#
import sys
sys.path.append("/home/pi/RWPi/rwpilib")
import myPDALib
import myPyLib
import time
import traceback
import currentsensor
# ### TEST MAIN() ######################
def main():
myPyLib.set_cntl_c_handler() # Set CNTL-C handler
try:
print "\nCURRENT SENSOR TEST"
while True:
mA=currentsensor.current_sense(1,1)
time.sleep(5)
except SystemExit:
myPDALib.PiExit()
print "CURRENT SENSOR TEST: Bye Bye"
except:
print "Exception Raised"
traceback.print_exc()
if __name__ == "__main__":
main()
|
slowrunner/RWPi
|
systemtests/currentTest.py
|
Python
|
gpl-3.0
| 631
|
import gillard
import test_utils
class GillardTestCase(test_utils.GillardBaseTestCase):
def test_invalid_req(self):
rv = self.app.get('/bad_url_dont_exist')
assert rv.status_code == 404
def test_index_exists(self):
rv = self.app.get('/')
assert rv.status_code == 200
assert b'index' in rv.data
def test_health(self):
rv = self.app.get('/health')
assert rv.status_code == 200
assert b'OK' in rv.data
|
spncrlkt/gillard
|
gillard/test/gillard_tests.py
|
Python
|
gpl-3.0
| 479
|
from datetime import datetime
from pytaku.config import secret
from pytaku.models import User
from itsdangerous import URLSafeSerializer, BadSignature
from google.appengine.api.app_identity import get_application_id
_secret_str = secret[get_application_id()]
_signer = URLSafeSerializer(_secret_str)
_datetimefmt = '%Y-%m-%d.%H:%M:%S'
def gen_token(user, expires=True):
data = {
'id': user.key.id(),
'hash': user.password_hash,
}
if expires:
data['created_at'] = datetime.now().strftime(_datetimefmt)
return _signer.dumps(data)
def validate_token(message, max_days=None):
try:
data = _signer.loads(message)
except BadSignature:
return None, 'invalid_access_token'
# Tokens without creation time don't expire over time
if 'created_at' in data:
token_created_at = datetime.strptime(data['created_at'], _datetimefmt)
if (datetime.now() - token_created_at).days > max_days:
return None, 'expired_access_token'
user = User.get_by_id(data['id'])
if user is None:
return None, 'invalid_access_token'
# All existing tokens expire when user password has been changed
if user.password_hash != data['hash']:
return None, 'expired_access_token'
return user, None
|
hnq90/pytaku
|
pytaku/api/token.py
|
Python
|
gpl-3.0
| 1,297
|
#!/usr/bin/env python3
# Copyright (C) 2017 Dmitry Malko
# This file is part of PeptoVar (Peptides of Variations): the program for personalized and population-wide peptidome generation.
#
# PeptoVar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PeptoVar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PeptoVar. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import random
import string
import tempfile
class Seq:
def __init__(self, seq_id, tmp_dir = None):
self.id = seq_id
self._file = tempfile.TemporaryFile(mode='w+', suffix='.seq', dir=tmp_dir)
self.len = 0
self._shiftpos = 0
def __del__(self):
if self._file and not self._file.closed:
self._file.close()
def get(self, beg, end):
if self.len < end or end < beg:
return None
self._file.seek(self._shiftpos + beg - 1)
return self._file.read(end - beg + 1)
def append(self, seq):
self._file.write(seq)
self.len += len(seq)
def set(self, fh, pos = 0):
self._file = fh
self._shiftpos = pos
fh.seek(0, 2)
self.len = fh.tell() - pos
fh.seek(0)
def flush(self):
self._file.flush()
# end of Seq
class Fasta:
def __init__(self, tmp_dir = None):
self._seq = {}
self._tmp = tmp_dir
def appendSeq(self, file_name):
try:
file = open(file_name, 'r')
except:
raise ValueError("can't open file: {}".format(file_name))
pos = 0
seq_id = ''
for line in file:
pos = len(line)
re_header = re.match(">(\S+)", line)
if re_header:
seq_id = re_header.group(1)
else:
raise ValueError("wrong format of sequence file")
break
seq = Seq(seq_id, self._tmp)
seq.set(file, pos)
self._seq[seq_id] = seq
def appendFasta(self, file_name, pos = 0):
try:
file = open(file_name, 'r')
except:
raise ValueError("can't open file: {}".format(file_name))
file.seek(pos)
seq = None
hdr_pattern = re.compile('>(\S+)')
n = 0
for line in file:
n += 1
if line[0] == '>':
match = re.match(hdr_pattern, line)
if match:
seq_id = match.group(1)
if seq: seq.flush()
seq = Seq(seq_id, self._tmp)
self._seq[seq_id] = seq
else:
raise ValueError("wrong FASTA file format (line {} )".format(n))
elif seq:
seq.append(line.strip())
if seq:
seq.flush()
file.close()
def getSeq(self, seq_id, beg, end):
if seq_id in self._seq:
return self._seq[seq_id].get(beg, end)
return None
# end of Fasta
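# A minimal usage sketch (assumes a FASTA file 'example.fa' whose first header ID is
# 'chr1'; both are hypothetical and not part of the original module):
#
#     fasta = Fasta()
#     fasta.appendFasta('example.fa')
#     print(fasta.getSeq('chr1', 1, 10))  # 1-based, inclusive coordinates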
|
DMalko/PeptoVar
|
lib/fasta.py
|
Python
|
gpl-3.0
| 3,432
|
from copy import copy
from datetime import datetime
from hl7 import client
from hl7.containers import Message
import hl7
from gloss import models, exceptions
from gloss.translators.hl7.segments import MSH, InpatientPID, QueryPD1, MSA
from gloss.translators.hl7.hl7_translator import HL7Translator
from gloss.message_type import (
PatientMessage
)
from gloss.conf import settings
try:
from flask import current_app
logger = current_app.logger
except:
import logging
logger = logging
class DemographicsQueryResponse(HL7Translator):
segments = (MSH, MSA, InpatientPID, QueryPD1,)
class DemographicsErrorResponse(HL7Translator):
segments = (MSH, MSA,)
def generate_message_id():
unique_id = models.get_next_message_id()
unique_id_length = len(str(unique_id))
return "ELC%s%s" % (("0" * (17 - unique_id_length)), unique_id)
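# e.g. a next message id of 42 becomes "ELC00000000000000042" (zero-padded to 20 characters)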
def generate_demographics_query_message(identifier):
message = Message("|")
msh = message.create_segment([message.create_field(['MSH'])])
qrd = message.create_segment([message.create_field(['QRD'])])
now = datetime.now()
message_id = generate_message_id()
query_msg = message.create_message([msh, qrd])
query_msg.assign_field("|", "MSH", 1, 1)
query_msg.assign_field("^~\&", "MSH", 1, 2)
query_msg.assign_field("elcid", "MSH", 1, 3)
query_msg.assign_field("UCLH", "MSH", 1, 4)
query_msg.assign_field("Unicare", "MSH", 1, 5)
query_msg.assign_field("UCLH", "MSH", 1, 6)
query_msg.assign_field(now.strftime("%Y%m%d%H%M"), "MSH", 1, 7)
query_msg.assign_field("QRY^A19", "MSH", 1, 9)
query_msg.assign_field(message_id, "MSH", 1, 10)
query_msg.assign_field("2.4", "MSH", 1, 12)
query_msg.assign_field(now.strftime("%Y%m%d%H%M%S"), "QRD", 1, 1)
query_msg.assign_field("R", "QRD", 1, 2)
query_msg.assign_field("I", "QRD", 1, 3)
query_msg.assign_field(message_id, "QRD", 1, 4)
query_msg.assign_field("1^RD", "QRD", 1, 7, 1)
query_msg.assign_field(identifier, "QRD", 1, 8, 1)
query_msg.assign_field("DEM", "QRD", 1, 9)
return query_msg
def send_message(some_message):
with client.MLLPClient(settings.DEMOGRAPHICS_HOST, settings.DEMOGRAPHICS_PORT) as c:
response = c.send_message(some_message)
return response
def post_message_for_identifier(some_identifier):
msg = generate_demographics_query_message(some_identifier)
try:
response = send_message(msg)
except Exception as err:
logger.error(err)
raise exceptions.APIError("Unable to reach the external system")
unparsed_message = hl7.parse(response)
errored = DemographicsErrorResponse(unparsed_message)
if errored.msa.error_code:
raise exceptions.APIError(
"We can't find any patients with that identifier"
)
hl7_message = DemographicsQueryResponse(unparsed_message)
message = construct_internal_message(hl7_message)
save_message(message, hl7_message.pid.hospital_number)
@models.atomic_method
def save_message(demographics_message, hospital_number, session):
gloss_ref = models.get_or_create_identifier(
hospital_number, session, "uclh"
)
kwargs = copy(vars(demographics_message))
kwargs["gloss_reference"] = gloss_ref
patient = models.Patient(**kwargs)
session.add(patient)
return patient
def construct_internal_message(hl7Message):
interesting_fields = [
"surname",
"first_name",
"middle_name",
"title",
"date_of_birth",
"sex",
"marital_status",
"religion",
"ethnicity",
"post_code",
"date_of_death",
"death_indicator",
]
kwargs = {
i: getattr(hl7Message.pid, i) for i in interesting_fields if getattr(
hl7Message.pid, i
)
}
kwargs["gp_practice_code"] = hl7Message.pd1.gp_practice_code
if hl7Message.pid.death_indicator is False:
kwargs["date_of_death"] = None
return PatientMessage(**kwargs)
|
openhealthcare/gloss
|
gloss/external_api.py
|
Python
|
gpl-3.0
| 4,019
|
__author__ = 'matt'
from glob import glob
import os
import re
class find_coocs:
def __init__(self, orig, term, coocs, win_size, dest):
self.files = glob('{}/*.txt'.format(orig))
self.term = term
self.coocs = set(coocs)
self.win_size = win_size
self.dest = dest
def find(self):
with open(self.dest, mode='w') as dest_file:
dest_file.write('Filename,Word #,Cooccurrent,Term\n')
for cooc in self.coocs:
print(cooc)
for file in self.files:
with open(file) as f:
words = f.read().split('\n')
for i, word in enumerate(words):
if self.term in word:
rng = words[max(0, i-self.win_size):min(len(words), i+self.win_size)]
for w in rng:
if cooc in w.lower():
print(w)
dest_file.write('{0},{1},{2},{3}\n'.format(os.path.basename(file), i, cooc, word))
class find_forms:
def __init__(self, orig, regex, dest):
self.files = glob('{}/*.txt'.format(orig))
self.pattern = re.compile(r'{}'.format(regex))
self.dest = dest
def find(self):
self.forms = []
for file in self.files:
with open(file) as f:
words = f.read().split('\n')
for word in words:
if re.search(self.pattern, word):
self.forms.append(word)
with open(self.dest, mode='w') as f:
for form in sorted(set(self.forms)):
f.write('{}\n'.format(form))
|
sonofmun/DissProject
|
Data_Production/find_cooc_places.py
|
Python
|
gpl-3.0
| 1,327
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
try: # for pip >= 10
from pip._internal.req import parse_requirements
from pip._internal.download import PipSession
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
from pip.download import PipSession
from distutils.core import setup
from setuptools import find_packages
# Parse requirements.txt to get the list of dependencies
inst_req = parse_requirements('requirements.txt',
session=PipSession())
REQUIREMENTS = [str(r.req) for r in inst_req]
setup(name='GeoNode',
version=__import__('geonode').get_version(),
description="Application for serving and sharing geospatial data",
long_description=open('README.md').read(),
classifiers=[
"Development Status :: 4 - Beta"],
python_requires='>=2.7, <3',
keywords='',
author='GeoNode Developers',
author_email='dev@geonode.org',
url='http://geonode.org',
license='GPL',
packages=find_packages(),
      package_data={
          # merged into one '' key; duplicate dict keys silently override each other
          '': ['*.*', 'static/*.*', 'templates/*.*'],  # noqa
          'static': ['*.*'],
          'templates': ['*.*'],
      },
include_package_data=True,
install_requires=REQUIREMENTS,
zip_safe=False
)
|
mcldev/geonode
|
setup.py
|
Python
|
gpl-3.0
| 2,133
|
#!/usr/bin/env python
import pickle, os, sys
from dq2.info.TiersOfATLAS import _refreshToACache, ToACache
from getopt import getopt,GetoptError
_refreshToACache()
if __name__ == '__main__':
type = False
protocol = False
try:
opts, args = getopt(sys.argv[1:],':tp', ['type','protocol'])
except GetoptError:
print 'access_info.py -t or -p'
sys.exit(410100)
for opt, val in opts:
if opt in ['-t', '--type']:
type = True
if opt in ['-p', '--protocol']:
protocol = True
fileName = 'access_info.pickle'
f = open(fileName,"r")
access_info = pickle.load(f)
f.close()
accessType = 'DQ2_LOCAL'
accessProtocol = ''
if 'DQ2_LOCAL_SITE_ID' in os.environ:
dq2localsiteid = os.environ['DQ2_LOCAL_SITE_ID']
site_info = ToACache.sites[dq2localsiteid]
alternateName = site_info['alternateName'][-1].upper()
try:
accessType = access_info[alternateName][0]
except:
pass
try:
accessProtocol = access_info[alternateName][1]
except:
pass
if type:
print accessType
elif protocol:
print accessProtocol
|
ganga-devs/ganga
|
ganga/GangaAtlas/Lib/Athena/access_info.py
|
Python
|
gpl-3.0
| 1,225
|
import pinocchio as se3
import numpy as np
# 'robots' and 'IDX_NECK' are expected to be provided by the calling environment
robot=robots[4]
se3.computeAllTerms(robot.model,robot.data, robot.q, robot.v)
vardq = 0.0001
varddq = 0.
dq = np.matrix(np.ones(robot.nv)*vardq)
ddq = np.matrix(np.ones(robot.nv)*varddq)
frame_id = IDX_NECK #25
#a
x_dot_dot = robot.frameAcceleration(frame_id)
#b
J = robot.frameJacobian(robot.q.copy(),frame_id, True)
v_frame = robot.frameVelocity(frame_id).copy()
drift = x_dot_dot.copy()
drift.linear += np.cross(v_frame.angular.T, v_frame.linear.T).T
J_dot = drift.vector*np.linalg.pinv(robot.v).T
q_dot = robot.v.copy()
q_dot_dot = robot.a.copy()
b=(J_dot+J)*dq.T + J*ddq.T
b2=J*dq.T + J*ddq.T
b3=(2*J_dot)*dq.T + J*ddq.T
robot.v += dq
robot.a += ddq
se3.computeAllTerms(robot.model,robot.data, robot.q, robot.v)
x_dot_dot_next = robot.frameAcceleration(frame_id).copy()
a=x_dot_dot.vector-x_dot_dot_next.vector
|
GaloMALDONADO/motorg
|
motorog/test.py
|
Python
|
gpl-3.0
| 845
|
from distutils.core import setup
setup(
name='Pyspectr',
version='0.1.0',
author='Krzysztof Miernik',
author_email='kamiernik@gmail.com',
packages=['Pyspectr'],
    url='https://github.com/kmiernik/Pyspectr',
scripts=['bin/py_grow_decay.py',
'bin/spectrum_fitter.py'],
license='LICENSE.txt',
description='Useful spectroscopic tools',
long_description=open('README.txt').read(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Programming Language :: Python :: 3.3",
"Topic :: Scientific/Engineering :: Physics",
],
requires=['matplotlib', 'numpy', 'lmfit'],
)
|
kmiernik/Pyspectr
|
setup.py
|
Python
|
gpl-3.0
| 774
|
import os
import string
import yaml
try:  # Mapping moved to collections.abc in Python 3.3 and was removed from collections in 3.10
    from collections.abc import Mapping
except ImportError:
    from collections import Mapping
from pprint import pformat
YamlLoader = yaml.Loader
if 'CLoader' in dir(yaml):
YamlLoader = yaml.CLoader
DEFAULT_CONFIG_SELECTOR = 'USED_CONFIG>'
BASE_CONFIG_SELECTOR = '<BASE'
INCLUDE_FILE_SPECIFIER = '<INCLUDE'
class CircularDependencyError(Exception):
pass
class UndefinedVariableError(Exception):
pass
class CircularIncludeError(Exception):
pass
class ConfigObject(object):
def __init__(self, data):
"""
:type data: dict
"""
self.__data = data
def __check(self, name):
"""
:type name: str
"""
if name not in self.__data.keys():
raise AttributeError('{!r} object has no attribute {!r}'.format(self.__class__.__name__, name))
def __getattr__(self, name):
"""
:type name: str
"""
self.__check(name)
if isinstance(self.__data[name], dict):
return ConfigObject(self.__data[name])
else:
return self.__data[name]
def __setattr__(self, key, value):
if key == '_ConfigObject__data':
super(ConfigObject, self).__setattr__(key, value)
else:
self.__setitem__(key, value)
def __iter__(self):
for each in self.__data.keys():
yield each
def __getitem__(self, name):
"""
:type name: str
"""
return self.__getattr__(name)
def __setitem__(self, name, value):
"""
:type name: str
"""
self.__data[name] = value
def __str__(self):
return pformat(self.__data)
def get_dict(self):
"""
:rtype: dict
"""
return self.__data
class Octoconf(object):
@classmethod
def load(cls, yaml_stream, variables=None, used_config=None, include_cwd=None):
"""
Load config from YAML contained IO stream (e.g. file)
:type yaml_stream: io.StringIO or io.TextIOWrapper
:type variables: dict or None
:type used_config: str or None
:type include_cwd: str or None
:rtype: ConfigObject
"""
yaml_string = yaml_stream.read()
return cls.loads(yaml_string, variables=variables, used_config=used_config, include_cwd=include_cwd)
@classmethod
def loads(cls, yaml_string, variables=None, used_config=None, include_cwd=None):
"""
Load config from YAML contained string
:type yaml_string: str
:type variables: dict or None
:type used_config: str or None
:type include_cwd: str or None
:rtype: ConfigObject
"""
variables = variables or {}
parsed_yaml = cls.__parse_yaml(yaml_string, variables=variables)
populated_yaml = cls.__populate_includes(parsed_yaml, variables=variables, include_cwd=include_cwd)
used_config = used_config or populated_yaml.get(DEFAULT_CONFIG_SELECTOR)
if used_config is None:
raise ValueError('used_config was not set')
if used_config not in populated_yaml.keys():
raise ValueError('missing used_config referred node: {!r}'.format(used_config))
inherited_yaml = cls.__inherit_yaml(populated_yaml, used_config)
return ConfigObject(inherited_yaml[used_config])
@classmethod
def __parse_yaml(cls, yaml_string, variables):
"""
:type yaml_string: str
:type variables: dict
:rtype dict
"""
substituted_yaml_string = cls.__substitute_yaml(yaml_string, variables)
parsed_yaml = yaml.load(substituted_yaml_string, Loader=YamlLoader) or {}
if not isinstance(parsed_yaml, dict):
raise ValueError('bad formatted YAML; have to be dict on top level')
return parsed_yaml
@classmethod
def __populate_includes(cls, parsed_yaml, variables, include_cwd=None, already_included=None):
"""
:type parsed_yaml: dict
:type variables: dict
:type include_cwd: str or None
:type already_included: list or None
:rtype: dict
"""
already_included = already_included or []
# initialize list of includes
includes = parsed_yaml.get(INCLUDE_FILE_SPECIFIER)
if isinstance(includes, str):
includes = [includes]
if not includes:
return parsed_yaml
# build base yaml from includes
base_yaml = {}
for path in includes:
already_included_stack = list(already_included)
if include_cwd:
path = os.path.join(include_cwd, path)
abs_path = os.path.abspath(path)
if abs_path in already_included_stack:
raise CircularIncludeError('circular include detected; ref_chain={ref_chain!s}'.format(
ref_chain=already_included_stack + [abs_path]))
with open(abs_path) as fd:
included_yaml_string = fd.read()
included_parsed_yaml = cls.__parse_yaml(included_yaml_string, variables=variables)
already_included_stack.append(abs_path)
included_populated_yaml = cls.__populate_includes(
included_parsed_yaml, variables=variables,
include_cwd=os.path.dirname(abs_path),
already_included=already_included_stack)
base_yaml = cls.__update_dict_recursive(base_yaml, included_populated_yaml)
# update included base with parsed_yaml
return cls.__update_dict_recursive(base_yaml, parsed_yaml)
@classmethod
def __substitute_yaml(cls, yaml_string, variables):
"""
:type yaml_string: str
:type variables: dict
:rtype: str
"""
yaml_template = string.Template(yaml_string)
try:
substituted_yaml = yaml_template.substitute(variables)
except KeyError as e:
raise UndefinedVariableError('; '.join(e.args))
return substituted_yaml
@classmethod
def __inherit_yaml(cls, parsed_yaml, config_name, parent_stack=None):
"""
:type parsed_yaml: dict
:type config_name: str
:type parent_stack: list or None
:rtype: dict
"""
if not parent_stack:
parent_stack = []
parent_stack.append(config_name)
# Has it base?
if BASE_CONFIG_SELECTOR not in parsed_yaml[config_name].keys():
return parsed_yaml
# Skipping circular-dependency
base_name = parsed_yaml[config_name][BASE_CONFIG_SELECTOR]
if base_name in parent_stack:
raise CircularDependencyError('circular dependency detected; ref_chain={ref_chain!s}'.format(
ref_chain=parent_stack + [base_name]))
del parsed_yaml[config_name][BASE_CONFIG_SELECTOR]
# Get full config with inherited base config
parsed_yaml = cls.__inherit_yaml(parsed_yaml, base_name, parent_stack)
# Set base_config based current config
parsed_yaml[config_name] = cls.__update_dict_recursive(parsed_yaml[base_name], parsed_yaml[config_name])
return parsed_yaml
@classmethod
def __update_dict_recursive(cls, base, update):
"""
:type base: dict
:type update: dict or Mapping
:rtype: dict
"""
for k, v in update.items():
if isinstance(v, Mapping):
base[k] = cls.__update_dict_recursive(base.get(k, {}), v)
else:
base[k] = update[k]
return base
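# A minimal usage sketch (the YAML snippet below is illustrative and not part of the
# original module): 'production' inherits from 'default' via the <BASE selector, and
# USED_CONFIG> picks which top-level node becomes the active configuration.
if __name__ == '__main__':
    _example_yaml = (
        "USED_CONFIG>: production\n"
        "default:\n"
        "    db_host: localhost\n"
        "    db_port: 5432\n"
        "production:\n"
        "    <BASE: default\n"
        "    db_host: db.example.com\n"
    )
    _config = Octoconf.loads(_example_yaml)
    print(_config.db_host)  # db.example.com (overridden in 'production')
    print(_config.db_port)  # 5432 (inherited from 'default')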
|
andras-tim/octoconf
|
octoconf/octoconf.py
|
Python
|
gpl-3.0
| 7,588
|
#!/usr/bin/python
import sys
from merciscript import *
suff=sys.argv[1]
sequences=sys.argv[2]
#custom_motifs = create_pairs(['A', 'C', 'G', 'T'], 2)
runmerci('brightness' + suff + '.fa','darkness' + suff + '.fa')
#motifs1 = parseresult()
motifs1 = parse_output_file("+")
runmerci('darkness' + suff + '.fa','brightness' + suff + '.fa')
#motifs2 = parseresult()
motifs2 = parse_output_file("-")
motifs3 = merge_motifs(motifs1, motifs2)
print "num motifs " + str(len(motifs3))
cntb = read_sequence_data(sequences)
feature_vector_generator(motifs3,-1)
save_feature_vectors('merci_combinedfeatures'+suff+'-newtest.csv', 'merci_combinedfeatures'+suff+'-newtest-occ.csv')
|
petkobogdanov/silver-clusters
|
start/annotate_new.py
|
Python
|
gpl-3.0
| 684
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('naf_autoticket', '0050_incidentticketprocess_ticketeffectarea'),
]
operations = [
migrations.AlterField(
model_name='incidentticketprocess',
name='ticketID',
field=models.CharField(default=0, max_length=25, null=True),
),
]
|
kevinnguyeneng/django-uwsgi-nginx
|
app/naf_autoticket/migrations/0051_auto_20170307_1543.py
|
Python
|
gpl-3.0
| 468
|
#!/usr/bin/env python
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-procmail',
version='0.1',
packages=[
'procmail', 'procmail.migrations',
],
include_package_data=True,
license='GPLv3',
description=(
"A web interface for editing procmail's procmailrc files."
),
long_description=README,
author='Valentin Samir',
author_email='valentin.samir@crans.org',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
package_data={
'procmail': [
'templates/procmail/*',
'static/procmail/*',
'locale/*/LC_MESSAGES/*',
]
},
keywords=['django', 'procmail', 'mail', 'filter', 'gui', 'web', 'interface'],
install_requires=[
'Django >= 1.7,<1.10', 'pyprocmail', 'chardet', "django-formtools"
],
url="https://github.com/nitmir/django-procmail",
download_url="https://github.com/nitmir/django-procmail/releases",
zip_safe=False
)
|
nitmir/django-procmail
|
setup.py
|
Python
|
gpl-3.0
| 1,603
|
from mock import patch
from mock import call
import mock
import kiwi
from .test_helper import raises, patch_open
from kiwi.exceptions import KiwiLiveBootImageError
from kiwi.builder.live import LiveImageBuilder
class TestLiveImageBuilder(object):
@patch('platform.machine')
def setup(self, mock_machine):
mock_machine.return_value = 'x86_64'
self.firmware = mock.Mock()
self.firmware.efi_mode = mock.Mock(
return_value='uefi'
)
kiwi.builder.live.FirmWare = mock.Mock(
return_value=self.firmware
)
self.setup = mock.Mock()
kiwi.builder.live.SystemSetup = mock.Mock(
return_value=self.setup
)
self.filesystem = mock.Mock()
kiwi.builder.live.FileSystem = mock.Mock(
return_value=self.filesystem
)
self.filesystem_setup = mock.Mock()
kiwi.builder.live.FileSystemSetup = mock.Mock(
return_value=self.filesystem_setup
)
self.loop = mock.Mock()
kiwi.builder.live.LoopDevice = mock.Mock(
return_value=self.loop
)
self.bootloader = mock.Mock()
kiwi.builder.live.BootLoaderConfig = mock.Mock(
return_value=self.bootloader
)
self.boot_image_task = mock.Mock()
self.boot_image_task.boot_root_directory = 'initrd_dir'
self.boot_image_task.initrd_filename = 'initrd'
kiwi.builder.live.BootImageDracut = mock.Mock(
return_value=self.boot_image_task
)
self.mbrid = mock.Mock()
self.mbrid.get_id = mock.Mock(
return_value='0xffffffff'
)
kiwi.builder.live.SystemIdentifier = mock.Mock(
return_value=self.mbrid
)
kiwi.builder.live.Path = mock.Mock()
self.kernel = mock.Mock()
self.kernel.get_kernel = mock.Mock()
self.kernel.get_xen_hypervisor = mock.Mock()
self.kernel.copy_kernel = mock.Mock()
self.kernel.copy_xen_hypervisor = mock.Mock()
kiwi.builder.live.Kernel = mock.Mock(
return_value=self.kernel
)
self.xml_state = mock.Mock()
self.xml_state.get_fs_mount_option_list = mock.Mock(
return_value='async'
)
self.xml_state.build_type.get_flags = mock.Mock(
return_value=None
)
self.xml_state.get_image_version = mock.Mock(
return_value='1.2.3'
)
self.xml_state.xml_data.get_name = mock.Mock(
return_value='result-image'
)
self.xml_state.build_type.get_volid = mock.Mock(
return_value='volid'
)
self.xml_state.build_type.get_kernelcmdline = mock.Mock(
return_value='custom_cmdline'
)
self.xml_state.build_type.get_mediacheck = mock.Mock(
return_value=True
)
self.xml_state.build_type.get_publisher = mock.Mock(
return_value='Custom publisher'
)
self.live_image = LiveImageBuilder(
self.xml_state, 'target_dir', 'root_dir',
custom_args={'signing_keys': ['key_file_a', 'key_file_b']}
)
self.context_manager_mock = mock.Mock()
self.file_mock = mock.Mock()
self.enter_mock = mock.Mock()
self.exit_mock = mock.Mock()
self.enter_mock.return_value = self.file_mock
setattr(self.context_manager_mock, '__enter__', self.enter_mock)
setattr(self.context_manager_mock, '__exit__', self.exit_mock)
self.result = mock.Mock()
self.live_image.result = self.result
@patch('platform.machine')
def test_init_for_ix86_platform(self, mock_machine):
xml_state = mock.Mock()
xml_state.xml_data.get_name = mock.Mock(
return_value='some-image'
)
xml_state.get_image_version = mock.Mock(
return_value='1.2.3'
)
mock_machine.return_value = 'i686'
live_image = LiveImageBuilder(
xml_state, 'target_dir', 'root_dir'
)
assert live_image.arch == 'ix86'
@patch('kiwi.builder.live.mkdtemp')
@patch('kiwi.builder.live.NamedTemporaryFile')
@patch('kiwi.builder.live.shutil')
@patch('kiwi.builder.live.Iso.set_media_tag')
@patch('kiwi.builder.live.FileSystemIsoFs')
@patch('kiwi.builder.live.SystemSize')
@patch('kiwi.builder.live.Defaults.get_grub_boot_directory_name')
@patch('os.path.exists')
@patch_open
def test_create_overlay_structure(
self, mock_open, mock_exists, mock_grub_dir, mock_size,
mock_isofs, mock_tag, mock_shutil, mock_tmpfile, mock_dtemp
):
tempfile = mock.Mock()
tempfile.name = 'tmpfile'
mock_tmpfile.return_value = tempfile
mock_exists.return_value = True
mock_grub_dir.return_value = 'grub2'
tmpdir_name = ['temp-squashfs', 'temp_media_dir']
def side_effect(prefix, dir):
return tmpdir_name.pop()
mock_dtemp.side_effect = side_effect
mock_open.return_value = self.context_manager_mock
self.live_image.live_type = 'overlay'
iso_image = mock.Mock()
iso_image.create_on_file.return_value = 'offset'
mock_isofs.return_value = iso_image
rootsize = mock.Mock()
rootsize.accumulate_mbyte_file_sizes = mock.Mock(
return_value=8192
)
mock_size.return_value = rootsize
self.setup.export_package_verification.return_value = '.verified'
self.setup.export_package_list.return_value = '.packages'
self.live_image.create()
self.setup.import_cdroot_files.assert_called_once_with('temp_media_dir')
assert kiwi.builder.live.FileSystem.call_args_list == [
call(
custom_args={'mount_options': 'async'},
device_provider=self.loop, name='ext4',
root_dir='root_dir/'
),
call(
device_provider=None, name='squashfs',
root_dir='temp-squashfs'
)
]
self.filesystem.create_on_device.assert_called_once_with()
self.filesystem.sync_data.assert_called_once_with(
['image', '.profile', '.kconfig', '.buildenv', 'var/cache/kiwi']
)
self.filesystem.create_on_file.assert_called_once_with('tmpfile')
assert mock_shutil.copy.call_args_list == [
call('tmpfile', 'temp-squashfs/LiveOS/rootfs.img'),
call('tmpfile', 'temp_media_dir/LiveOS/squashfs.img')
]
self.setup.call_edit_boot_config_script.assert_called_once_with(
boot_part_id=1, filesystem='iso:temp_media_dir',
working_directory='root_dir'
)
assert call(
'root_dir/etc/dracut.conf.d/02-livecd.conf', 'w'
) in mock_open.call_args_list
assert self.file_mock.write.call_args_list == [
call('add_dracutmodules+=" kiwi-live pollcdrom "\n'),
call(
'omit_dracutmodules+=" '
'kiwi-dump kiwi-overlay kiwi-repart kiwi-lib multipath "\n'
),
call('hostonly="no"\n'),
call('dracut_rescue_image="no"\n')
]
assert kiwi.builder.live.BootLoaderConfig.call_args_list[0] == call(
'isolinux', self.xml_state, 'temp_media_dir'
)
assert self.bootloader.setup_live_boot_images.call_args_list[0] == call(
lookup_path=self.live_image.boot_image.boot_root_directory,
mbrid=None
)
assert self.bootloader.setup_live_image_config.call_args_list[0] == \
call(mbrid=None)
assert self.bootloader.write.call_args_list[0] == call()
assert kiwi.builder.live.BootLoaderConfig.call_args_list[1] == call(
'grub2', self.xml_state, 'temp_media_dir',
{'grub_directory_name': 'grub2'}
)
assert self.bootloader.setup_live_boot_images.call_args_list[1] == call(
lookup_path='root_dir', mbrid=self.mbrid
)
assert self.bootloader.setup_live_image_config.call_args_list[1] == \
call(mbrid=self.mbrid)
assert self.bootloader.write.call_args_list[1] == call()
self.boot_image_task.prepare.assert_called_once_with()
self.boot_image_task.create_initrd.assert_called_once_with(
self.mbrid
)
self.kernel.copy_kernel.assert_called_once_with(
'temp_media_dir/boot/x86_64/loader', '/linux'
)
self.kernel.copy_xen_hypervisor.assert_called_once_with(
'temp_media_dir/boot/x86_64/loader', '/xen.gz'
)
mock_shutil.move.assert_called_once_with(
'initrd', 'temp_media_dir/boot/x86_64/loader/initrd'
)
mock_size.assert_called_once_with(
'temp_media_dir'
)
rootsize.accumulate_mbyte_file_sizes.assert_called_once_with()
mock_isofs.assert_called_once_with(
custom_args={
'meta_data': {
'mbr_id': '0xffffffff',
'preparer': 'KIWI - http://suse.github.com/kiwi',
'publisher': 'Custom publisher',
'volume_id': 'volid',
'efi_mode': 'uefi',
'udf': True
}
}, device_provider=None, root_dir='temp_media_dir'
)
iso_image.create_on_file.assert_called_once_with(
'target_dir/result-image.x86_64-1.2.3.iso'
)
assert self.result.add.call_args_list == [
call(
key='live_image',
filename='target_dir/result-image.x86_64-1.2.3.iso',
use_for_bundle=True,
compress=False,
shasum=True
),
call(
key='image_packages',
filename='.packages',
use_for_bundle=True,
compress=False,
shasum=False
),
call(
key='image_verified',
filename='.verified',
use_for_bundle=True,
compress=False,
shasum=False
)
]
self.setup.export_package_verification.assert_called_once_with(
'target_dir'
)
self.setup.export_package_list.assert_called_once_with(
'target_dir'
)
@patch('kiwi.builder.live.mkdtemp')
@patch('kiwi.builder.live.shutil')
@patch_open
@raises(KiwiLiveBootImageError)
def test_create_no_kernel_found(self, mock_open, mock_shutil, mock_dtemp):
mock_dtemp.return_value = 'tmpdir'
self.kernel.get_kernel.return_value = False
self.live_image.create()
@patch('kiwi.builder.live.mkdtemp')
@patch('kiwi.builder.live.shutil')
@patch_open
@raises(KiwiLiveBootImageError)
def test_create_no_hypervisor_found(
self, mock_open, mock_shutil, mock_dtemp
):
mock_dtemp.return_value = 'tmpdir'
self.kernel.get_xen_hypervisor.return_value = False
self.live_image.create()
@patch('kiwi.builder.live.mkdtemp')
@patch('kiwi.builder.live.shutil')
@patch('os.path.exists')
@patch_open
@raises(KiwiLiveBootImageError)
def test_create_no_initrd_found(
self, mock_open, mock_exists, mock_shutil, mock_dtemp
):
mock_dtemp.return_value = 'tmpdir'
mock_exists.return_value = False
self.live_image.create()
@patch('kiwi.builder.live.Path.wipe')
def test_destructor(self, mock_wipe):
self.live_image.media_dir = 'media-dir'
self.live_image.live_container_dir = 'container-dir'
self.live_image.__del__()
assert mock_wipe.call_args_list == [
call('media-dir'),
call('container-dir')
]
self.live_image.media_dir = None
self.live_image.live_container_dir = None
|
adrianschroeter/kiwi
|
test/unit/builder_live_test.py
|
Python
|
gpl-3.0
| 12,040
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from configparser import ConfigParser
import sqlite3
from werkzeug.security import generate_password_hash as gen_pass_hash
webtex_path = os.path.dirname(os.path.abspath(__file__))
conf_path = webtex_path + '/WebTeX.ini'
db_path = webtex_path + '/WebTeX.db'
def init():
config = ConfigParser()
config.read(conf_path)
config['setup']['initial_setup'] = 'true'
config['auth']['method'] = 'local'
config['ldap']['server'] = ''
config['ldap']['port'] = ''
config['ldap']['base_dn'] = ''
config['redpen']['java_home'] = ''
config['redpen']['conf'] = ''
config['dev']['check_csrf'] = 'true'
f = open(conf_path, 'w')
config.write(f)
f.close()
con = sqlite3.connect(db_path)
cur = con.cursor()
sql = 'DELETE FROM user'
cur.execute(sql)
con.commit()
sql = 'INSERT INTO user VALUES(?,?)'
cur.execute(sql, ('Admin', gen_pass_hash('webtex')))
con.commit()
cur.close()
con.close()
if __name__ == '__main__':
init()
|
trileg/WebTeX
|
WebTeX/init.py
|
Python
|
gpl-3.0
| 1,059
|
class KBucket:
def __init__(self, k):
self._k = k
self._contacts = {}
def add(self, contact, distance):
        if distance not in self._contacts:
            self._contacts[distance] = []
        if contact in self._contacts[distance]:
            # re-adding a known contact moves it to the end (most recently seen)
            self._contacts[distance].remove(contact)
        elif len(self._contacts[distance]) == self._k:
            raise Exception("Bucket full")
        self._contacts[distance].append(contact)
def getkclose(self, distance):
close = []
distances = [d for d in self._contacts.keys()]
while(len(close) < self._k and len(distances) > 0):
closest = self._find_close(distances, distance)
remaining = self._k - len(close)
close.extend(self._contacts[closest][:remaining])
distances.remove(closest)
return close
def _find_close(self, distances, distance):
assert(len(distances) > 0)
distances.sort()
closest, delta = distances[0], abs(distance - distances[0])
for k in distances[1:]:
if(delta > abs(distance - k)):
delta = abs(distance - k)
closest = k
else:
break
return closest
def getBucket(self, distance):
return self._contacts[distance]
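# A minimal usage sketch (contact names and distances are made up, not part of the
# original module); getkclose returns up to k contacts from the buckets whose
# distance keys are nearest to the requested distance.
if __name__ == '__main__':
    bucket = KBucket(k=2)
    bucket.add('node-a', 1)
    bucket.add('node-b', 1)
    bucket.add('node-c', 4)
    print(bucket.getkclose(2))  # ['node-a', 'node-b']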
|
0xNaN/dak
|
kbucket.py
|
Python
|
gpl-3.0
| 1,352
|
#!/usr/bin/python
from math import pi
from fake_sensor import FakeSensor as FakeSensorDevice  # aliased so the srv import below does not shadow it
import rospy
import tf
from geometry_msgs.msg import Quaternion
from stuff.srv import FakeSensor,FakeSensorResponse
def make_quaternion(angle):
q = tf.transformations.quaternion_from_euler(0, 0, angle)
return Quaternion(*q)
def callback(request): # <1>
angle = sensor.value() * 2 * pi / 100.0
q = make_quaternion(angle)
return FakeSensorResponse(q)
if __name__ == '__main__':
    sensor = FakeSensorDevice()
    rospy.init_node('fake_sensor')
    service = rospy.Service('angle', FakeSensor, callback) # <2>
    rospy.spin()  # keep the node alive so the service can answer requests
|
mkhuthir/catkin_ws
|
src/stuff/service_sensor.py
|
Python
|
gpl-3.0
| 621
|
# -*- coding: utf-8 -*-
def split_frames(frames) -> tuple:
'''Splits zmq ROUTER frames to base and message.'''
base = []
nfn = 0
for frame in frames:
nfn += 1
base.append(frame)
if frame == b'':
break
msg = frames[nfn:]
return base, msg
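# A minimal usage sketch (the frames below are made up, not part of the original
# module): everything up to and including the empty delimiter frame is the ROUTER
# envelope, the rest is the message body.
if __name__ == '__main__':
    base, msg = split_frames([b'client-id', b'', b'hello', b'world'])
    print(base)  # [b'client-id', b'']
    print(msg)   # [b'hello', b'world']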
|
waipu/bakawipe
|
lib/sup/zmq.py
|
Python
|
gpl-3.0
| 298
|
#!/usr/bin/env python2.7
import subprocess, socket, sys, select, os, time, binascii, struct, platform, time, logging
from os.path import join as pjoin
from Core.Command import WinRUN
from Core.Color import WinColor, WinStatus
from Core.Start import WinStart
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.propagate = False
PathToFile = pjoin("Logs", "SERVER.log")
LogFileHandler = logging.FileHandler(PathToFile)
LogFileHandler.setLevel(logging.INFO)
Formatter = logging.Formatter('%(lineno)d:%(asctime)s - %(name)s - %(module)s[%(process)d] - %(levelname)s - %(message)s')
LogFileHandler.setFormatter(Formatter)
logger.addHandler(LogFileHandler)
def Current_Time():
t = time.localtime()
TIME = str(t[3]) + ":" + str(t[4]) + ":" + str(t[5])
DATE = str(t[0]) + "/" + str(t[1]) + "/" + str(t[2])
FullTime = DATE + " " + TIME
return FullTime
class Win_OCCULT_SERVER:
def __init__(self):
self.BUFFER_SIZE = 8192
self.SOCKET_LIST = []
self.Discussions = []
self.Traffic = []
self.ClientsCount = 0
def Server(self,HOST,PORT):
Server_Socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
Server_Socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
Server_Socket.bind((HOST,PORT))
Server_Socket.listen(2)
Server_Socket.setblocking(1)
self.SOCKET_LIST.append(Server_Socket)
logger.info(socket.gethostname())
logger.info(socket.gethostbyname(socket.gethostname()))
logger.info(platform.system())
logger.info(PORT)
logger.info("listenning")
os.system("cls")
print WinColor["LCYAN"] + "[+] " + WinColor["ENDC"] + "Server IP : " + socket.gethostbyname(socket.gethostname())
print WinColor["LCYAN"] + "[+] " + WinColor["ENDC"] + "Server Type : " + platform.system()
print WinColor["LCYAN"] + "[+] " + WinColor["ENDC"] + "Server Name : " + socket.gethostname()
print WinColor["LCYAN"] + "[+] " + WinColor["ENDC"] + "Listen Port : " + str(PORT)
print WinColor["LCYAN"] + "[+] " + WinColor["ENDC"] + "Press " + WinColor["YELLOW"] + "CTRL+C" + WinColor["ENDC"] + " To Exit"
print "="*35
Counter_1 = 1
Counter_2 = 1
while True:
Ready,Write,Error = select.select(self.SOCKET_LIST,[],[],0)
for Self_Socket in Ready:
if Self_Socket == Server_Socket:
Connection,Address = Server_Socket.accept()
self.ClientsCount += 1
try: un, CliInfo = self.Decode(Connection.recv(4096)).split('>|<'); UserName = un.rstrip()
except Exception, e:
print WinStatus["ERROR"] + str(e)
logger.error(str(e))
WinStart()
wlcMsg = WinColor["LBLUE"] + '\r[+] ' + WinColor["ENDC"] + 'Welcome \'' + WinColor["LGREEN"] + UserName + WinColor["ENDC"] + '\' | Enjoy !\n'
wlcMsg += WinColor["LBLUE"] + "[+] " + WinColor["ENDC"] + "You Are Number " + WinColor["RED"] + str(self.ClientsCount) + WinColor["ENDC"] + " Online\n"
wlcMsg += WinColor["LBLUE"] + '[+] ' + WinColor["ENDC"] + 'Server IP : %s\n'%socket.gethostbyname(socket.gethostname())
wlcMsg += WinColor["LBLUE"] + "[+] " + WinColor["ENDC"] + "Press " + WinColor["YELLOW"] +"CTRL+C" + WinColor["ENDC"] + " To Exit\n\n"
Connection.sendall(self.Encode(wlcMsg))
print WinColor["LGREEN"] + str(Counter_1) + "." + WinColor["ENDC"] + "New Client : " + UserName + "@" + Address[0] + " " + WinColor["LGREEN"] + "Connected " + WinColor["ENDC"]
self.SOCKET_LIST.append(Connection)
NewConnectionLog = "client " + str(Counter_1) + " %s@%s connected"%(UserName,Address[0])
logger.info(NewConnectionLog)
self.Traffic.append(NewConnectionLog)
try:
Path_To_Client_File = pjoin("Logs", UserName + "@" + Address[0] + "_" + str(Address[1]) + ".log")
ClientLog = open(Path_To_Client_File,"w")
ClientLog.write(CliInfo)
ClientLog.close()
logger.info("client \'" + UserName + "\' log file created")
except Exception, e: logger.info("failed to create \'" + UserName + "\' log file"); logger.info(str(e))
JoinMas = '\r[ ' + UserName + ' ] ' + WinColor["LGREEN"] + "Joined " + WinColor["ENDC"] + 'To Chat !\n'
self.Broadcast(Server_Socket,Connection,JoinMas)
Counter_1 = Counter_1 + 1
else:
try:
if Self_Socket.recv(self.BUFFER_SIZE).find("<DIS>") != -1:
Name,IP = Self_Socket.recv(self.BUFFER_SIZE).split("<DIS>")
if Self_Socket in self.SOCKET_LIST: self.SOCKET_LIST.remove(Self_Socket)
self.Broadcast(Server_Socket, Self_Socket,'\r[ ' + Name + ' ] ' + WinColor["LRED"] + "Offline" + WinColor["ENDC"] + '\n')
DisconnectLog = "client " + str(Counter_2) + " %s@%s disconnected"%(Name,IP)
logger.info(DisconnectLog)
self.Traffic.append(DisconnectLog)
print WinColor["LRED"] + str(Counter_2) + "." + WinColor["ENDC"] + "The Client : " + Name + "@" + IP + WinColor["LRED"] + " Disconnected" + WinColor["ENDC"]
Counter_2 = Counter_2 + 1
self.ClientsCount -= 1
else:
NAME,DATA = self.Decode(Self_Socket.recv(self.BUFFER_SIZE)).split('<|>')
Peer_Name = Self_Socket.getpeername()
self.Discussions.append(NAME + '@' + str(Peer_Name[0]) + ':' + str(Peer_Name[1]) +" "+ DATA.rstrip())
self.Broadcast(Server_Socket,Self_Socket, '\r[ ' + NAME + ' ] ' + DATA)
SendDataLog = NAME + '@' + str(Peer_Name[0]) + ':' + str(Peer_Name[1]) +" "+ DATA.rstrip()
logger.info(SendDataLog)
except:
logger.error("client %s:%d have error"%Address)
self.Broadcast(Server_Socket,Self_Socket, WinColor["RED"] + '[ %s:%d ] have error\n'%Address + WinColor["ENDC"])
def Encode(self,str):
cipher = ""
for i in range(len(str)): cipher += chr(ord(str[i])^(ord("S")))
return cipher.encode('rot13').encode('hex')
def Decode(self,hex):
plain = ""
cipher = hex.decode('hex').decode('rot13')
for i in range(len(cipher)):
plain += chr(ord(cipher[i])^(ord("S")))
return plain
def Broadcast(self,Server_Socket,Connection,MSG):
for sock in self.SOCKET_LIST:
if sock != Server_Socket and sock != Connection:
try: sock.sendall(self.Encode(MSG))
except:
sock.close()
if sock in self.SOCKET_LIST: self.SOCKET_LIST.remove(sock)
def Win_Server_Start(PORT):
try:
logger.info("server started")
HOST = ''
START = Win_OCCULT_SERVER()
START.Server(HOST,PORT)
except KeyboardInterrupt:
print "\r \n\n> " + WinColor["LCYAN"] + "Traffics:" + WinColor["ENDC"] + "\n==========="
logger.info("> traffics")
for Traff in START.Traffic:
print Traff.replace('\n','')
logger.info(Traff.replace('\n',''))
print "\n\n> " + WinColor["LCYAN"] + "Conversations:" + WinColor["ENDC"] + "\n================"
logger.info("> conversations")
for Speak in START.Discussions:
print Speak.replace('\n','')
logger.info(Speak.replace('\n',''))
logger.warn("CTRL+C pressed")
logger.info("end " + str(START.ClientsCount) + " users killed")
print "\n\n" + WinStatus['SUC'] + " End " + WinColor["RED"] + str(START.ClientsCount) + WinColor["ENDC"] + " Users Killed !"
TIME = Current_Time().replace('[','').replace(']','')
print WinStatus['SUC'] + " Shutting Down Server...\n" + WinStatus['SUC'] , TIME + WinColor["ENDC"]
logger.info("server stopped")
|
micle2018/OCCULT
|
Modules/chat/WinServer.py
|
Python
|
gpl-3.0
| 7,221
|
from rest_framework import parsers, renderers
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth import authenticate
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Token
class AuthTokenSerializer(serializers.Serializer):
username = serializers.CharField(label=_("Username"))
password = serializers.CharField(label=_("Password"), style={'input_type': 'password'})
device = serializers.CharField(label=_("Device"))
def validate(self, attrs):
username = attrs.get('username')
password = attrs.get('password')
device = attrs.get('device')
if username and password:
print("validating")
user = authenticate(username=username, password=password)
if user:
# From Django 1.10 onwards the `authenticate` call simply
# returns `None` for is_active=False users.
# (Assuming the default `ModelBackend` authentication backend.)
if not user.is_active:
msg = _('User account is disabled.')
raise serializers.ValidationError(msg, code='authorization')
else:
msg = _('Unable to log in with provided credentials.')
raise serializers.ValidationError(msg, code='authorization')
else:
msg = _('Must include "username" and "password".')
raise serializers.ValidationError(msg, code='authorization')
data = {"user": username, "name":device}
return data
class ObtainAuthToken(APIView):
authentication_classes = ()
throttle_classes = ()
permission_classes = ()
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = AuthTokenSerializer
def post(self, request, *args, **kwargs):
print("obtain")
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
device = serializer.validated_data['name']
user_object = User.objects.get(username = user)
token, created = Token.objects.get_or_create(user=user_object, name=device)
return Response({'token': token.key})
obtain_auth_token = ObtainAuthToken.as_view()
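# Typical wiring into a urlconf (assumed project layout, not part of this module):
#   url(r'^api-token-auth/$', obtain_auth_token)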
|
yabirgb/OCManager
|
core/users/authentication.py
|
Python
|
gpl-3.0
| 2,507
|
# -*- coding: utf-8 -*-
import daemon
import scrappers
import pymongo
import os
import sys
import datetime
import time
import re
from config import Config
SCRAPPERS = (
('itjobs', scrappers.ITJobs),
)
class JobFeeder(object):
def __init__(self, scrappers):
self.scrappers = scrappers
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def bootstrap(self):
mongostr = "mongodb://%s:%s" % (Config.MONGO_HOST, Config.MONGO_PORT)
mongo = pymongo.MongoClient(mongostr)
self.db = mongo[Config.MONGO_DB]
self.load_configuration()
def load_configuration(self):
technologies_file = os.path.join(self.current_dir, 'config', 'technologies.txt')
consultancies_file = os.path.join(self.current_dir, 'config', 'consultancies.txt')
self.technologies = open(technologies_file).read().strip().split('\n')
self.consultancies = open(consultancies_file).read().strip().split('\n')
def run(self):
for source, scrapper_class in self.scrappers:
scrapper = scrapper_class(self.db, self.consultancies, self.technologies)
doc_cursor = self.db.jobs.find({'source': source})
doc_cursor.sort('publish_date')
doc_cursor.limit(1)
if doc_cursor.count() > 0:
document = doc_cursor[0]
date = None
else:
document = None
date = datetime.datetime.now() - datetime.timedelta(days=90)
scrapper.scrape(document, date)
if __name__ == '__main__':
feeder = JobFeeder(SCRAPPERS)
feeder.bootstrap()
feeder.run()
|
diogoosorio/werk.io
|
importer/src/feeder.py
|
Python
|
gpl-3.0
| 1,664
|
import sys, xbmcplugin, xbmcaddon, xbmc
from xbmcgui import ListItem
# xbmc hooks
addon = xbmcaddon.Addon(id='plugin.audio.googlemusic.exp')
# plugin constants
plugin = "GoogleMusicEXP-" + addon.getAddonInfo('version')
dbg = addon.getSetting( "debug" ) == "true"
addon_url = sys.argv[0]
handle = int(sys.argv[1])
# utility functions
def log(message):
if dbg:
xbmc.log("[%s] %s" % (plugin, message))
def paramsToDict(parameters):
''' Convert parameters encoded in a URL to a dict. '''
paramDict = {}
if parameters:
paramPairs = parameters[1:].split('&')
for paramsPair in paramPairs:
paramSplits = paramsPair.split('=')
try:
paramDict[paramSplits[0]] = paramSplits[1]
except: pass
return paramDict
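# e.g. paramsToDict('?artist=Muse&song=Uprising') -> {'artist': 'Muse', 'song': 'Uprising'}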
def createItem(title, thumb):
li = ListItem(title)
try:
li.setThumbnailImage(thumb)
except: pass
li.setProperty('IsPlayable', 'true')
li.setProperty('Music', 'true')
li.setProperty('mimetype', 'audio/mpeg')
return li
def setResolvedUrl(listItem):
xbmcplugin.setResolvedUrl(handle=handle, succeeded=True, listitem=listItem)
def setDirectory(listItems, content, sortMethods):
xbmcplugin.addDirectoryItems(handle, listItems)
if handle > 0:
xbmcplugin.setContent(handle, content)
for sorts in sortMethods:
xbmcplugin.addSortMethod(int(sys.argv[1]), sorts)
xbmcplugin.endOfDirectory(handle, succeeded=True)
def tryEncode(text, encoding='utf-8'):
try:
if sys.platform.startswith('linux'):
return text.decode(encoding).encode('latin1')
return unicode(text.decode(encoding))
except: pass
try:
return text.encode(encoding, errors='ignore')
except:
log(" ENCODING FAIL!!! "+encoding+" "+repr(text))
return repr(text)
|
mrotschi/googlemusic-xbmc
|
utils.py
|
Python
|
gpl-3.0
| 1,856
|
#!/usr/bin/env python3
#License GPL v3
#Author Horst Knorr <gpgmailencrypt@gmx.de>
import shutil
import subprocess
from .child import _gmechild
from ._dbg import _dbg
from .version import *
S_NOSPAM=0
S_MAYBESPAM=1
S_SPAM=2
#################
#_basespamchecker
#################
class _basespamchecker(_gmechild):
def __init__(self,parent,leveldict):
_gmechild.__init__(self,parent=parent,filename=__file__)
self.cmd=None
@_dbg
def set_leveldict(self,leveldict):
raise NotImplementedError
@_dbg
def is_spam(self,mail):
raise NotImplementedError
def is_available(self):
if self.cmd!= None and len(self.cmd)>0:
return True
else:
return False
##############
#_SPAMASSASSIN
##############
class _SPAMASSASSIN(_basespamchecker):
def __init__( self,
parent,
leveldict):
_basespamchecker.__init__(self,parent,leveldict)
self._SPAMHOST="localhost"
self._SPAMPORT=783
self._SPAMMAXSIZE=5000000
self.cmd=shutil.which("spamc")
self.set_leveldict(leveldict)
@_dbg
def set_leveldict(self,leveldict):
self.spamlevel=5.0
self.spammaybelevel=2.5
try:
(self.spamlevel,
self.spammaybelevel,
self._SPAMHOST,
self._SPAMPORT,
self._SPAMMAXSIZE)=leveldict["SPAMASSASSIN"]
except:
self.log_traceback()
@_dbg
def is_spam(self,mail):
spamlevel=S_NOSPAM
p=subprocess.Popen([self.cmd,
"-s",str(self._SPAMMAXSIZE),
"-d",self._SPAMHOST,
"-R",
"-p",str(self._SPAMPORT)],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result=p.communicate(input=mail.encode("UTF-8",
unicodeerror))[0].decode("UTF-8",
unicodeerror)
scoretext=result[:result.find("\n")].split("/")[0]
try:
score=float(scoretext)
except:
self.log("Could not convert score to float","e")
if score >self.spammaybelevel:
if score >self.spamlevel:
spamlevel=S_SPAM
else:
spamlevel=S_MAYBESPAM
return spamlevel,score
############
#_BOGOFILTER
############
class _BOGOFILTER(_basespamchecker):
def __init__(self,parent,leveldict):
_basespamchecker.__init__(self,parent,leveldict)
self.cmd=shutil.which("bogofilter")
self.set_leveldict(leveldict)
@_dbg
def set_leveldict(self,leveldict):
pass
@_dbg
def is_spam(self,mail):
self.debug("Spamcheck bogofilter")
spamlevel=S_NOSPAM
p=subprocess.Popen([self.cmd,
"-T"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result=p.communicate(input=mail.encode("UTF-8",
unicodeerror))[0].decode("UTF-8",
unicodeerror)
level,scoretext=result[:result.find("\n")].split(" ")
try:
score=float(scoretext)*50
except:
self.log("Could not convert score to float","e")
if level =="S":
spamlevel=S_SPAM
elif level == "U":
spamlevel=S_MAYBESPAM
return spamlevel,score
################################################################################
####################
#get_spamscannerlist
####################
def get_spamscannerlist():
return ["BOGOFILTER","SPAMASSASSIN"]
################
#get_spamscanner
################
def get_spamscanner(scanner,parent,leveldict):
scanner=scanner.upper().strip()
if scanner=="BOGOFILTER":
_s=_BOGOFILTER(parent,leveldict)
if _s.is_available():
return _s
if scanner=="SPAMASSASSIN":
_s=_SPAMASSASSIN(parent,leveldict)
if _s.is_available():
return _s
return None
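# A minimal usage sketch (the parent object and thresholds are assumptions, not part
# of the original module); the leveldict tuple order matches _SPAMASSASSIN.set_leveldict:
# (spam level, maybe-spam level, host, port, max message size).
#
#     leveldict = {"SPAMASSASSIN": (5.0, 2.5, "localhost", 783, 5000000)}
#     scanner = get_spamscanner("SPAMASSASSIN", parent, leveldict)
#     if scanner is not None:
#         level, score = scanner.is_spam(mail_text)  # level is S_NOSPAM / S_MAYBESPAM / S_SPAM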
|
gpgmailencrypt/gpgmailencrypt
|
gmeutils/spamscanners.py
|
Python
|
gpl-3.0
| 3,487
|
# -*- coding: utf-8
from .models import Condicionfisicavivienda, Comunidad, Hogar, Parentesco, Persona, Tipolocalidad, Tipovia, Tipovivienda, Vivienda, \
Miembrohogar, Zona, Vocero, Pais, Patologia, Salud, Tipoapoyo, Tipodiscapacidad, Discapacidad
from .forms import PersonaForm, MiembrohogarForm, VoceroForm
from django.contrib import admin, messages
from django.contrib.admin import ModelAdmin, SimpleListFilter
from django.contrib.admin.widgets import AdminDateWidget
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserChangeForm
from django.contrib.auth.models import User
from suit.admin import SortableTabularInline, SortableModelAdmin, SortableStackedInline
from suit.widgets import SuitDateWidget, SuitSplitDateTimeWidget, EnclosedInput, LinkedSelect, AutosizedTextarea, NumberInput, SuitSplitDateTimeWidget
###############################################################################
###              Administration of the Communal Council Zone               ###
###############################################################################
class VoceroInline(admin.TabularInline):
form = VoceroForm
model = Vocero
extra = 1
verbose_name_plural = 'Voceros de la zona'
#ordering = ('parentesco',)
# class VoceroAdmin(VersionAdmin):
# search_fields = ('zona', 'persona')
# list_display = ('zona', 'persona')
# ordering = ('zona', 'persona')
# list_select_related = True
# #inlines = (VoceroInline,)
# fieldsets = [
# (None, {
# 'fields': [
# 'zona',
# 'persona'
# ]
# }),
# ]
# admin.site.register(Vocero, VoceroAdmin)
class ZonaAdmin(admin.ModelAdmin):
search_fields = ('comunidad', 'nombrezona')
list_display = ('comunidad', 'nombrezona')
ordering = ('comunidad', 'nombrezona')
list_select_related = True
inlines = (VoceroInline,)
fieldsets = [
(None, {
'fields': [
'comunidad',
'nombrezona',
'descripcion'
]
}),
]
admin.site.register(Zona, ZonaAdmin)
class ZonaInline(admin.TabularInline):
model = Zona
extra = 1
verbose_name_plural = "Zonas de la Comunidad"
ordering = ('nombrezona',)
###############################################################################
###                 Administration of the Communal Council                 ###
###############################################################################
class comunidadAdmin(admin.ModelAdmin):
search_fields = ('nombrecomunidad', 'parroquia')
list_display = ('nombrecomunidad', 'parroquia')
ordering = ('nombrecomunidad', 'parroquia')
list_select_related = True
inlines = (ZonaInline,)
fieldsets = [
(None, {
'fields': [
'idcomunidad',
'nombrecomunidad',
'tipovia',
'nombrevia',
'tipolocalidad',
'nombrelocalidad',
'tipolocal',
'nombrelocal',
'pisolocal',
'numerolocal',
'referencialocal',
'parroquia',
'poligonal',
'centrocoordenadas',
'coordenadas'
]
}),
]
admin.site.register(Comunidad, ComunidadAdmin)
###############################################################################
### Administration of the Household Member                                 ###
###############################################################################
# class MiembrohogarAdmin(VersionAdmin):
# form = MiembrohogarForm
# search_fields = ('hogar',)
# list_display = ('hogar', 'parentesco', 'persona')
# ordering = ('hogar', 'parentesco', 'persona')
# list_select_related = True
# fieldsets = [
# (None, {
# 'fields': [
# 'hogar',
# 'parentesco',
# 'persona',
# ]
# }),
# ]
# admin.site.register(Miembrohogar, MiembrohogarAdmin)
class MiembrohogarInline(admin.TabularInline):
form = MiembrohogarForm
model = Miembrohogar
extra = 1
verbose_name_plural = 'Miembros del hogar'
ordering = ('parentesco',)
###############################################################################
### Administration of the Hogar model                                      ###
###############################################################################
class HogarAdmin(admin.ModelAdmin):
search_fields = ('nombrehogar',)
list_display = ('nombrehogar', 'vivienda')
ordering = ('vivienda', 'nombrehogar')
list_select_related = True
inlines = (MiembrohogarInline,)
fieldsets = [
(None, {
'fields': [
'nombrehogar',
'vivienda',
'tipopropiedadvivienda'
]
}),
]
admin.site.register(Hogar, HogarAdmin)
admin.site.register(Parentesco)
###############################################################################
### Administration of the Persona model                                    ###
###############################################################################
class PersonaAdmin(admin.ModelAdmin):
form = PersonaForm
search_fields = ('cedulaidentidadpersona',)
list_display = ('__unicode__','fecharegistro')
#ordering = ('nombrepersona', 'otrosnombrespersona', 'apellidopersona', 'otrosapellidospersona')
list_select_related = True
admin.site.register(Persona, PersonaAdmin)
###############################################################################
### Administration of the Salud model                                      ###
###############################################################################
class PruebaAdmin(admin.ModelAdmin):
list_select_related = True
admin.site.register(Salud)
admin.site.register(Patologia)
admin.site.register(Tipoapoyo)
admin.site.register(Tipodiscapacidad)
admin.site.register(Discapacidad)
admin.site.register(Tipolocalidad)
###############################################################################
### Administration of the TipoVia model                                    ###
###############################################################################
class TipoviaAdmin(admin.ModelAdmin):
pass
admin.site.register(Tipovia, TipoviaAdmin)
###############################################################################
### Administration of the TipoVivienda model                               ###
###############################################################################
class TipoviviendaAdmin(admin.ModelAdmin):
pass
admin.site.register(Tipovivienda, TipoviviendaAdmin)
###############################################################################
### Administration of the Vivienda model                                   ###
###############################################################################
class ViviendaAdmin(admin.ModelAdmin):
search_fields = ('nombrevivienda' ,'piso' ,'apartamento')
list_display = ('nombrevivienda' ,'piso' ,'apartamento')
list_select_related = True
fieldsets = [
(None, {
'fields': [
'tipovivienda',
'nombrevivienda',
'piso',
'apartamento',
'tipovia',
'nombrevia',
'tipolocalidad',
'nombrelocalidad',
'zona',
'metroscuadrados',
'numerohabitaciones',
'numerobanos',
'condicionfisica'
]
}),
]
admin.site.register(Vivienda, ViviendaAdmin)
|
gvizquel/comunidad
|
demografia/admin.py
|
Python
|
gpl-3.0
| 7,155
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2009 Doug Hellmann All rights reserved.
#
"""
"""
#end_pymotw_header
import csv
csv.register_dialect('pipes', delimiter='|')
with open('testdata.pipes', 'r') as f:
reader = csv.reader(f, dialect='pipes')
for row in reader:
print row
|
qilicun/python
|
python2/PyMOTW-1.132/PyMOTW/csv/csv_dialect.py
|
Python
|
gpl-3.0
| 310
|
# -*- coding: utf-8 -*-
# rdiffweb, A web interface to rdiff-backup repositories
# Copyright (C) 2012-2021 rdiffweb contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict, namedtuple
import datetime
from io import StringIO
import logging
import os
from chartkick.ext import ChartExtension
import cherrypy
import humanfriendly
from jinja2 import Environment, PackageLoader
from jinja2.filters import do_mark_safe
from jinja2.loaders import ChoiceLoader
from rdiffweb.tools import i18n
from rdiffweb.core import librdiff
from rdiffweb.core import rdw_helpers
from rdiffweb.tools.i18n import ugettext as _
from rdiffweb.core.store import RepoObject
# Define the logger
logger = logging.getLogger(__name__)
_ParentEntry = namedtuple("_ParentEntry", 'path,display_name')
def attrib(**kwargs):
"""Generate an attribute list from the keyword argument."""
def _escape(text):
if isinstance(text, bytes):
text = text.decode('ascii', 'replace')
text = str(text)
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
def _format(key, val):
# Don't write the attribute if value is False
if val is False:
return
if val is True:
yield str(key)
return
if isinstance(val, list):
val = ' '.join([_escape(v) for v in val if v])
else:
val = _escape(val)
if not val:
return
yield '%s="%s"' % (str(key), val)
first = True
buf = StringIO()
for key, val in sorted(kwargs.items()):
for t in _format(key, val):
if not first:
buf.write(' ')
first = False
buf.write(t)
data = buf.getvalue()
buf.close()
return do_mark_safe(data)
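# Illustrative call (attribute names and values are invented):
#     attrib(id="x", hidden=False, selected=True, style=["a", "b"])
# returns the markup-safe string 'id="x" selected style="a b"': a False value drops
# the attribute, True keeps only the key, and list values are escaped and space-joined.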
def do_filter(sequence, attribute_name):
"""Filter sequence of objects."""
return [x for x in sequence if (isinstance(x, dict) and attribute_name in x and x[attribute_name]) or (hasattr(x, attribute_name) and getattr(x, attribute_name))]
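# For instance (made-up data): do_filter([{'in_progress': 1}, {'in_progress': 0}], 'in_progress')
# keeps only the first entry; items are kept when the named key/attribute exists and is truthy.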
def do_format_lastupdated(value, now=None):
"""
Used to format date as "Updated 10 minutes ago".
Value could be a RdiffTime or an epoch as int.
"""
if not value:
return ""
now = librdiff.RdiffTime(now)
delta = now.epoch() - (value.epoch() if isinstance(value, librdiff.RdiffTime) else value)
delta = datetime.timedelta(seconds=delta)
if delta.days > 365:
return _('%d years ago') % (delta.days / 365)
if delta.days > 60:
return _('%d months ago') % (delta.days / 30)
if delta.days > 7:
return _('%d weeks ago') % (delta.days / 7)
elif delta.days > 1:
return _('%d days ago') % delta.days
elif delta.seconds > 3600:
return _('%d hours ago') % (delta.seconds / 3600)
elif delta.seconds > 60:
return _('%d minutes ago') % (delta.seconds / 60)
return _('%d seconds ago') % delta.seconds
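# Example with arbitrary epochs, assuming librdiff.RdiffTime(now) simply wraps the
# given epoch: do_format_lastupdated(999400, now=1000000) is a 600-second delta and
# returns "10 minutes ago"; a 90-day delta would fall into the "%d months ago" branch.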
def create_repo_tree(repos):
"""
Organise the repositories into a tree.
"""
repos = sorted(repos, key=lambda r: r.display_name)
repo_tree = OrderedDict()
for repo in repos:
h = repo_tree
key = repo.display_name.strip('/').split('/')
for p in key[:-1]:
if p in h and isinstance(h[p], RepoObject):
h[p] = {'.': h[p]}
h = h.setdefault(p, {})
h[key[-1]] = repo
return repo_tree
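# Shape of the result (display names are invented): repos named "work/docs" and
# "work/docs/photos" become {'work': {'docs': {'.': <docs repo>, 'photos': <photos repo>}}},
# i.e. a repo that is also a parent of other repos is moved under the '.' key.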
def list_parents(path):
# Build the parameters
# Build "parent directories" links
# TODO this function should return a list of DirEntry instead.
parents = [_ParentEntry(b'', path._repo.display_name)]
parent_path_b = b''
for part_b in path.path.split(b'/'):
if part_b:
parent_path_b = os.path.join(parent_path_b, part_b)
display_name = path._repo._decode(path._repo.unquote(part_b))
parents.append(_ParentEntry(parent_path_b, display_name))
return parents
def url_for(endpoint, *args, **kwargs):
"""
Generate a url for the given endpoint, path (*args) with parameters (**kwargs)
This could be used to generate a path with userobject and repo object
"""
path = "/" + endpoint.strip("/")
for chunk in args:
if not chunk:
continue
if hasattr(chunk, 'owner') and hasattr(chunk, 'path'):
# This is a RepoObject
path += "/"
path += chunk.owner
path += "/"
path += rdw_helpers.quote_url(chunk.path.strip(b"/"))
elif hasattr(chunk, 'path'):
# This is a DirEntry
if chunk.path:
path += "/"
path += rdw_helpers.quote_url(chunk.path.strip(b"/"))
elif chunk and isinstance(chunk, bytes):
path += "/"
path += rdw_helpers.quote_url(chunk.strip(b"/"))
elif chunk and isinstance(chunk, str):
path += "/"
path += chunk.rstrip("/")
else:
raise ValueError(
'invalid positional arguments, url_for accept str, bytes or RepoPath: %r' % chunk)
# Sort the arguments to have predictable results.
qs = [(k, v.epoch() if hasattr(v, 'epoch') else v)
for k, v in sorted(kwargs.items()) if v is not None]
return cherrypy.url(path=path, qs=qs)
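# Illustrative calls (endpoint names and objects are invented):
#     url_for('browse', repo)                          # -> /browse/<owner>/<quoted repo path>
#     url_for('graphs', 'activities', repo, limit=10)  # keyword args become the query string
# Keyword values with an .epoch() method (RdiffTime) are converted to their epoch
# first, the pairs are sorted for predictable URLs, and None values are dropped.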
class PatchedChartExtension(ChartExtension):
"""
Patched version of Chartkick.
"""
def _chart_support(self, name, data, caller, **kwargs):
# Remove l_0_
kwargs = dict((k[2:], v) for (k, v) in kwargs.items())
return super(PatchedChartExtension, self)._chart_support(
name, data, caller, **kwargs)
class TemplateManager(object):
"""
Uses to generate HTML page from template using Jinja2 templating.
"""
def __init__(self):
loader = ChoiceLoader([
PackageLoader('rdiffweb', 'templates')
])
# Load all the templates from /templates directory
self.jinja_env = Environment(
loader=loader,
auto_reload=True,
autoescape=True,
extensions=[
'jinja2.ext.i18n',
'jinja2.ext.with_',
'jinja2.ext.autoescape',
PatchedChartExtension,
])
# Register filters
self.jinja_env.filters['filter'] = do_filter
self.jinja_env.filters['lastupdated'] = do_format_lastupdated
self.jinja_env.filters['filesize'] = lambda x: humanfriendly.format_size(
x, binary=True)
# Register method
self.jinja_env.globals['attrib'] = attrib
self.jinja_env.globals['create_repo_tree'] = create_repo_tree
self.jinja_env.globals['list_parents'] = list_parents
self.jinja_env.globals['url_for'] = url_for
def compile_template(self, template_name, **kwargs):
"""Very simple implementation to render template using jinja2.
        `template_name`
The filename to be used as template.
`kwargs`
The arguments to be passed to the template.
"""
logger.log(1, "compiling template [%s]", template_name)
self.jinja_env.install_gettext_callables(
i18n.ugettext, i18n.ungettext, newstyle=True)
template = self.jinja_env.get_template(template_name)
data = template.render(kwargs)
logger.log(1, "template [%s] compiled", template_name)
return data
|
ikus060/rdiffweb
|
rdiffweb/core/rdw_templating.py
|
Python
|
gpl-3.0
| 8,299
|
import cv2
import numpy as np
import urllib2
import security
from event import Event
from time import time, sleep
class Camera(object):
def __init__(self):
self.FrameCaptured = Event()
self.name = ''
def read(self):
raise NotImplementedError
def save(self, filename):
image = self.read()
        cv2.imwrite(filename, image)
def stream(self, fps):
frameperiod = 1.0 / fps
now = time()
nextframe = now + frameperiod
name = 'image_{0}'.format(self.name)
cv2.namedWindow(name)
while len(self.FrameCaptured) > 0:
frame = self.read()
cv2.imshow(name, frame)
cv2.waitKey(1)
self.FrameCaptured(frame)
while now < nextframe:
sleep(nextframe - now)
now = time()
nextframe += frameperiod
cv2.destroyWindow(name)
@staticmethod
def create(configuration, name):
camera = None
if 'cameras' not in configuration:
raise KeyError('Failed to find cameras listing in configuration settings')
if name not in configuration['cameras']:
raise KeyError('Failed to find camera ["{0}"] in cameras listing'.format(name))
config = configuration['cameras'][name]
if 'type' in config:
typ = config['type']
else:
raise KeyError('Failed to find "type" of camera in configuration settings for...well the camera')
if 'address' in config:
address = config['address']
else:
raise KeyError('Failed to find "address" of camera in configuration settings for...well the camera')
if 'user' in config:
user = config['user']
if len(user) > 0:
user = security.Security.decrypt(user)
else:
user = ''
if 'password' in config:
pwd = config['password']
if len(pwd) > 0:
pwd = security.Security.decrypt(pwd)
else:
pwd = ''
if typ == 'ip':
camera = IpCamera(name, address, user, pwd)
elif typ == 'usb':
camera = UsbCamera(name, address)
elif typ == 'stream':
camera = StreamCamera(name, address)
return camera
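# The configuration mapping is expected to look roughly like this (keys taken from
# the checks above, values invented; 'user' and 'password' are stored encrypted and
# decoded through security.Security.decrypt):
#
#     configuration = {
#         'cameras': {
#             'porch': {
#                 'type': 'ip',
#                 'address': 'http://{USER}:{PASSWORD}@192.168.1.20/video',
#                 'user': '<encrypted user>',
#                 'password': '<encrypted password>',
#             }
#         }
#     }
#     cam = Camera.create(configuration, 'porch')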
class IpCamera(Camera):
def __init__(self, name, address, user, password):
Camera.__init__(self)
self.name = name
self.url = address.replace('{USER}', user).replace('{PASSWORD}', password)
def read(self):
capture = cv2.VideoCapture()
capture.open(self.url)
_, image = capture.read()
return image
class UsbCamera(Camera):
def __init__(self, name, number):
Camera.__init__(self)
self.name = name
number = int(float(number))
self.camera = number
def read(self):
image = None
capture = cv2.VideoCapture(self.camera)
if capture.isOpened():
_, image = capture.read()
return image
class StreamCamera(Camera):
def __init__(self, name, stream):
Camera.__init__(self)
self.name = name
self.stream = stream
def read(self):
data = self.stream.read()
image = np.asarray(bytearray(data), dtype="uint8")
return cv2.imdecode(image, cv2.IMREAD_COLOR)
|
shraklor/observations
|
camera.py
|
Python
|
gpl-3.0
| 3,388
|
#!/usr/bin/python3.3 -O
import re
prog = re.compile(r"^(\d{1,3})[.](\d{1,3})[.](\d{1,3})[.](\d{1,3})/(\d{1,2})$")
def iptobinpad(ip):
return ''.join([bin(int(i))[2:].zfill(8) for i in ip])
def CIDRtoIP(mask):
"""
return the subnet IP extracted from CIDR notation
"""
ip = ("1" * mask).ljust(32, '0')
return ".".join([str(int(ip[i:i+8], 2)) for i in range(0, 32, 8)])
class Host(object):
"""
The Host class is used to represent a host on a subnet
    Each machine on a subnet must have a different hostname and ip
You can set TTL to a value between 0 and 255. Default value will be 64
"""
def __init__(self, name, ip, ttl = 64):
"""
The IP field must look like that "192.168.0.1/24" according to CIDR notation
If either of TTL or IP values are bad, it will throw a ValueError exception
"""
self._name = name
self._ttl = int(ttl)
if not 0 <= self._ttl <= 255:
raise ValueError("Bad TTL value")
self._ip, self._mask = self._checkIp(ip)
self._cidr = ip
self._routes = {}
self._defaultroute = None
def _checkIp(self, ip):
se = prog.match(ip)
if not se or True in [int(i) > 255 for i in se.groups()[:4]] or int(se.group(5)) > 32:
raise ValueError("""
Not a valid ip for {name} (was {ip})
            Note that this tool only handles CIDR notation
""".format(name = self._name, ip = ip))
return se.groups()[:4], int(se.group(5))
def showRoutes(self):
"""
return a string representing all the routes for a Host instance
"""
s = "{0}:\n{pad}{1} -- {2}\n".format(self._name, ".".join(self._ip), CIDRtoIP(self._mask), pad = 1 * " ")
s += " routes:\n"
for k,v in self._routes.items():
s += "{pad}{rule} -> {route}\n".format(rule = k, route = v._name, pad = 1 * " ")
if self._defaultroute is not None:
s += "{pad}Default -> {route}\n".format(route = self._defaultroute._name, pad = 2 * " ")
return s
def _canConnect__(self, ip, mask, ignore = False):
return iptobinpad(ip)[:max(mask, self._mask if not ignore else mask)] == iptobinpad(self._ip)[:max(mask, self._mask if not ignore else mask)]
def _canConnect(self, otherhost):
"""
return a boolean
True if the two hosts can communicate directly, False otherwise
If the otherhost argument isn't an instance of the Host class, it will throw a TypeError
"""
if not isinstance(otherhost, Host):
raise TypeError("First argument must be of Host type")
if isinstance(otherhost, Router):
return otherhost._canConnect(self)
return self._canConnect__(otherhost._ip, otherhost._mask)
def addRoute(self, route, ip = None):
"""
Add a route for future connections to remote hosts
If no IP is provided or is "Default", set the Default route
If the IP isn't a valid CIDR notation, return False
return True otherwise
throw a TypeError if the route argument isn't an instance of the Router class
"""
if not isinstance(route, Router):
raise TypeError("First argument must be of Router type")
if ip is None or ip.lower() == "default":
self._defaultroute = route
else:
try:
self._checkIp(ip)
except ValueError:
return False
else:
self._routes[ip] = route
return True
def canConnect(self, remotehost, ttl = None, basehost = None):
"""
return a boolean
True if the two hosts can communicate directly or using their routes, False otherwise
If the remotehost argument isn't an instance of the Host class, it will throw a TypeError
"""
if not isinstance(remotehost, Host):
raise TypeError("First argument must be of Host type")
ttl = self._ttl if ttl is None else ttl - 1
basehost = self if basehost is None else basehost
if ttl == 0:
return (False, 0)
if self._canConnect(remotehost):
print("{0} -> {1}".format(self._name, remotehost._name))
return remotehost.canConnect(basehost, None, basehost) if remotehost is not basehost else (True, ttl)
restrict = []
for k,v in self._routes.items():
i,m = k.split('/')
if remotehost._canConnect__(i.split('.'), int(m), True):
restrict.append([int(m),v])
if restrict:
p = tuple(zip(*restrict))[0]
print("{0} -> {1}".format(self._name, restrict[max(range(len(p)), key=p.__getitem__)][1]._name))
return restrict[max(range(len(p)), key=p.__getitem__)][1].canConnect(remotehost, ttl, basehost) if self._canConnect(restrict[max(range(len(p)), key=p.__getitem__)][1]) else (False, 0)
if self._defaultroute is not None and self._canConnect(self._defaultroute):
print("{0} -> {1}".format(self._name, self._defaultroute._name))
return self._defaultroute.canConnect(remotehost, ttl, basehost)
return (False, 0)
class Router(Host):
"""
    A router is like a super Host which can have multiple ip addresses on different subnets
Default TTL for a router is set to 255
"""
def __init__(self, name, ip, *args, **kwargs):
super().__init__(name, ip, kwargs.get("ttl", 255))
self._cidrs = [self._cidr]
self._ips = [(self._ip, self._mask)]
del self._ip, self._mask, self._cidr
for i in args:
self._ips.append(self._checkIp(i))
def _canConnect__(self, ip, mask, ignore = False):
return any([iptobinpad(ip)[:max(mask, m if not ignore else mask)] == iptobinpad(i)[:max(mask, m if not ignore else mask)] for i,m in self._ips])
def _canConnect(self, otherhost):
"""
return a boolean
True if the two hosts can communicate directly, False otherwise
If the otherhost argument isn't an instance of the Host class, it will throw a TypeError
"""
if not isinstance(otherhost, Host):
raise TypeError("First argument must be of Host type")
if isinstance(otherhost, Router):
return any([self._canConnect__(i, m) for i,m in otherhost._ips])
return self._canConnect__(otherhost._ip, otherhost._mask)
def showRoutes(self):
"""
return a string representing all the routes for a Router instance
"""
s = "{0}:\n".format(self._name)
for x in self._ips:
s += "{pad}{1} -- {2}\n".format(self._name, ".".join(x[0]), CIDRtoIP(x[1]), pad = 1 * " ")
s += " routes:\n"
for k,v in self._routes.items():
s += "{pad}{rule} -> {route}\n".format(rule = k, route = v._name, pad = 2 * " ")
if self._defaultroute is not None:
s += "{pad}Default -> {route}\n".format(route = self._defaultroute._name, pad = 2 * " ")
return s
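# Minimal demonstration (addresses are made up): two hosts on different subnets
# reach each other through a router used as their default route. Running the module
# directly prints the hops taken and the final (reachable, remaining TTL) tuple.
if __name__ == "__main__":
    pc1 = Host("pc1", "192.168.0.1/24")
    pc2 = Host("pc2", "10.0.0.1/8")
    r1 = Router("r1", "192.168.0.254/24", "10.0.0.254/8")
    pc1.addRoute(r1)    # default route
    pc2.addRoute(r1)    # default route
    print(r1.showRoutes())
    print(pc1.canConnect(pc2))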
|
Arzaroth/virtualnetwork
|
network/classes.py
|
Python
|
gpl-3.0
| 7,100
|
styles = {
'darkgreen': {
'name': 'darkgreen',
'background': '#000000',
'foreground': '#007700',
'lines': '#001100',
'border': '#001100',
'info': '#007700',
'font': 'DejaVu Sans Mono',
'fontsize': 12,
'padding': 6,
'size': [0.6, 0.95],
},
'green': {
'name': 'green',
'background': '#000000',
'foreground': '#00ff00',
'lines': '#007700',
'border': '#003300',
'info': '#00ff00',
'font': 'DejaVu Sans Mono',
'fontsize': 12,
'padding': 6,
'size': [0.6, 0.95],
},
'blue': {
'name': 'blue',
'background': '#0000ff',
'foreground': '#ffffff',
'lines': '#5555ff',
'border': '#3333ff',
'info': '#ffffff',
'font': 'DejaVu Sans Mono',
'fontsize': 12,
'padding': 6,
'size': [0.6, 0.95],
},
'c64': {
'name': 'c64',
'background': '#10009c',
'foreground': '#9c9cff',
'lines': '#3b3bbd',
'border': '#3b3bbd',
'info': '#7373ff',
'font': 'DejaVu Sans Mono',
'fontsize': 12,
'padding': 8,
'size': [0.45, 0.75] # [width, height]
},
'locontrast': {
'name': 'locontrast',
'background': '#6f6f6f',
'foreground': '#212121',
'lines': '#2f455b',
'border': '#2f455b',
'info': '#C44700',
'font': 'DejaVu Sans Mono',
'fontsize': 9,
'padding': 8,
'size': [0.45, 0.75] # [width, height]
},
'cupid': {
'name': 'cupid',
'background': '#12000b',
'foreground': '#d68c8b',
'lines': '#733b44',
'border': '#733b44',
'info': '#deb8a6',
'font': 'DejaVu Sans Mono',
'fontsize': 9,
'padding': 8,
'size': [0.45, 0.75] # [width, height]
},
'banker': {
'name': 'banker',
'background': '#111d00',
'foreground': '#ded291',
'lines': '#7d9635',
'border': '#7d9635',
'info': '#7d9635',
'font': 'DejaVu Sans Mono',
'fontsize': 9,
'padding': 8,
'size': [0.45, 0.75] # [width, height]
},
'amber': {
'name': 'amber',
'background': '#151000',
'foreground': '#ae8400',
'lines': '#5e4803',
'border': '#5e4803',
'info': '#ae8400',
'font': 'DejaVu Sans Mono',
'fontsize': 9,
'padding': 8,
'size': [0.45, 0.75] # [width, height]
},
}
|
genewoo/pyroom
|
styles.py
|
Python
|
gpl-3.0
| 2,605
|
import logging
from channels.generic.websocket import AsyncJsonWebsocketConsumer
logger = logging.getLogger('essarch.core.auth.consumers')
class NotificationConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
user = self.scope["user"]
grp = 'notifications_{}'.format(user.pk)
await self.accept()
await self.channel_layer.group_add(grp, self.channel_name)
logger.info("Added {} channel to {}".format(self.channel_name, grp))
async def disconnect(self, close_code):
user = self.scope["user"]
grp = 'notifications_{}'.format(user.pk)
await self.channel_layer.group_discard(grp, self.channel_name)
logger.info("Removed {} channel from {}".format(self.channel_name, grp))
async def notify(self, event):
await self.send_json(event)
logger.info("Notification with id {} sent to channel {}".format(event['id'], self.channel_name))
|
ESSolutions/ESSArch_Core
|
ESSArch_Core/auth/consumers.py
|
Python
|
gpl-3.0
| 940
|
import subprocess
import select
import time
import sys
"""Provides wrapper functions that run cli commands and wrapper classes to
communicate with them.
Error messages are piped to stderr.
"""
proctal_exe = "./proctal"
class Error(Exception):
pass
class StopError(Error):
"""Raised when the process is not running but was expected to be."""
def __init__(self):
super().__init__("Proctal is not running.")
class ParseError(Error):
"""Raised when failing to parse a value."""
class Type:
"""Base class that all types must inherit."""
def type_options(self):
return ["--type=" + self._type_name()] + self._type_options()
def create_value(self):
return self._create_value()
def _type_name(self):
raise NotImplementedError("Must return the name of the type.")
def _create_value(self):
raise NotImplementedError("Must create a new value.")
def _type_options(self):
return []
class TypeByte(Type):
"""Represents byte type."""
def _type_name(self):
return "byte"
def _create_value(self):
return ValueByte(self)
class TypeAddress(Type):
"""Represents address type."""
def _type_name(self):
return "address"
def _create_value(self):
return ValueAddress(self)
class TypeInteger(Type):
"""Represents integer type."""
def __init__(self, bits=8):
self._bits = bits
def bits(self):
return self._bits
def _type_name(self):
return "integer"
def _type_options(self):
return ["--integer-bits=" + str(self._bits)]
def _create_value(self):
return ValueInteger(self)
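# For instance, TypeInteger(bits=32).type_options() yields
# ["--type=integer", "--integer-bits=32"], which the helper functions below append
# to the proctal command line.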
class Value:
"""Represents a value."""
def format(self):
raise NotImplementedError("Must format this value into a string.")
def data(self):
raise NotImplementedError("Must return an array of bytes that represents the value in binary.")
def parse(self, s):
raise NotImplementedError("Must parse a string.")
def parse_binary(self, s):
raise NotImplementedError("Must parse a sequence of bytes.")
def clone(self):
raise NotImplementedError("Must create a copy of this value.")
def size(self):
raise NotImplementedError("Must return size of value in bytes.")
def cmp(self, other):
raise NotImplementedError("Must compare this value against another.")
def __str__(self):
return self.format()
class ValueByte(Value):
"""Represents a byte value."""
def __init__(self, type):
self._type = type
self._value = 0
def type(self):
return self._type
def parse(self, s):
self._value = int(s, 16)
def parse_binary(self, s):
self._value = s[0]
def format(self):
return hex(int(self._value))[2:].upper()
def data(self):
return self._value.to_bytes(self.size(), byteorder='little', signed=False)
def clone(self):
        other = ValueByte(self._type)
other._value = self._value
return other
def size(self):
return 1
def cmp(self, other):
if self._value > other._value:
return 1
elif self._value < other._value:
return -1
else:
return 0
class ValueAddress(Value):
"""Represents an address value."""
def __init__(self, type):
self._type = type
self._value = 0
def type(self):
return self._type
def parse(self, s):
self._value = int(s, 16)
def format(self):
return hex(int(self._value))[2:].upper()
def data(self):
return self._value.to_bytes(self.size(), byteorder='little', signed=False)
def clone(self):
other = ValueAddress(self._type)
other._value = self._value
return other
def size(self):
if sys.maxsize > 2**32:
return 8
else:
return 4
def cmp(self, other):
if self._value > other._value:
return 1
elif self._value < other._value:
return -1
else:
return 0
def add_address_offset(self, offset):
self._value = self._value + offset
class ValueInteger(Value):
"""Represents an integer value."""
def __init__(self, type):
self._type = type
self._value = 0
def type(self):
return self._type
def parse(self, s):
self._value = int(s)
def parse_binary(self, s):
size = self.size()
if len(s) < size:
raise ParseError("Expecting at least {expected} bytes.".format(expected=size))
data = s[:size]
del s[:size]
self._value = int.from_bytes(data, byteorder='little', signed=True)
def data(self):
        return self._value.to_bytes(self.size(), byteorder='little', signed=True)
def format(self):
return str(self._value)
def clone(self):
        other = ValueInteger(self._type)
other._value = self._value
return other
def size(self):
return self._type.bits() // 8
def cmp(self, other):
if not self._type.bits() == other._type.bits():
raise ValueError("Not the same type.")
if self._value > other._value:
return 1
elif self._value < other._value:
return -1
else:
return 0
class Process:
def __init__(self, process):
self._process = process
def _assert_running(self):
"""Asserts that the process is still running."""
self._process.poll()
if self._process.returncode != None:
raise StopError()
def stop(self):
"""Stops the command."""
self._process.terminate()
self._process.wait()
def exit_code(self):
"""Returns the exit code if the process has quit.
If the process is still running it will return None."""
return self._process.returncode
def has_stopped(self):
"""Returns True if the process has stopped, False otherwise."""
return self.exit_code() != None
class PauseProcess(Process):
"""Controls the pause command."""
def __init__(self, process):
super().__init__(process)
class WatchProcess(Process):
"""Controls the watch command."""
def __init__(self, process):
super().__init__(process)
self.poll = select.poll()
self.poll.register(self._process.stdout, select.POLLIN)
def wait_match(self, timeout):
"""Waits for the watch process to report a match."""
self._assert_running()
r = self.poll.poll(timeout)
if not r:
return False
return any([i[1] == select.POLLIN for i in r])
def next_match(self):
"""Reads the next available match."""
self._assert_running()
address_type = TypeAddress()
line = self._process.stdout.readline().decode("utf-8")
if line == '':
# No more lines to read.
return None
address = ValueAddress(address_type)
address.parse(line[:-1])
return SearchMatch(address, None)
class SearchMatch:
"""Represents a search match."""
def __init__(self, address, value):
self.address = address
self.value = value
class SearchProcess(Process):
"""Controls the search command."""
def __init__(self, process, type):
super().__init__(process)
self._type = type
def match_iterator(self):
"""An iterator that goes through all the matches that are currently
available."""
self._assert_running()
address_type = TypeAddress()
while True:
line = self._process.stdout.readline().decode("utf-8")
if line == '':
# No more lines to read.
break
first_break = line.index(" ")
address = ValueAddress(address_type)
address.parse(line[:first_break])
value = self._type.create_value()
value.parse(line[first_break + 1:])
yield SearchMatch(address, value)
class PatternProcess(Process):
"""Controls the pattern command."""
def __init__(self, process):
super().__init__(process)
def match_iterator(self):
"""An iterator that goes through all the matches that are currently
available."""
self._assert_running()
address_type = TypeAddress()
while True:
line = self._process.stdout.readline().decode("utf-8")
if line == '':
# No more lines to read.
break
address = ValueAddress(address_type)
address.parse(line[:-1])
yield SearchMatch(address, None)
class ReadProcess(Process):
"""Controls the read command."""
def __init__(self, process, type):
super().__init__(process)
self._type = type
def next_value(self):
"""Gets the next available value."""
self._assert_running()
line = self._process.stdout.readline().decode("utf-8")
if line == '':
return None
value = self._type.create_value()
value.parse(line[:-1])
return value
class ReadBinaryProcess(Process):
"""Controls the read command with the binary option."""
def __init__(self, process, type):
super().__init__(process)
self._type = type
self._buffer = bytearray()
def next_value(self):
"""Gets the next available value."""
self._assert_running()
self._buffer.extend(self._process.stdout.read(16));
value = self._type.create_value()
value.parse_binary(self._buffer)
return value
class DumpProcess(Process):
"""Controls the dump command."""
def __init__(self, process):
super().__init__(process)
def byte_iterator(self):
"""Iterates over every byte that is being dumped."""
self._assert_running()
poll = select.poll()
poll.register(self._process.stdout, select.POLLIN)
while True:
if not poll.poll(33):
break
byte = self._process.stdout.read(1)
if len(byte) == 0:
break
yield byte
class WriteBinaryProcess(Process):
"""Controls the write command with the binary option."""
def __init__(self, process):
super().__init__(process)
self._buffer = bytearray()
def write_value(self, value):
"""Sends a value to be written."""
self._buffer.extend(value.data());
def stop(self):
"""Flushes the output and stops the command."""
if not self.has_stopped():
self._process.communicate(input=self._buffer)
super().stop()
def pause(pid):
"""Runs the pause command and returns an object that can control it."""
cmd = [proctal_exe, "pause", "--pid=" + str(pid)]
process = subprocess.Popen(cmd)
# Waiting for the pause command to perform. We should probably figure out
# a reliable way for it to tell us in some way when it has paused the
# program instead of guessing when. This will be the culprit of
# false-positives.
time.sleep(0.033)
return PauseProcess(process)
def watch(pid, address, watch=None, address_start=None, address_stop=None, unique=None):
"""Runs the watch command."""
cmd = [
proctal_exe,
"watch",
"--pid=" + str(pid),
str(address),
]
if watch != None:
cmd.append("-" + str(watch))
if address_start != None:
cmd.append("--address-start=" + str(address_start))
if address_stop != None:
cmd.append("--address-stop=" + str(address_stop))
if unique:
cmd.append("--unique")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
process.poll()
if process.returncode != None:
return None
return WatchProcess(process)
def search(pid, type=TypeByte(), eq=None, permission=None, address_start=None, address_stop=None, review=None):
"""Runs the search command."""
cmd = [
proctal_exe,
"search",
"--pid=" + str(pid),
]
cmd = cmd + type.type_options()
if eq != None:
cmd.append("--eq=" + str(eq))
if permission != None:
cmd.append("-" + str(permission))
if address_start != None:
cmd.append("--address-start=" + str(address_start))
if address_stop != None:
cmd.append("--address-stop=" + str(address_stop))
if review != None:
cmd.append("--review")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
if review != None:
for match in review:
match_line = match.address.format() + " " + match.value.format() + "\n"
process.stdin.write(match_line.encode())
process.stdin.close()
process.poll()
if process.returncode != None:
return None
return SearchProcess(process, type)
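# Usage sketch (pid and searched value are arbitrary; search() returns None if the
# command exits immediately):
#
#     proc = search(1234, type=TypeInteger(bits=32), eq=100)
#     if proc is not None:
#         for match in proc.match_iterator():
#             print(match.address, match.value)
#         proc.stop()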
def pattern(pid, pattern, permission=None, address_start=None, address_stop=None):
"""Runs the pattern command."""
cmd = [
proctal_exe,
"pattern",
"--pid=" + str(pid),
pattern
]
if permission != None:
cmd.append("-" + str(permission))
if address_start != None:
cmd.append("--address-start=" + str(address_start))
if address_stop != None:
cmd.append("--address-stop=" + str(address_stop))
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
process.poll()
if process.returncode != None:
return None
return PatternProcess(process)
def allocate(pid, size, permission=None):
"""Runs the allocate command and returns the address."""
cmd = [
proctal_exe,
"allocate",
"--pid=" + str(pid),
str(size)
]
if permission != None:
cmd.append("-" + str(permission))
address = subprocess.check_output(cmd)
address = address.strip().decode("utf-8")
if not address:
return None
v = ValueAddress(TypeAddress())
v.parse(address)
return v
def deallocate(pid, address):
"""Runs the deallocate command."""
cmd = [
proctal_exe,
"deallocate",
"--pid=" + str(pid),
str(address)
]
code = subprocess.call(cmd)
if code == 0:
return True
else:
return False
def write(pid, address, type, value=None, array=None, binary=False):
"""Runs the write command."""
cmd = [
proctal_exe,
"write",
"--pid=" + str(pid),
"--address=" + str(address),
]
cmd = cmd + type.type_options()
if binary:
cmd.append("--binary")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
process.poll()
if process.returncode != None:
return False
return WriteBinaryProcess(process)
else:
if isinstance(value, list):
cmd = cmd + list(map(lambda v: str(v), value))
else:
cmd.append(str(value))
if array != None:
cmd.append("--array=" + str(array))
code = subprocess.call(cmd)
if code == 0:
return True
else:
return False
def execute(pid, code):
"""Runs the execute command."""
cmd = [
proctal_exe,
"execute",
"--pid=" + str(pid),
]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
process.communicate(input=code.encode())
if process.returncode == 0:
return True
else:
return False
def read(pid, address, type, array=None, binary=False):
"""Runs the read command."""
cmd = [
proctal_exe,
"read",
"--pid=" + str(pid),
"--address=" + str(address),
]
cmd = cmd + type.type_options()
if array != None:
cmd.append("--array=" + str(array))
if binary:
cmd.append("--binary")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
return ReadBinaryProcess(process, type)
else:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
return ReadProcess(process, type)
def dump(pid, permission=None, address_start=None, address_stop=None):
"""Runs the dump command."""
cmd = [
proctal_exe,
"dump",
"--pid=" + str(pid),
]
if address_start != None:
cmd.append("--address-start=" + str(address_start))
if address_stop != None:
cmd.append("--address-stop=" + str(address_stop))
if permission != None:
cmd.append("-" + str(permission))
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
process.poll()
if process.returncode != None:
return None
return DumpProcess(process)
|
daniel-araujo/proctal
|
src/cli/tests/util/proctal_cli.py
|
Python
|
gpl-3.0
| 16,972
|
# -*- coding: utf-8 -*-
# This file is part of Gtfslib-python.
#
# Gtfslib-python is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gtfslib-python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gtfslib-python. If not, see <http://www.gnu.org/licenses/>.
"""
@author: Laurent GRÉGOIRE <laurent.gregoire@mecatran.com>
"""
import hashlib
import traceback
import logging
import os
import random
import unittest
import requests
from sqlalchemy.orm import clear_mappers
from gtfslib.dao import Dao
# By default we do not enable this test, it takes ages
ENABLE = False
# Skip import of already downloaded GTFS
# Handy for re-launching the test w/o having to redo all
SKIP_EXISTING = True
# Limit loading to small GTFS only. Max size in bytes
MAX_GTFS_SIZE = 2 * 1024 * 1024
# Local cache directory
DIR = "all-gtfs.cache"
# List of ID to load. If none, download the whole list
# and process it at random.
IDS_TO_LOAD = None
# The following were known to have fancy formats that used to break:
# Contains UTF-8 BOM:
# IDS_TO_LOAD = [ 'janesville-transit-system' ]
# Contain header with space
# IDS_TO_LOAD = [ 'rseau-stan' ]
# Breaks on non-unique stop time
# IDS_TO_LOAD = [ 'biaostocka-komunikacja-miejska' ]
class TestAllGtfs(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
clear_mappers()
    # Download all GTFS from GTFS data-exchange web-site
# and load them into a DAO.
def test_all_gtfs(self):
if not ENABLE:
print("This test is disabled as it is very time-consuming.")
print("If you want to enable it, please see in the code.")
return
# Create temporary directory if not there
if not os.path.isdir(DIR):
os.mkdir(DIR)
# Create a DAO. Re-use any existing present.
logging.basicConfig(level=logging.INFO)
dao = Dao("%s/all_gtfs.sqlite" % (DIR))
deids = IDS_TO_LOAD
if deids is None:
print("Downloading meta-info for all agencies...")
resource_url = "http://www.gtfs-data-exchange.com/api/agencies?format=json"
response = requests.get(resource_url).json()
if response.get('status_code') != 200:
raise IOError()
deids = []
for entry in response.get('data'):
deid = entry.get('dataexchange_id')
deids.append(deid)
# Randomize the list, otherwise we will always load ABCBus, then ...
random.shuffle(deids)
for deid in deids:
try:
local_filename = "%s/%s.gtfs.zip" % (DIR, deid)
if os.path.exists(local_filename) and SKIP_EXISTING:
print("Skipping [%s], GTFS already present." % (deid))
continue
print("Downloading meta-info for ID [%s]" % (deid))
resource_url = "http://www.gtfs-data-exchange.com/api/agency?agency=%s&format=json" % deid
response = requests.get(resource_url).json()
status_code = response.get('status_code')
if status_code != 200:
raise IOError("Error %d (%s)" % (status_code, response.get('status_txt')))
data = response.get('data')
agency_data = data.get('agency')
agency_name = agency_data.get('name')
agency_area = agency_data.get('area')
agency_country = agency_data.get('country')
print("Processing [%s] %s (%s / %s)" % (deid, agency_name, agency_country, agency_area))
date_max = 0.0
file_url = None
file_size = 0
file_md5 = None
for datafile in data.get('datafiles'):
date_added = datafile.get('date_added')
if date_added > date_max:
date_max = date_added
file_url = datafile.get('file_url')
file_size = datafile.get('size')
file_md5 = datafile.get('md5sum')
if file_url is None:
print("No datafile available, skipping.")
continue
if file_size > MAX_GTFS_SIZE:
print("GTFS too large (%d bytes > max %d), skipping." % (file_size, MAX_GTFS_SIZE))
continue
# Check if the file is present and do not download it.
try:
existing_md5 = hashlib.md5(open(local_filename, 'rb').read()).hexdigest()
except:
existing_md5 = None
if existing_md5 == file_md5:
print("Using existing file '%s': MD5 checksum matches." % (local_filename))
else:
print("Downloading file '%s' to '%s' (%d bytes)" % (file_url, local_filename, file_size))
with open(local_filename, 'wb') as local_file:
cnx = requests.get(file_url, stream=True)
for block in cnx.iter_content(1024):
local_file.write(block)
cnx.close()
feed = dao.feed(deid)
if feed is not None:
print("Removing existing data for feed [%s]" % (deid))
dao.delete_feed(deid)
print("Importing into DAO as ID [%s]" % (deid))
try:
dao.load_gtfs("%s/%s.gtfs.zip" % (DIR, deid), feed_id=deid)
except:
error_filename = "%s/%s.error" % (DIR, deid)
print("Import of [%s]: FAILED. Logging error to '%s'" % (deid, error_filename))
with open(error_filename, 'wb') as errfile:
errfile.write(traceback.format_exc())
raise
print("Import of [%s]: OK." % (deid))
except Exception as error:
logging.exception(error)
continue
if __name__ == '__main__':
unittest.main()
|
afimb/gtfslib-python
|
test/test_all_gtfs.py
|
Python
|
gpl-3.0
| 6,602
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pordb_suchbegriffe.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Suchbegriffedialog(object):
def setupUi(self, Suchbegriffedialog):
Suchbegriffedialog.setObjectName("Suchbegriffedialog")
Suchbegriffedialog.resize(735, 905)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("pypordb/8027068_splash.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Suchbegriffedialog.setWindowIcon(icon)
self.gridLayout = QtWidgets.QGridLayout(Suchbegriffedialog)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.frame = QtWidgets.QFrame(Suchbegriffedialog)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.tableWidgetSuche = QtWidgets.QTableWidget(self.frame)
self.tableWidgetSuche.setObjectName("tableWidgetSuche")
self.tableWidgetSuche.setColumnCount(2)
self.tableWidgetSuche.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidgetSuche.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidgetSuche.setHorizontalHeaderItem(1, item)
self.horizontalLayout_2.addWidget(self.tableWidgetSuche)
self.verticalLayout.addWidget(self.frame)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButtonLandSpeichern = QtWidgets.QPushButton(Suchbegriffedialog)
self.pushButtonLandSpeichern.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("pypordb/media-floppy.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButtonLandSpeichern.setIcon(icon1)
self.pushButtonLandSpeichern.setObjectName("pushButtonLandSpeichern")
self.horizontalLayout.addWidget(self.pushButtonLandSpeichern)
self.pushButtonLandAbbrechen = QtWidgets.QPushButton(Suchbegriffedialog)
self.pushButtonLandAbbrechen.setText("")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("pypordb/dialog-cancel.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButtonLandAbbrechen.setIcon(icon2)
self.pushButtonLandAbbrechen.setObjectName("pushButtonLandAbbrechen")
self.horizontalLayout.addWidget(self.pushButtonLandAbbrechen)
self.verticalLayout.addLayout(self.horizontalLayout)
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.retranslateUi(Suchbegriffedialog)
QtCore.QMetaObject.connectSlotsByName(Suchbegriffedialog)
def retranslateUi(self, Suchbegriffedialog):
_translate = QtCore.QCoreApplication.translate
Suchbegriffedialog.setWindowTitle(_translate("Suchbegriffedialog", "Edit search items"))
        self.tableWidgetSuche.setWhatsThis(_translate("Suchbegriffedialog", "Here you can enter synonyms for searching, e. g. \"18\" and \"eighteen\". When you enter \"18\", search will not only look for \"18\", but also for \"eighteen\". Be very careful with this function to avoid long searches with a lot of results."))
item = self.tableWidgetSuche.horizontalHeaderItem(0)
item.setText(_translate("Suchbegriffedialog", "search item"))
item = self.tableWidgetSuche.horizontalHeaderItem(1)
item.setText(_translate("Suchbegriffedialog", "Alternative"))
self.pushButtonLandSpeichern.setToolTip(_translate("Suchbegriffedialog", "<html><head/><body><p>Save</p></body></html>"))
self.pushButtonLandAbbrechen.setToolTip(_translate("Suchbegriffedialog", "<html><head/><body><p>Cancel</p></body></html>"))
|
hwmay/pordb3
|
pordb_suchbegriffe.py
|
Python
|
gpl-3.0
| 4,105
|
# Bulletproof Arma Launcher
# Copyright (C) 2016 Lukasz Taczuk
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import unicode_literals
import errno
import os
import stat
import subprocess
import sys
import textwrap
import libtorrent
from kivy.logger import Logger
from sync.integrity import check_mod_directories, check_files_mtime_correct, are_ts_plugins_installed, is_whitelisted
from utils import paths
from utils import unicode_helpers
from utils import walker
from utils.metadatafile import MetadataFile
class AdminRequiredError(Exception):
pass
def set_torrent_complete(mod):
metadata_file = MetadataFile(mod.foldername)
metadata_file.read_data(ignore_open_errors=True)
metadata_file.set_dirty(False)
metadata_file.set_torrent_url(mod.torrent_url)
metadata_file.set_torrent_content(mod.torrent_content)
metadata_file.set_torrent_resume_data('')
metadata_file.set_force_creator_complete(True)
metadata_file.write_data()
def is_complete_quick(mod):
"""Performs a quick check to see if the mod *seems* to be correctly installed.
This check assumes no external changes have been made to the mods.
1. Check if metadata file exists and can be opened (instant)
1a. WORKAROUND: Check if the file has just been created so it must be complete
2. Check if torrent is not dirty [download completed successfully] (instant)
3. Check if torrent url matches (instant)
4. Check if files have the right size and modification time (very quick)
5. Check if there are no superfluous files in the directory (very quick)"""
Logger.info('Is_complete: Checking mod {} for completeness...'.format(mod.foldername))
metadata_file = MetadataFile(mod.foldername)
# (1) Check if metadata can be opened
try:
metadata_file.read_data(ignore_open_errors=False)
except (IOError, ValueError):
Logger.info('Is_complete: Metadata file could not be read successfully. Marking as not complete')
return False
# Workaround
if metadata_file.get_force_creator_complete():
Logger.info('Is_complete: Torrent marked as (forced) complete by the creator. Treating as complete')
return True
# (2)
if metadata_file.get_dirty():
Logger.info('Is_complete: Torrent marked as dirty (not completed successfully). Marking as not complete')
return False
# (3)
if metadata_file.get_torrent_url() != mod.torrent_url:
Logger.info('Is_complete: Torrent urls differ. Marking as not complete')
return False
# Get data required for (4) and (5)
torrent_content = metadata_file.get_torrent_content()
if not torrent_content:
Logger.info('Is_complete: Could not get torrent file content. Marking as not complete')
return False
try:
torrent_info = get_torrent_info_from_bytestring(torrent_content)
except RuntimeError:
Logger.info('Is_complete: Could not parse torrent file content. Marking as not complete')
return False
resume_data_bencoded = metadata_file.get_torrent_resume_data()
if not resume_data_bencoded:
Logger.info('Is_complete: Could not get resume data. Marking as not complete')
return False
resume_data = libtorrent.bdecode(resume_data_bencoded)
# (4)
file_sizes = resume_data['file sizes']
files = torrent_info.files()
# file_path, size, mtime
files_data = map(lambda x, y: (y.path.decode('utf-8'), x[0], x[1]), file_sizes, files)
if not check_files_mtime_correct(mod.parent_location, files_data):
Logger.info('Is_complete: Some files seem to have been modified in the meantime. Marking as not complete')
return False
# (5) Check if there are no additional files in the directory
# TODO: Check if these checksums are even needed now
checksums = dict([(entry.path.decode('utf-8'), entry.filehash.to_bytes()) for entry in torrent_info.files()])
files_list = checksums.keys()
if not check_mod_directories(files_list, mod.parent_location, on_superfluous='warn'):
Logger.info('Is_complete: Superfluous files in mod directory. Marking as not complete')
return False
if not are_ts_plugins_installed(mod.parent_location, files_list):
Logger.info('Is_complete: TS plugin out of date or not installed.')
return False
return True
def get_torrent_info_from_bytestring(bencoded):
"""Get torrent metadata from a bencoded string and return info structure."""
torrent_metadata = libtorrent.bdecode(bencoded)
torrent_info = libtorrent.torrent_info(torrent_metadata)
return torrent_info
def get_torrent_info_from_file(filename):
"""Get torrent_info structure from a file.
The file should contain a bencoded string - the contents of a .torrent file."""
with open(filename, 'rb') as file_handle:
file_contents = file_handle.read()
return get_torrent_info_from_bytestring(file_contents)
def get_admin_error(text, path):
error_message = textwrap.dedent('''
Error: {}:
{}
Please fix the file permissions before continuing.
This may also happen if the file is open by another program.
Make sure that Steam is NOT updating files right now. Otherwise, wait
until Steam finishes updating files and retry.
If not, running the launcher as Administrator may help (not recommended).
If you reinstalled your system lately, [ref=http://superuser.com/a/846155][color=3572b0]you may need to fix files ownership.[/color][/ref]
''').format(text, path)
return error_message
def path_already_used_for_mod(path, all_existing_mods):
"""Check if a given path is already used by a mod and return its name.
Return None otherwise.
"""
path = unicode_helpers.casefold(os.path.realpath(path))
for mod in all_existing_mods:
mod_full_path = unicode_helpers.casefold(mod.get_full_path())
mod_real_full_path = unicode_helpers.casefold(mod.get_real_full_path())
if path == mod_full_path or \
path == mod_real_full_path or \
path.startswith(mod_full_path + os.path.sep) or \
path.startswith(mod_real_full_path + os.path.sep):
return mod.foldername
return None
def path_can_be_a_mod(path, mods_directory):
"""Check if a given path could be used by a mod.
path - patch to be checked.
mods_directory - the directory where mods are stored by the launcher.
"""
launcher_moddir = os.path.realpath(mods_directory)
launcher_moddir_casefold = unicode_helpers.casefold(launcher_moddir)
path_casefold = unicode_helpers.casefold(os.path.realpath(path))
# Loop to parent (infinite loop)
if launcher_moddir_casefold == path_casefold or \
launcher_moddir_casefold.startswith(path_casefold + os.path.sep):
Logger.info("path_can_be_a_mod: Rejecting {}. Loop to parent.".format(path_casefold))
return False
directory_name = os.path.basename(path_casefold)
if not directory_name: # Path ends with a '\' or '/'
directory_name = os.path.dirname(path_casefold)
# All names must be changed to lowercase
bad_directories = [
'steam',
'steamapps',
'workshop',
'content',
'107410',
'common',
'arma 3',
'desktop',
]
if directory_name in bad_directories:
Logger.info("path_can_be_a_mod: Rejecting {}. Blacklisted directory.".format(path_casefold))
return False
if len(path_casefold) == 3 and path_casefold.endswith(':\\'):
Logger.info("path_can_be_a_mod: Rejecting {}. Root directory.".format(path_casefold))
return False
if path_casefold == unicode_helpers.casefold(paths.get_user_home_directory()):
Logger.info("path_can_be_a_mod: Rejecting {}. Home directory.".format(path_casefold))
return False
return True
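# Rough behaviour (paths are invented): with mods_directory r'C:\Launcher\mods',
# a path such as r'C:\Launcher\mods\@some_mod' is accepted, while a drive root
# (r'C:\'), the user's home directory, a blacklisted name such as
# r'D:\Steam\steamapps', or any parent of the mods directory itself is rejected.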
def set_node_read_write(node_path):
"""Set file or directory to read-write by removing the read-only bit."""
fs_node_path = unicode_helpers.u_to_fs(node_path)
try:
stat_struct = os.lstat(fs_node_path)
except OSError as e:
Logger.error('Torrent_utils: exception')
if e.errno == errno.ENOENT: # 2 - File not found
Logger.info('Torrent_utils: file not found')
return
raise
# If the file is read-only to the owner, change it to read-write
if not stat_struct.st_mode & stat.S_IWUSR:
Logger.info('Integrity: Setting write bit to file: {}'.format(node_path))
try:
os.chmod(fs_node_path, stat_struct.st_mode | stat.S_IWUSR)
except OSError as ex:
if ex.errno == errno.EACCES: # 13
error_message = get_admin_error('file/directory is read-only and cannot be changed', node_path)
Logger.error(error_message)
raise AdminRequiredError(error_message)
else:
raise
def ensure_directory_exists(base_directory):
"""Ensure the directory passed as the argument exists.
If the given directory is a broken Junction or Symlink, remove it.
Then try creating the directory and if that fails, try to mitigate the problem
by setting the parent directory to read-write and retrying the directory
creation. If that fails, raise an AdminRequiredError.
"""
try:
if paths.is_broken_junction(base_directory):
Logger.info('torrent_utils: Removing potentially broken Junction: {}'.format(base_directory))
os.rmdir(base_directory)
paths.mkdir_p(base_directory)
except OSError:
# Try fixing the situation by setting parent directory to read-write
set_node_read_write(os.path.dirname(base_directory))
try:
# Try again
if paths.is_broken_junction(base_directory):
Logger.info('torrent_utils: Removing potentially broken Junction: {}'.format(base_directory))
os.rmdir(base_directory)
paths.mkdir_p(base_directory)
except OSError:
error_message = get_admin_error('directory cannot be created or is not valid', base_directory)
Logger.error(error_message)
raise AdminRequiredError(error_message)
def remove_broken_junction(path):
try:
if paths.is_broken_junction(path):
os.rmdir(path)
except OSError:
error_message = get_admin_error('file/directory cannot be created or is not valid', path)
Logger.error(error_message)
raise AdminRequiredError(error_message)
def _replace_broken_junction_with_directory(path):
"""Perform a test whether the given path is a broken junction and fix it
if it is.
"""
if paths.is_broken_junction(path):
ensure_directory_exists(path)
set_node_read_write(path)
def ensure_directory_structure_is_correct(mod_directory):
"""Ensures all the files in the mod's directory have the write bit set and
there are no broken Junctions nor Symlinks in the directory structure.
Useful if some external tool has set them to read-only.
"""
Logger.info('Torrent_utils: Checking read-write file access in directory: {}.'.format(mod_directory))
set_node_read_write(mod_directory)
_replace_broken_junction_with_directory(mod_directory)
if not paths.is_dir_writable(mod_directory):
error_message = get_admin_error('directory is not writable', mod_directory)
raise AdminRequiredError(error_message)
for (dirpath, dirnames, filenames) in walker.walk(mod_directory):
# Needs to check the dirnames like this because if a child directory is
# a broken junction, it's never going to be used as dirpath
for node_name in dirnames:
node_path = os.path.join(dirpath, node_name)
Logger.info('Torrent_utils: Checking node: {}'.format(node_path))
set_node_read_write(node_path)
_replace_broken_junction_with_directory(node_path)
if not paths.is_dir_writable(node_path):
error_message = get_admin_error('directory is not writable', node_path)
raise AdminRequiredError(error_message)
for node_name in filenames:
node_path = os.path.join(dirpath, node_name)
Logger.info('Torrent_utils: Checking node: {}'.format(node_path))
set_node_read_write(node_path)
if not paths.is_file_writable(node_path):
error_message = get_admin_error('file is not writable', node_path)
raise AdminRequiredError(error_message)
def prepare_mod_directory(mod_full_path, check_writable=True):
"""Prepare the mod with the correct permissions, etc...
    This should make sure the parent directories are present and that the mod
    directory either does not exist or is present and contains no broken symlinks.
    Right now, there is a lot of duplicate code in here that will hopefully be
    refactored in the future, after the other features are implemented.
"""
# TODO: Simplify all the calls and remove duplicate code
parent_location = os.path.dirname(mod_full_path)
# Ensure the base directory exists
ensure_directory_exists(parent_location)
set_node_read_write(parent_location)
# If mod directory exists, check if it's valid
if os.path.lexists(mod_full_path):
if os.path.isdir(mod_full_path):
remove_broken_junction(mod_full_path)
else:
# If it's not a directory, remove it because we need a dir here
os.unlink(mod_full_path)
if os.path.lexists(mod_full_path):
if check_writable:
# Read-write everything
ensure_directory_structure_is_correct(mod_full_path)
else:
if not paths.is_dir_writable(parent_location):
error_message = get_admin_error('directory is not writable', parent_location)
raise AdminRequiredError(error_message)
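# Usage sketch (hypothetical 'mods_root' path): prepare_mod_directory(os.path.join(mods_root, '@my_mod'))
# ensures the parent directory exists and is writable and that anything left over at the
# mod path (a plain file or a broken junction) has been cleaned up beforehand.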
def create_symlink(symlink_name, orig_path):
"""Create an NTFS Junction.
For now, just use subprocess. Maybe switch to native libs later.
"""
symlink_name_fs = unicode_helpers.u_to_fs(symlink_name)
orig_path_fs = unicode_helpers.u_to_fs(orig_path)
return subprocess.check_call([b'cmd', b'/c', b'mklink', b'/J', symlink_name_fs, orig_path_fs])
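# The call above is equivalent to running the following in a Windows shell:
#   cmd /c mklink /J <symlink_name> <orig_path>
# i.e. it creates an NTFS junction at symlink_name pointing to orig_path.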
def symlink_mod(mod_full_path, real_location):
"""Set a new location for a mod.
    This includes making sure the mod is ready to be used afterwards.
"""
if os.path.exists(mod_full_path): # sometimes the junction may already exist
try:
os.rmdir(mod_full_path)
except OSError as ex:
            if ex.errno != 41:  # 41 is errno.ENOTEMPTY on Windows: directory not empty
raise
# The directory is not empty
# Really ugly workaround...
import tkMessageBox
import Tkinter
root = Tkinter.Tk()
root.withdraw() # use to hide tkinter window
message = textwrap.dedent('''\
To perform this action, the following directory will first have to be completely deleted:
{}
Are you sure you want to continue?
'''.format(mod_full_path))
result = tkMessageBox.askquestion('Are you sure?', message, icon='warning', parent=root)
if result == 'yes':
import shutil
try:
shutil.rmtree(mod_full_path)
                except Exception:
message = textwrap.dedent('''
An error happened while deleting the directory:
{}
                        This may be because the launcher does not have the permissions required.
You need to delete it manually to proceed.
''').format(mod_full_path)
raise AdminRequiredError(message)
else:
return
try:
prepare_mod_directory(mod_full_path, check_writable=False)
create_symlink(mod_full_path, real_location)
prepare_mod_directory(mod_full_path)
except:
t, v, tb = sys.exc_info()
try:
os.rmdir(mod_full_path)
except Exception as ex:
Logger.error('symlink_mod: Error while deleting: {} {}'.format(mod_full_path, repr(ex)))
raise t, v, tb
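# Usage sketch (hypothetical 'game_dir' and 'storage_dir' paths): keep the mod visible
# inside the game directory while its data physically lives elsewhere:
#   symlink_mod(os.path.join(game_dir, '@my_mod'), os.path.join(storage_dir, '@my_mod'))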
def create_add_torrent_flags(just_seed=False):
"""Create default flags for adding a new torrent to a syncer."""
f = libtorrent.add_torrent_params_flags_t
flags = 0
flags |= f.flag_apply_ip_filter # default
flags |= f.flag_update_subscribe # default
# flags |= f.flag_merge_resume_trackers # default off
# flags |= f.flag_paused
flags |= f.flag_auto_managed
flags |= f.flag_override_resume_data
# flags |= f.flag_seed_mode
if just_seed:
flags |= f.flag_upload_mode
# flags |= f.flag_share_mode
flags |= f.flag_duplicate_is_error # default?
# no_recheck_incomplete_resume
return flags
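# Usage sketch (assumes the dict-based add_torrent API of the python libtorrent
# bindings; 'session', 'torrent_info' and 'download_dir' are hypothetical names):
#   params = {
#       'ti': torrent_info,
#       'save_path': download_dir,
#       'flags': create_add_torrent_flags(just_seed=True),
#   }
#   handle = session.add_torrent(params)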
def create_torrent(directory, announces=None, output=None, comment=None, web_seeds=None):
if not output:
output = directory + ".torrent"
# "If a piece size of 0 is specified, a piece_size will be calculated such that the torrent file is roughly 40 kB."
piece_size_multiplier = 0
piece_size = (16 * 1024) * piece_size_multiplier # Must be multiple of 16KB
# http://www.libtorrent.org/make_torrent.html#create-torrent
flags = libtorrent.create_torrent_flags_t.calculate_file_hashes
if not os.path.isdir(directory):
raise Exception("The path {} is not a directory".format(directory))
fs = libtorrent.file_storage()
is_not_whitelisted = lambda node: not is_whitelisted(unicode_helpers.decode_utf8(node))
libtorrent.add_files(fs, unicode_helpers.encode_utf8(directory), is_not_whitelisted, flags=flags)
t = libtorrent.create_torrent(fs, piece_size=piece_size, flags=flags)
    for announce in (announces or []):
t.add_tracker(unicode_helpers.encode_utf8(announce))
if comment:
t.set_comment(unicode_helpers.encode_utf8(comment))
    for web_seed in (web_seeds or []):
t.add_url_seed(unicode_helpers.encode_utf8(web_seed))
# t.add_http_seed("http://...")
libtorrent.set_piece_hashes(t, unicode_helpers.encode_utf8(os.path.dirname(directory)))
with open(output, "wb") as file_handle:
file_handle.write(libtorrent.bencode(t.generate()))
return output
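# Usage sketch (hypothetical tracker and web seed URLs, matching the signature above):
#   create_torrent('/data/mods/@my_mod',
#                  announces=['http://tracker.example.com/announce'],
#                  comment='@my_mod release',
#                  web_seeds=['http://seeds.example.com/mods/'])
# writes '/data/mods/@my_mod.torrent' next to the directory and returns that path.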
| overfl0/Bulletproof-Arma-Launcher | src/sync/torrent_utils.py | Python | gpl-3.0 | 18,951 |