| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# Features
class BaseAnalyteFeature(object):
"""
Base multi-analyte feature. Subclass this to create new features.
A feature is any quantity calculated from multiple analytes.
"""
def __init__(self):
self.analyte_list = []
self.name = ''
@property
def data(self):
return 'Not implemented'
class BaseAnalyteFeatureFactory(object):
requires = None
name = None
def __init__(self):
"""
Initialize any required variables here.
"""
raise Exception('Must be implemented in child')
def add_analyte_data(self, analyte_data):
"""
Called when a new analyte is added. If this feature requires the analyte, store a reference to it;
do not manipulate the raw analyte data.
Parameters
----------
analyte_data
    Data for the newly added analyte.
Returns
-------
None
"""
pass
| nvenayak/impact | impact/core/features/Base.py | Python | gpl-3.0 | 910 |
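For orientation, here is a minimal, hypothetical sketch of a concrete feature built on the two base classes above; the `ProductFeature`/`ProductFeatureFactory` names and the assumption that analyte objects expose `.name` and a numeric `.data` attribute are illustrative, not part of the original module.

```python
# Hypothetical sketch: analyte objects are assumed to expose `.name` and a numeric `.data`.
class ProductFeature(BaseAnalyteFeature):
    """Feature equal to the product of two analytes' data."""
    def __init__(self, analyte_1, analyte_2):
        self.analyte_list = [analyte_1, analyte_2]
        self.name = '{}_x_{}'.format(analyte_1.name, analyte_2.name)

    @property
    def data(self):
        return self.analyte_list[0].data * self.analyte_list[1].data


class ProductFeatureFactory(BaseAnalyteFeatureFactory):
    requires = ['analyte_1', 'analyte_2']  # assumed convention for the `requires` attribute
    name = 'product_feature'

    def __init__(self):
        self.saved_analytes = []

    def add_analyte_data(self, analyte_data):
        # Save the analyte untouched, as the base class docstring requests.
        self.saved_analytes.append(analyte_data)
```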
# -*- coding: utf-8 -*-
""" Expose models to the admin view """
from __future__ import unicode_literals
from django.contrib import admin
from app.models.gym import Gym
from app.models.gym_item import GymItem
from app.models.profile import Profile
from app.models.raid_item import RaidItem
from app.models.ex_raid_pokemon import ExRaidPokemon
# Register your models here.
admin.site.register(Gym)
admin.site.register(GymItem)
admin.site.register(Profile)
admin.site.register(RaidItem)
admin.site.register(ExRaidPokemon)
| Gimpneek/exclusive-raid-gym-tracker | app/admin.py | Python | gpl-3.0 | 521 |
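The admin module above registers every model with Django's default options. As a hedged sketch, the same registration can be done with the `@admin.register` decorator plus a `ModelAdmin` when list columns or search are wanted; the `name` field referenced below is an assumption about the `Gym` model:

```python
# Sketch only; field names on Gym are assumed for illustration.
from django.contrib import admin
from app.models.gym import Gym


@admin.register(Gym)
class GymAdmin(admin.ModelAdmin):
    list_display = ('name',)   # columns shown in the change list
    search_fields = ('name',)  # adds a search box to the change list
```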
# coding=utf-8
import json
from datetime import date, datetime
from singledispatch import singledispatch
from functools import update_wrapper
def methdispatch(func):
dispatcher = singledispatch(func)
def wrapper(*args, **kw):
return dispatcher.dispatch(args[1].__class__)(*args, **kw)
wrapper.register = dispatcher.register
update_wrapper(wrapper, func)
return wrapper
def json_serial(obj):
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
raise TypeError(repr(obj) + ' is not JSON serializable')
class Board(object):
def __init__(self, id, name, create_at=None):
self.id = id
self.name = name
if create_at is None:
create_at = datetime.now()
self.create_at = create_at
def to_dict(self):
return {'id': self.id, 'name': self.name,
'create_at': self.create_at}
@methdispatch
def get(self, arg):
return getattr(self, arg, None)
@get.register(list)
def _(self, arg):
return [self.get(x) for x in arg]
@singledispatch
def json_encoder(obj):
raise TypeError(repr(obj) + ' is not JSON serializable')
@json_encoder.register(date)
@json_encoder.register(datetime)
def encode_date_time(obj):
return obj.isoformat()
board = Board(1, 'board_1')
print(json.dumps(board.to_dict(), default=json_encoder))
print(board.get('name'))
print(board.get(['id', 'create_at']))
| dongweiming/web_develop | chapter14/section3/json_singledispatch.py | Python | gpl-3.0 | 1,453 |
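The sample uses the `singledispatch` backport both for a method dispatcher and as the `default=` hook of `json.dumps`. Registering one more type follows the same pattern; the `Decimal` handler below is an illustrative sketch, not part of the original chapter code:

```python
from decimal import Decimal

@json_encoder.register(Decimal)
def encode_decimal(obj):
    # Represent decimals as floats; fine when exact precision is not required.
    return float(obj)

print(json.dumps({'price': Decimal('9.99')}, default=json_encoder))  # {"price": 9.99}
```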
#! /usr/bin/env python
# This is tested in both python 2.7 and 3.4
import os
os.system('kill {}'.format(os.getpid()))
| BlitzKraft/useless | useless.py | Python | gpl-3.0 | 118 |
import decimal
class Coord:
def __init__(self, lat, lon):
self.lat = lat
self.lon = lon
class Way:
def __init__(self, source, target):
self.source = source
self.target = target
f = open('nodes', 'r')
lat = None
nodes = []
for line in f:
loaded = decimal.Decimal(line)
if lat is None:
lat = loaded
else:
nodes.append(Coord(lat, loaded))
lat = None
f.close()
f = open('ways', 'r')
source = None
ways = []
for line in f:
loaded = int(line)
if source is None:
source = loaded
else:
ways.append(Way(source, loaded))
source = None
f.close()
before = """
<!DOCTYPE html>
<html>
<head>
<title>Leaflet Quick Start Guide Example</title>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="http://cdn.leafletjs.com/leaflet-0.7.3/leaflet.css" />
<style>
body {
padding: 0;
margin: 0;
}
html, body, #map {
height: 100%;
width: 100%;
}
</style>
</head>
<body>
<div id="map"></div>
<script src="http://cdn.leafletjs.com/leaflet-0.7.3/leaflet.js"></script>
<script>
var map = L.map('map').setView([50, 20], 13);
mapLink = '<a href="http://openstreetmap.org">OpenStreetMap</a>';
L.tileLayer('http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', {
maxZoom: 18,
attribution: '© ' + mapLink + ' Contributors',
}).addTo(map);
var polyline_options = {
color: 'red',
width: '5px'
};
L.polyline(["""
after = """ ], polyline_options).addTo(map).bindPopup("I am a polygon.");
var popup = L.popup();
function onMapClick(e) {
popup
.setLatLng(e.latlng)
.setContent("You clicked the map at " + e.latlng.toString())
.openOn(map);
}
map.on('click', onMapClick);
</script>
</body>
</html>
"""
print before
way = ways[80]
print "[" + str(nodes[way.source].lat) + ", " + str(nodes[way.source].lon) + "],"
print "[" + str(nodes[way.target].lat) + ", " + str(nodes[way.target].lon) + "]"
""" [51.509, -0.08],
[51.503, -0.06],
[51.51, -0.047]
"""
print after
| matkoniecz/szkic_inz | dfs.py | Python | gpl-3.0 | 2,187 |
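The script above prints coordinates for a single hard-coded way (`ways[80]`). A hedged sketch of a helper that formats every way as a `[lat, lon]` pair list for the same template; the Python 3 syntax and the joining format are illustrative assumptions:

```python
# Sketch (Python 3 syntax): build one "[[lat, lon], [lat, lon]]" segment per way.
def way_segments(ways, nodes):
    segments = []
    for way in ways:
        src, dst = nodes[way.source], nodes[way.target]
        segments.append("[[{}, {}], [{}, {}]]".format(src.lat, src.lon, dst.lat, dst.lon))
    return ",\n".join(segments)

# print(before); print(way_segments(ways, nodes)); print(after)
```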
'''
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import tensorflow as tf  # tf.device() is used below
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d,avg_pool_2d, conv_3d, max_pool_3d, avg_pool_3d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.merge_ops import merge
#used in v0.03-v0.06+
def otherception3(width, height, frame_count, lr, output=9, model_name = 'otherception.model', device = 'gpu', num = '0'):
with tf.device('/{}:{}'.format(device,num)):
network = input_data(shape=[None, width, height,3], name='input')
conv1_7_7 = conv_2d(network, 64, 28, strides=4, activation='relu', name = 'conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 9,strides=4)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce, 192,12, activation='relu', name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=12, strides=2, name='pool2_3_3_s2')
inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce')
inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128,filter_size=12, activation='relu', name = 'inception_3a_3_3')
inception_3a_5_5_reduce = conv_2d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' )
inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=15, activation='relu', name= 'inception_3a_5_5')
inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=12, strides=1, )
inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')
# merge the inception_3a__
inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=3)
inception_3b_1_1 = conv_2d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' )
inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=9, activation='relu',name='inception_3b_3_3')
inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce')
inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=15, name = 'inception_3b_5_5')
inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=12, strides=1, name='inception_3b_pool')
inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1')
#merge the inception_3b_*
inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=3,name='inception_3b_output')
pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')
inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3')
inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5')
inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool')
inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')
inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output')
inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5')
inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool')
inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')
inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], mode='concat', axis=3, name='inception_4b_output')
inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu',name='inception_4c_1_1')
inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3')
inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5')
inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1)
inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')
inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=3,name='inception_4c_output')
inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5')
inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool')
inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')
inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=3, name='inception_4d_output')
inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5')
inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool')
inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')
inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=3, mode='concat')
pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')
inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5')
inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool')
inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1')
inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=3,mode='concat')
inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1,activation='relu', name='inception_5b_1_1')
inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384, filter_size=3,activation='relu', name='inception_5b_3_3')
inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce,128, filter_size=5, activation='relu', name='inception_5b_5_5' )
inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool')
inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=3, mode='concat')
pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
pool5_7_7 = dropout(pool5_7_7, 0.4)
loss = fully_connected(pool5_7_7, output,activation='softmax')
network = regression(loss, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr, name='targets')
model = tflearn.DNN(network,
max_checkpoints=0, tensorboard_verbose=0,tensorboard_dir='log')
return model
def resnext(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
n = 5  # number of ResNeXt blocks per stage (default assumed; `n` is used below)
net = input_data(shape=[None, width, height, 3], name='input')
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
net = tflearn.layers.conv.resnext_block(net, n, 16, 32)
net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
net = tflearn.resnext_block(net, n-1, 32, 32)
net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
net = tflearn.resnext_block(net, n-1, 64, 32)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, output, activation='softmax')
opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = tflearn.regression(net, optimizer=opt,
loss='categorical_crossentropy')
model = tflearn.DNN(net,
max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')
return model
def sentnet_color_2d(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
network = input_data(shape=[None, width, height, 3], name='input')
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, output, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr, name='targets')
model = tflearn.DNN(network,
max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')
return model
def inception_v3(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
network = input_data(shape=[None, width, height,3], name='input')
conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name = 'conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3,strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce, 192,3, activation='relu', name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')
inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce')
inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128,filter_size=3, activation='relu', name = 'inception_3a_3_3')
inception_3a_5_5_reduce = conv_2d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' )
inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name= 'inception_3a_5_5')
inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=3, strides=1, )
inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')
# merge the inception_3a__
inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=3)
inception_3b_1_1 = conv_2d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' )
inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu',name='inception_3b_3_3')
inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce')
inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=5, name = 'inception_3b_5_5')
inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool')
inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1')
#merge the inception_3b_*
inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=3,name='inception_3b_output')
pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')
inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3')
inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5')
inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool')
inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')
inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output')
inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5')
inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool')
inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')
inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], mode='concat', axis=3, name='inception_4b_output')
inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu',name='inception_4c_1_1')
inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3')
inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5')
inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1)
inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')
inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=3,name='inception_4c_output')
inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5')
inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool')
inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')
inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=3, name='inception_4d_output')
inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5')
inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool')
inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')
inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=3, mode='concat')
pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')
inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5')
inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool')
inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1')
inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=3,mode='concat')
inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1,activation='relu', name='inception_5b_1_1')
inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384, filter_size=3,activation='relu', name='inception_5b_3_3')
inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce,128, filter_size=5, activation='relu', name='inception_5b_5_5' )
inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool')
inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=3, mode='concat')
pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
pool5_7_7 = dropout(pool5_7_7, 0.4)
loss = fully_connected(pool5_7_7, output,activation='softmax')
network = regression(loss, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr, name='targets')
model = tflearn.DNN(network,
max_checkpoints=0, tensorboard_verbose=0,tensorboard_dir='log')
return model
def inception_v3_3d(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
network = input_data(shape=[None, width, height,3, 1], name='input')
conv1_7_7 = conv_3d(network, 64, 7, strides=2, activation='relu', name = 'conv1_7_7_s2')
pool1_3_3 = max_pool_3d(conv1_7_7, 3,strides=2)
#pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_3d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce')
conv2_3_3 = conv_3d(conv2_3_3_reduce, 192,3, activation='relu', name='conv2_3_3')
#conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_3d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')
inception_3a_1_1 = conv_3d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
inception_3a_3_3_reduce = conv_3d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce')
inception_3a_3_3 = conv_3d(inception_3a_3_3_reduce, 128,filter_size=3, activation='relu', name = 'inception_3a_3_3')
inception_3a_5_5_reduce = conv_3d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' )
inception_3a_5_5 = conv_3d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name= 'inception_3a_5_5')
inception_3a_pool = max_pool_3d(pool2_3_3, kernel_size=3, strides=1, )
inception_3a_pool_1_1 = conv_3d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')
# merge the inception_3a__
inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=4)
inception_3b_1_1 = conv_3d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' )
inception_3b_3_3_reduce = conv_3d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
inception_3b_3_3 = conv_3d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu',name='inception_3b_3_3')
inception_3b_5_5_reduce = conv_3d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce')
inception_3b_5_5 = conv_3d(inception_3b_5_5_reduce, 96, filter_size=5, name = 'inception_3b_5_5')
inception_3b_pool = max_pool_3d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool')
inception_3b_pool_1_1 = conv_3d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1')
#merge the inception_3b_*
inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=4,name='inception_3b_output')
pool3_3_3 = max_pool_3d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')
inception_4a_1_1 = conv_3d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
inception_4a_3_3_reduce = conv_3d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
inception_4a_3_3 = conv_3d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3')
inception_4a_5_5_reduce = conv_3d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
inception_4a_5_5 = conv_3d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5')
inception_4a_pool = max_pool_3d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool')
inception_4a_pool_1_1 = conv_3d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')
inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=4, name='inception_4a_output')
inception_4b_1_1 = conv_3d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
inception_4b_3_3_reduce = conv_3d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
inception_4b_3_3 = conv_3d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
inception_4b_5_5_reduce = conv_3d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
inception_4b_5_5 = conv_3d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5')
inception_4b_pool = max_pool_3d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool')
inception_4b_pool_1_1 = conv_3d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')
inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], mode='concat', axis=4, name='inception_4b_output')
inception_4c_1_1 = conv_3d(inception_4b_output, 128, filter_size=1, activation='relu',name='inception_4c_1_1')
inception_4c_3_3_reduce = conv_3d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
inception_4c_3_3 = conv_3d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3')
inception_4c_5_5_reduce = conv_3d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
inception_4c_5_5 = conv_3d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5')
inception_4c_pool = max_pool_3d(inception_4b_output, kernel_size=3, strides=1)
inception_4c_pool_1_1 = conv_3d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')
inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=4,name='inception_4c_output')
inception_4d_1_1 = conv_3d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
inception_4d_3_3_reduce = conv_3d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
inception_4d_3_3 = conv_3d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
inception_4d_5_5_reduce = conv_3d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
inception_4d_5_5 = conv_3d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5')
inception_4d_pool = max_pool_3d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool')
inception_4d_pool_1_1 = conv_3d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')
inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=4, name='inception_4d_output')
inception_4e_1_1 = conv_3d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
inception_4e_3_3_reduce = conv_3d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
inception_4e_3_3 = conv_3d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
inception_4e_5_5_reduce = conv_3d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
inception_4e_5_5 = conv_3d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5')
inception_4e_pool = max_pool_3d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool')
inception_4e_pool_1_1 = conv_3d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')
inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=4, mode='concat')
pool4_3_3 = max_pool_3d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')
inception_5a_1_1 = conv_3d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
inception_5a_3_3_reduce = conv_3d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
inception_5a_3_3 = conv_3d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
inception_5a_5_5_reduce = conv_3d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
inception_5a_5_5 = conv_3d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5')
inception_5a_pool = max_pool_3d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool')
inception_5a_pool_1_1 = conv_3d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1')
inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=4,mode='concat')
inception_5b_1_1 = conv_3d(inception_5a_output, 384, filter_size=1,activation='relu', name='inception_5b_1_1')
inception_5b_3_3_reduce = conv_3d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
inception_5b_3_3 = conv_3d(inception_5b_3_3_reduce, 384, filter_size=3,activation='relu', name='inception_5b_3_3')
inception_5b_5_5_reduce = conv_3d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
inception_5b_5_5 = conv_3d(inception_5b_5_5_reduce,128, filter_size=5, activation='relu', name='inception_5b_5_5' )
inception_5b_pool = max_pool_3d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool')
inception_5b_pool_1_1 = conv_3d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=4, mode='concat')
pool5_7_7 = avg_pool_3d(inception_5b_output, kernel_size=7, strides=1)
pool5_7_7 = dropout(pool5_7_7, 0.4)
loss = fully_connected(pool5_7_7, output,activation='softmax')
network = regression(loss, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr, name='targets')
model = tflearn.DNN(network, checkpoint_path=model_name,
max_checkpoints=1, tensorboard_verbose=0,tensorboard_dir='log')
return model
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
network = input_data(shape=[None, width, height], name='input')
#network = tflearn.input_data(shape=[None, 28, 28], name='input')
network = tflearn.lstm(network, 128, return_seq=True)
network = tflearn.lstm(network, 128)
network = tflearn.fully_connected(network, 9, activation='softmax')
network = tflearn.regression(network, optimizer='adam',
loss='categorical_crossentropy', name="output1")
model = tflearn.DNN(network, checkpoint_path='model_lstm',
max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
return model
def sentnet_color(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
network = input_data(shape=[None, width, height,3, 1], name='input')
network = conv_3d(network, 96, 11, strides=4, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 256, 5, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 256, 3, activation='relu')
network = max_pool_3d(network, 3, strides=2)
network = conv_3d(network, 256, 5, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 256, 3, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, output, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr, name='targets')
model = tflearn.DNN(network, checkpoint_path=model_name,
max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
return model
def sentnet_frames(width, height, frame_count, lr, output=9):
network = input_data(shape=[None, width, height,frame_count, 1], name='input')
network = conv_3d(network, 96, 11, strides=4, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 256, 5, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 256, 3, activation='relu')
network = max_pool_3d(network, 3, strides=2)
network = conv_3d(network, 256, 5, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 256, 3, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, output, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr, name='targets')
model = tflearn.DNN(network, checkpoint_path='model_alexnet',
max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
return model
def sentnet2(width, height, frame_count, lr, output=9):
network = input_data(shape=[None, width, height, frame_count, 1], name='input')
network = conv_3d(network, 96, 11, strides=4, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 256, 5, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 256, 3, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 3, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr, name='targets')
model = tflearn.DNN(network, checkpoint_path='model_alexnet',
max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
return model
def sentnet(width, height, frame_count, lr, output=9):
network = input_data(shape=[None, width, height, frame_count, 1], name='input')
network = conv_3d(network, 96, 11, strides=4, activation='relu')
network = avg_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 256, 5, activation='relu')
network = avg_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 256, 3, activation='relu')
network = max_pool_3d(network, 3, strides=2)
network = conv_3d(network, 256, 5, activation='relu')
network = avg_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 384, 3, activation='relu')
network = conv_3d(network, 256, 3, activation='relu')
network = avg_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, output, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr, name='targets')
model = tflearn.DNN(network, checkpoint_path='model_alexnet',
max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
return model
def alexnet2(width, height, lr, output=3):
network = input_data(shape=[None, width, height, 1], name='input')
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, output, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr, name='targets')
model = tflearn.DNN(network, checkpoint_path='model_alexnet',
max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
return model
def sentnet_v0(width, height, frame_count, lr, output=9):
network = input_data(shape=[None, width, height, frame_count, 1], name='input')
network = conv_3d(network, 96, 11, strides=4, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 256, 5, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = conv_3d(network, 384, 3, 3, activation='relu')
network = conv_3d(network, 384, 3, 3, activation='relu')
network = conv_3d(network, 256, 3, 3, activation='relu')
network = max_pool_3d(network, 3, strides=2)
#network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, output, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr, name='targets')
model = tflearn.DNN(network, checkpoint_path='model_alexnet',
max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
return model
def alexnet(width, height, lr, output=3):
network = input_data(shape=[None, width, height, 1], name='input')
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, output, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr, name='targets')
model = tflearn.DNN(network, checkpoint_path='model_alexnet',
max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
return model
| Sentdex/pygta5 | models.py | Python | gpl-3.0 | 47,729 |
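These functions only construct tflearn graphs; a hedged sketch of how one of them is typically trained and saved (the 160x120 resolution, the dummy arrays, the epoch count, and the run id are assumptions for illustration):

```python
import numpy as np

# Dummy data purely for illustration: 100 grayscale frames and one-hot labels for 9 classes.
WIDTH, HEIGHT, LR = 160, 120, 1e-3
X = np.zeros((100, WIDTH, HEIGHT, 1), dtype=np.float32)
Y = np.eye(9, dtype=np.float32)[np.random.randint(0, 9, size=100)]

model = alexnet2(WIDTH, HEIGHT, LR, output=9)
model.fit({'input': X}, {'targets': Y}, n_epoch=1,
          validation_set=0.1, show_metric=True, run_id='alexnet2-demo')
model.save('alexnet2-demo.model')
```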
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 6 20:00:12 2019
@author: ander
"""
import numpy as np
def generate():
dim = 64
shape = (dim, dim, dim)
#constructing water sphere
matArr = np.zeros(shape, dtype=np.uint8)
densArr = np.zeros_like(matArr, dtype=np.double)
ind = np.indices(shape)
sphere_radius = dim//4
sphere_center = dim//2
s_ind = (ii-sphere_center for ii in ind)
idx, idy, idz = s_ind
r = (idx*idx+idy*idy+idz*idz) < sphere_radius**2
matArr[r] = 1
densArr[:] = 0.001225 # air density g/cm3
densArr[r] = 1.0 # water density
material_map = list()
material_map.append("0; Air; N0.75O0.25")
material_map.append("1; Water; H2O")
##writing files
with open("materialTemplate.dat", 'bw') as matFile:
matFile.write(matArr.tobytes())
with open("densityTemplate.dat", 'bw') as densFile:
densFile.write(densArr.tobytes())
with open("materialMapTemplate.dat", 'w') as matmapFile:
matmapFile.write('\n'.join(material_map))
if __name__=='__main__':
generate()
| medicalphysics/OpenDXMC | data/binaryimporttemplate/template_generator.py | Python | gpl-3.0 | 1,121 |
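A hedged sketch of reading the generated templates back with NumPy, mirroring the dtypes and the 64-cube shape written by `generate()`; the consistency check at the end is illustrative:

```python
import numpy as np

dim = 64
shape = (dim, dim, dim)

# generate() writes material indices as uint8 and densities as float64 (np.double).
mat = np.fromfile("materialTemplate.dat", dtype=np.uint8).reshape(shape)
dens = np.fromfile("densityTemplate.dat", dtype=np.double).reshape(shape)

with open("materialMapTemplate.dat") as f:
    material_map = [line.strip() for line in f if line.strip()]

# The water sphere (material 1) should carry density 1.0 g/cm3.
assert np.allclose(dens[mat == 1], 1.0)
print(material_map)
```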
# This file was autogenerated from lyt.md. Please do not edit!
#!/usr/bin/python
# Copyright (C) 2016 Adrien Lamarque
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""lyt is a literate programming tool, written in Python, that extracts fenced code blocks from Markdown documents. It is meant to be used as a preprocess tool before a conventional build.
"""
import re
import collections
import click
import sys
import os.path
DEFAULT_EXT = "txt"
matching_extensions = {
'python': 'py',
'c++': 'cpp',
'c': 'c',
'c#': 'cs',
'haskell': 'hs',
'ruby': 'rb',
'go': 'go',
'rust': 'rs',
'racket': 'rkt',
}
DEFAULT_COMMENT_SYMBOL = "#"
comment_symbols = collections.defaultdict(lambda: DEFAULT_COMMENT_SYMBOL, {
'python': '#',
'c++': '//',
'c': '//',
'c#': '//',
'haskell': '--',
'ruby': '#',
'go': '//',
'rust': '//',
'racket': ';',
})
e = click.echo
code_open_re = re.compile(r'^`{3,}(?!.*`)|^~{3,}(?!.*~)', re.MULTILINE) # /^`{3,}(?!.*`)|^~{3,}(?!.*~)/
code_close_re = re.compile(r'^(?:`{3,}|~{3,})(?= *$)', re.MULTILINE) # /^(?:`{3,}|~{3,})(?= *$)/
@click.command()
@click.argument("input_file", type=click.File('r'))
@click.option("--force", is_flag=True)
def lyt(input_file, force):
"""lyt extracts fenced code blocks from Markdown documents."""
out = collections.defaultdict(str)
lines = input_file.read()
start_pos = 0
while True:
open_match = code_open_re.search(lines, start_pos)
if not open_match:
break
start_pos = open_match.end()
fence_char = lines[open_match.start()]
infostring_end_idx = lines.find("\n", open_match.end())
infostring = lines[open_match.end():infostring_end_idx]
if infostring:
lang = infostring.split()[0]
else:
lang = "unknown"
start_pos = infostring_end_idx
found = False
while not found:
close_match = code_close_re.search(lines, start_pos)
if not close_match:
found = True
out[lang] += lines[start_pos:]
# Turns out it's valid to have a 'dangling' fenced block quote according to the CommonMark spec
# e("Mismatched fenced block quotes! Check that your Markdown is valid.")
# sys.exit(1)
if lines[close_match.start()] == fence_char:
found = True
out[lang] += lines[start_pos+1:close_match.start()]
start_pos = close_match.end()
lpy_ext_idx = input_file.name.rfind(".") + 1
basename = input_file.name[:lpy_ext_idx]
for (language, source) in out.items():
language = language.lower()
if language in matching_extensions:
ext = matching_extensions[language]
else:
if language == "unknown":
e("WARNING! The following blocks didn't have an infostring specifying the language. They were aggregated together and will be written to a .%s file." % DEFAULT_EXT)
e(source)
ext = DEFAULT_EXT
else:
e("Couldn't find extension to use for language %s. Using the first three letters: %s" % (language, language[:3]))
ext = language[:3]
output_filename = basename + ext
if os.path.isfile(output_filename) and not force:
e("WARNING! The file %s already exists. To allow overwriting, re-launch lyt with the --force option." % output_filename)
else:
with open(output_filename, "w") as output_file:
output_file.write(comment_symbols[language] + " This file was autogenerated from %s. Please do not edit!\n" % input_file.name)
output_file.write(source)
e("Wrote %s." % output_filename)
if __name__ == "__main__":
lyt()
| lamarqua/lyt | lyt.py | Python | gpl-3.0 | 3,984 |
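Because `lyt` is a click command, it can be exercised without a shell through click's test runner; a hedged sketch (the sample Markdown content and the `notes.md` file name are assumptions):

```python
from click.testing import CliRunner

runner = CliRunner()
with runner.isolated_filesystem():
    with open("notes.md", "w") as f:
        # A tilde-fenced block, which the regexes above also accept.
        f.write("~~~python\nprint('hello from a fenced block')\n~~~\n")
    result = runner.invoke(lyt, ["notes.md", "--force"])
    print(result.output)  # reports the files written, e.g. "Wrote notes.py."
```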
def _make_cloner(cls, base, *args):
def clone(self):
# create a new object without initializing it
cloned = cls.__new__(cls)
# clone C++ state
base.__init__(cloned, *args)
# clone Python state
cloned.__dict__.update(self.__dict__)
return cloned
cls.clone = clone
return cls
class genetic_value_is_trait_default_clone(object):
def __init__(self, ndim=1):
self.ndim = ndim
def __call__(self, cls):
from fwdpy11 import GeneticValueIsTrait
return _make_cloner(cls, GeneticValueIsTrait, self.ndim)
def genetic_value_noise_default_clone(cls):
from fwdpy11 import GeneticValueNoise
return _make_cloner(cls, GeneticValueNoise)
def default_update(cls):
def update(self, pop):
pass
cls.update = update
return cls
| molpopgen/fwdpy11 | fwdpy11/custom_genetic_value_decorators.py | Python | gpl-3.0 | 857 |
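A hedged sketch of how these decorators are intended to be applied to a user-defined noise class; the `MyNoise` name, its zero-noise `__call__`, and the assumption that `fwdpy11.GeneticValueNoise` takes no constructor arguments are illustrative and should be checked against the fwdpy11 documentation:

```python
import fwdpy11


@default_update
@genetic_value_noise_default_clone
class MyNoise(fwdpy11.GeneticValueNoise):
    """Noise object that adds nothing (illustration only)."""
    def __init__(self):
        fwdpy11.GeneticValueNoise.__init__(self)
        self.calls = 0  # Python-side state that clone() copies via __dict__

    def __call__(self, *args, **kwargs):
        # Signature kept generic; the real interface is defined by fwdpy11.
        self.calls += 1
        return 0.0


noise = MyNoise()
copy = noise.clone()   # added by the decorator through _make_cloner
# copy.update(pop) is a no-op added by @default_update
```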
# Authors:
# Rob Crittenden <rcritten@redhat.com>
# Pavel Zuna <pzuna@redhat.com>
# Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2008, 2009 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib.plugins.host` module.
"""
from __future__ import print_function
import os
import tempfile
import base64
import pytest
from ipapython import ipautil
from ipalib import api, errors, x509
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
from ipatests.test_xmlrpc.xmlrpc_test import (XMLRPC_test,
fuzzy_uuid, fuzzy_digits, fuzzy_hash, fuzzy_date, fuzzy_issuer,
fuzzy_hex, raises_exact)
from ipatests.test_xmlrpc.test_user_plugin import get_group_dn
from ipatests.test_xmlrpc import objectclasses
from ipatests.test_xmlrpc.tracker.host_plugin import HostTracker
from ipatests.test_xmlrpc.testcert import get_testcert
from ipatests.util import assert_deepequal
# Constants for DNS integration tests
# TODO: Use tracker fixtures for zones/records/users/groups
dnszone = u'test-zone.test'
dnszone_absolute = dnszone + '.'
dnszone_dn = DN(('idnsname', dnszone_absolute), api.env.container_dns, api.env.basedn)
dnszone_rname = u'root.%s' % dnszone_absolute
dnszone_rname_dnsname = DNSName(dnszone_rname)
revzone = u'29.16.172.in-addr.arpa.'
revzone_dn = DN(('idnsname', revzone), api.env.container_dns, api.env.basedn)
revipv6zone = u'0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.'
revipv6zone_dn = DN(('idnsname', revipv6zone), api.env.container_dns, api.env.basedn)
arec = u'172.16.29.22'
aaaarec = u'2001:db8:1::beef'
arec2 = u'172.16.29.33'
aaaarec2 = u'2001:db8:1::dead'
ipv4_fromip = u'testipv4fromip'
ipv4_fromip_ip = u'172.16.29.40'
ipv4_fromip_arec = ipv4_fromip_ip
ipv4_fromip_dnsname = DNSName(ipv4_fromip)
ipv4_fromip_dn = DN(('idnsname', ipv4_fromip), dnszone_dn)
ipv4_fromip_host_fqdn = u'%s.%s' % (ipv4_fromip, dnszone)
ipv4_fromip_ptr = u'40'
ipv4_fromip_ptr_dnsname = DNSName(ipv4_fromip_ptr)
ipv4_fromip_ptr_dn = DN(('idnsname', ipv4_fromip_ptr), revzone_dn)
ipv6_fromip = u'testipv6fromip'
ipv6_fromip_ipv6 = u'2001:db8:1::9'
ipv6_fromip_aaaarec = ipv6_fromip_ipv6
ipv6_fromip_dnsname = DNSName(ipv6_fromip)
ipv6_fromip_dn = DN(('idnsname', ipv6_fromip), dnszone_dn)
ipv6_fromip_ptr = u'9.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0'
ipv6_fromip_ptr_dnsname = DNSName(ipv6_fromip_ptr)
ipv6_fromip_ptr_dn = DN(('idnsname', ipv6_fromip_ptr), revipv6zone_dn)
sshpubkey = u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGAX3xAeLeaJggwTqMjxNwa6XHBUAikXPGMzEpVrlLDCZtv00djsFTBi38PkgxBJVkgRWMrcBsr/35lq7P6w8KGIwA8GI48Z0qBS2NBMJ2u9WQ2hjLN6GdMlo77O0uJY3251p12pCVIS/bHRSq8kHO2No8g7KA9fGGcagPfQH+ee3t7HUkpbQkFTmbPPN++r3V8oVUk5LxbryB3UIIVzNmcSIn3JrXynlvui4MixvrtX6zx+O/bBo68o8/eZD26QrahVbA09fivrn/4h3TM019Eu/c2jOdckfU3cHUV/3Tno5d6JicibyaoDDK7S/yjdn5jhaz8MSEayQvFkZkiF0L public key test'
sshpubkeyfp = u'13:67:6B:BF:4E:A2:05:8E:AE:25:8B:A1:31:DE:6F:1B public key test (ssh-rsa)'
user1 = u'tuser1'
user2 = u'tuser2'
group1 = u'group1'
group1_dn = get_group_dn(group1)
group2 = u'group2'
group2_dn = get_group_dn(group2)
hostgroup1 = u'testhostgroup1'
hostgroup1_dn = DN(('cn',hostgroup1),('cn','hostgroups'),('cn','accounts'),
api.env.basedn)
host_cert = get_testcert(DN(('CN', api.env.host), x509.subject_base()),
'host/%s@%s' % (api.env.host, api.env.realm))
@pytest.fixture(scope='class')
def host(request):
tracker = HostTracker(name=u'testhost1')
return tracker.make_fixture(request)
@pytest.fixture(scope='class')
def host2(request):
tracker = HostTracker(name=u'testhost2')
return tracker.make_fixture(request)
@pytest.fixture(scope='class')
def host3(request):
tracker = HostTracker(name=u'testhost3')
return tracker.make_fixture(request)
@pytest.fixture(scope='class')
def lab_host(request):
name = u'testhost1'
tracker = HostTracker(name=name,
fqdn=u'%s.lab.%s' % (name, api.env.domain))
return tracker.make_fixture(request)
@pytest.fixture(scope='class')
def this_host(request):
"""Fixture for the current master"""
tracker = HostTracker(name=api.env.host.partition('.')[0],
fqdn=api.env.host)
# This host is not created/deleted, so don't call make_fixture
tracker.exists = True
return tracker
@pytest.fixture(scope='class')
def invalid_host(request):
tracker = HostTracker(name='foo_bar',)
return tracker.make_fixture(request)
@pytest.fixture(scope='class')
def ipv6only_host(request):
name = u'testipv6onlyhost'
tracker = HostTracker(name=name, fqdn=u'%s.%s' % (name, dnszone))
return tracker.make_fixture(request)
@pytest.fixture(scope='class')
def ipv4only_host(request):
name = u'testipv4onlyhost'
tracker = HostTracker(name=name, fqdn=u'%s.%s' % (name, dnszone))
return tracker.make_fixture(request)
@pytest.fixture(scope='class')
def ipv46both_host(request):
name = u'testipv4and6host'
tracker = HostTracker(name=name, fqdn=u'%s.%s' % (name, dnszone))
return tracker.make_fixture(request)
@pytest.fixture(scope='class')
def ipv4_fromip_host(request):
name = u'testipv4fromip'
tracker = HostTracker(name=name, fqdn=u'%s.%s' % (name, dnszone))
return tracker.make_fixture(request)
@pytest.fixture(scope='class')
def ipv6_fromip_host(request):
name = u'testipv6fromip'
tracker = HostTracker(name=name, fqdn=u'%s.%s' % (name, dnszone))
return tracker.make_fixture(request)
@pytest.mark.tier1
class TestNonexistentHost(XMLRPC_test):
def test_retrieve_nonexistent(self, host):
host.ensure_missing()
command = host.make_retrieve_command()
with raises_exact(errors.NotFound(
reason=u'%s: host not found' % host.fqdn)):
command()
def test_update_nonexistent(self, host):
host.ensure_missing()
command = host.make_update_command(updates=dict(description=u'Nope'))
with raises_exact(errors.NotFound(
reason=u'%s: host not found' % host.fqdn)):
command()
def test_delete_nonexistent(self, host):
host.ensure_missing()
command = host.make_delete_command()
with raises_exact(errors.NotFound(
reason=u'%s: host not found' % host.fqdn)):
command()
@pytest.mark.tier1
class TestCRUD(XMLRPC_test):
def test_create_duplicate(self, host):
host.ensure_exists()
command = host.make_create_command(force=True)
with raises_exact(errors.DuplicateEntry(
message=u'host with name "%s" already exists' % host.fqdn)):
command()
def test_retrieve_simple(self, host):
host.retrieve()
def test_retrieve_all(self, host):
host.retrieve(all=True)
def test_search_simple(self, host):
host.find()
def test_search_all(self, host):
host.find(all=True)
def test_update_simple(self, host):
host.update(dict(
description=u'Updated host 1',
usercertificate=host_cert),
expected_updates=dict(
description=[u'Updated host 1'],
usercertificate=[base64.b64decode(host_cert)],
issuer=fuzzy_issuer,
md5_fingerprint=fuzzy_hash,
serial_number=fuzzy_digits,
serial_number_hex=fuzzy_hex,
sha1_fingerprint=fuzzy_hash,
subject=DN(('CN', api.env.host), x509.subject_base()),
valid_not_before=fuzzy_date,
valid_not_after=fuzzy_date,
))
host.retrieve()
def test_try_rename(self, host):
host.ensure_exists()
command = host.make_update_command(
updates=dict(setattr=u'fqdn=changed.example.com'))
with raises_exact(errors.NotAllowedOnRDN()):
command()
def test_add_mac_address(self, host):
host.update(dict(macaddress=u'00:50:56:30:F6:5F'),
expected_updates=dict(macaddress=[u'00:50:56:30:F6:5F']))
host.retrieve()
def test_add_mac_addresses(self, host):
host.update(dict(macaddress=[u'00:50:56:30:F6:5F',
u'00:50:56:2C:8D:82']))
host.retrieve()
def test_try_illegal_mac(self, host):
command = host.make_update_command(
updates=dict(macaddress=[u'xx']))
with raises_exact(errors.ValidationError(
name='macaddress',
error=u'Must be of the form HH:HH:HH:HH:HH:HH, where ' +
u'each H is a hexadecimal character.')):
command()
def test_add_ssh_pubkey(self, host):
host.update(dict(ipasshpubkey=[sshpubkey]),
expected_updates=dict(
ipasshpubkey=[sshpubkey],
sshpubkeyfp=[sshpubkeyfp],
))
host.retrieve()
def test_try_illegal_ssh_pubkey(self, host):
host.ensure_exists()
command = host.make_update_command(
updates=dict(ipasshpubkey=[u'no-pty %s' % sshpubkey]))
with raises_exact(errors.ValidationError(
name='sshpubkey', error=u'options are not allowed')):
command()
def test_delete_host(self, host):
host.delete()
def test_retrieve_nonexistent(self, host):
host.ensure_missing()
command = host.make_retrieve_command()
with raises_exact(errors.NotFound(
reason=u'%s: host not found' % host.fqdn)):
command()
def test_update_nonexistent(self, host):
host.ensure_missing()
command = host.make_update_command(
updates=dict(description=u'Nope'))
with raises_exact(errors.NotFound(
reason=u'%s: host not found' % host.fqdn)):
command()
def test_delete_nonexistent(self, host):
host.ensure_missing()
command = host.make_delete_command()
with raises_exact(errors.NotFound(
reason=u'%s: host not found' % host.fqdn)):
command()
def test_try_add_not_in_dns(self, host):
host.ensure_missing()
command = host.make_create_command(force=False)
with raises_exact(errors.DNSNotARecordError(
reason=u'Host does not have corresponding DNS A/AAAA record')):
command()
def test_add_host_with_null_password(self, host):
host.ensure_missing()
command = host.make_create_command()
result = command(userpassword=None)
host.track_create()
host.check_create(result)
@pytest.mark.tier1
class TestMultipleMatches(XMLRPC_test):
def test_try_show_multiple_matches_with_shortname(self, host, lab_host):
host.ensure_exists()
lab_host.ensure_exists()
assert host.shortname == lab_host.shortname
command = host.make_command('host_show', host.shortname)
with pytest.raises(errors.SingleMatchExpected):
command()
@pytest.mark.tier1
class TestHostWithService(XMLRPC_test):
"""Test deletion using a non-fully-qualified hostname.
Services associated with this host should also be removed.
"""
# TODO: Use a service tracker, when available
def test_host_with_service(self, host):
host.ensure_exists()
service1 = u'dns/%s@%s' % (host.fqdn, host.api.env.realm)
service1dn = DN(('krbprincipalname', service1.lower()),
('cn','services'), ('cn','accounts'),
host.api.env.basedn)
try:
result = host.run_command('service_add', service1, force=True)
assert_deepequal(dict(
value=service1,
summary=u'Added service "%s"' % service1,
result=dict(
dn=service1dn,
krbprincipalname=[service1],
objectclass=objectclasses.service,
managedby_host=[host.fqdn],
ipauniqueid=[fuzzy_uuid],
),
), result)
host.delete()
result = host.run_command('service_find', host.fqdn)
assert_deepequal(dict(
count=0,
truncated=False,
summary=u'0 services matched',
result=[],
), result)
finally:
try:
host.run_command('service_del', service1)
except errors.NotFound:
pass
@pytest.mark.tier1
class TestManagedHosts(XMLRPC_test):
def test_managed_hosts(self, host, host2, host3):
host.ensure_exists()
host2.ensure_exists()
host3.ensure_exists()
self.add_managed_host(host, host2)
host2.retrieve()
self.search_man_noman_hosts(host2, host)
self.search_man_hosts(host2, host3)
self.remove_man_hosts(host, host2)
host.retrieve()
host2.retrieve()
def add_managed_host(self, manager, underling):
command = manager.make_command('host_add_managedby',
underling.fqdn, host=manager.fqdn)
result = command()
underling.attrs['managedby_host'] = [manager.fqdn, underling.fqdn]
assert_deepequal(dict(
completed=1,
failed={'managedby': {'host': ()}},
result=underling.filter_attrs(underling.managedby_keys),
), result)
def search_man_noman_hosts(self, host, noman_host):
command = host.make_find_command(host.fqdn,
man_host=host.fqdn,
not_man_host=noman_host.fqdn)
result = command()
assert_deepequal(dict(
count=1,
truncated=False,
summary=u'1 host matched',
result=[host.filter_attrs(host.find_keys)],
), result)
def search_man_hosts(self, host1, host2):
command = host1.make_find_command(man_host=[host1.fqdn, host2.fqdn])
result = command()
assert_deepequal(dict(
count=0,
truncated=False,
summary=u'0 hosts matched',
result=[],
), result)
def remove_man_hosts(self, manager, underling):
command = manager.make_command('host_remove_managedby',
underling.fqdn, host=manager.fqdn)
result = command()
underling.attrs['managedby_host'] = [underling.fqdn]
assert_deepequal(dict(
completed=1,
failed={'managedby': {'host': ()}},
result=underling.filter_attrs(underling.managedby_keys),
), result)
@pytest.mark.tier1
class TestProtectedMaster(XMLRPC_test):
def test_try_delete_master(self, this_host):
command = this_host.make_delete_command()
with raises_exact(errors.ValidationError(
name='hostname',
error=u'An IPA master host cannot be deleted or disabled')):
command()
def test_try_disable_master(self, this_host):
command = this_host.make_command('host_disable', this_host.fqdn)
with raises_exact(errors.ValidationError(
name='hostname',
error=u'An IPA master host cannot be deleted or disabled')):
command()
@pytest.mark.tier1
class TestValidation(XMLRPC_test):
def test_try_validate_create(self, invalid_host):
command = invalid_host.make_create_command()
with raises_exact(errors.ValidationError(
name='hostname',
error=u"invalid domain-name: only letters, numbers, '-' are " +
u"allowed. DNS label may not start or end with '-'")):
command()
# The assumption on these next 4 tests is that if we don't get a
# validation error then the request was processed normally.
def test_try_validate_update(self, invalid_host):
command = invalid_host.make_update_command({})
with raises_exact(errors.NotFound(
reason=u'%s: host not found' % invalid_host.fqdn)):
command()
def test_try_validate_delete(self, invalid_host):
command = invalid_host.make_delete_command()
with raises_exact(errors.NotFound(
reason=u'%s: host not found' % invalid_host.fqdn)):
command()
def test_try_validate_retrieve(self, invalid_host):
command = invalid_host.make_retrieve_command()
with raises_exact(errors.NotFound(
reason=u'%s: host not found' % invalid_host.fqdn)):
command()
def test_try_validate_find(self, invalid_host):
command = invalid_host.make_find_command(invalid_host.fqdn)
result = command()
assert_deepequal(dict(
count=0,
truncated=False,
summary=u'0 hosts matched',
result=[],
), result)
@pytest.yield_fixture
def keytabname(request):
keytabfd, keytabname = tempfile.mkstemp()
try:
os.close(keytabfd)
yield keytabname
finally:
os.unlink(keytabname)
@pytest.mark.tier1
class TestHostFalsePwdChange(XMLRPC_test):
def test_join_host(self, host, keytabname):
"""
Create a test host and join it into IPA.
"""
join_command = 'ipa-client/ipa-join'
if not os.path.isfile(join_command):
pytest.skip("Command '%s' not found" % join_command)
# create a test host with bulk enrollment password
host.track_create()
del host.attrs['krbprincipalname']
host.attrs['has_password'] = True
objclass = list(set(
host.attrs['objectclass']) - {u'krbprincipal', u'krbprincipalaux'})
host.attrs['objectclass'] = objclass
command = host.make_create_command(force=True)
result = command(random=True)
random_pass = result['result']['randompassword']
host.attrs['randompassword'] = random_pass
host.check_create(result)
del host.attrs['randompassword']
        # join the host with the bulk password
new_args = [
join_command,
"-s", host.api.env.host,
"-h", host.fqdn,
"-k", keytabname,
"-w", random_pass,
"-q",
]
try:
ipautil.run(new_args)
except ipautil.CalledProcessError as e:
# join operation may fail on 'adding key into keytab', but
# the keytab is not necessary for further tests
print(e)
host.attrs['has_keytab'] = True
host.attrs['has_password'] = False
host.attrs['krbprincipalname'] = [u'host/%s@%s' % (host.fqdn,
host.api.env.realm)]
host.retrieve()
# Try to change the password of enrolled host with specified password
command = host.make_update_command(
updates=dict(userpassword=u'pass_123'))
with pytest.raises(errors.ValidationError):
command()
# Try to change the password of enrolled host with random password
command = host.make_update_command(updates=dict(random=True))
with pytest.raises(errors.ValidationError):
command()
@pytest.yield_fixture(scope='class')
def dns_setup(host):
try:
host.run_command('dnszone_del', dnszone, revzone, revipv6zone,
**{'continue': True})
except (errors.NotFound, errors.EmptyModlist):
pass
try:
host.run_command('dnszone_add', dnszone, idnssoarname=dnszone_rname)
host.run_command('dnszone_add', revzone, idnssoarname=dnszone_rname)
host.run_command('dnszone_add', revipv6zone,
idnssoarname=dnszone_rname)
yield
finally:
try:
host.run_command('dnszone_del', dnszone, revzone, revipv6zone,
**{'continue': True})
except (errors.NotFound, errors.EmptyModlist):
pass
@pytest.mark.tier1
class TestHostDNS(XMLRPC_test):
def test_add_ipv6only_host(self, dns_setup, ipv6only_host):
ipv6only_host.run_command('dnsrecord_add', dnszone,
ipv6only_host.shortname, aaaarecord=aaaarec)
try:
ipv6only_host.create(force=False)
finally:
command = ipv6only_host.run_command('dnsrecord_del', dnszone,
ipv6only_host.shortname,
aaaarecord=aaaarec)
def test_add_ipv4only_host(self, dns_setup, ipv4only_host):
ipv4only_host.run_command('dnsrecord_add', dnszone,
ipv4only_host.shortname, arecord=arec)
try:
ipv4only_host.create(force=False)
finally:
command = ipv4only_host.run_command('dnsrecord_del', dnszone,
ipv4only_host.shortname,
arecord=arec)
def test_add_ipv46both_host(self, dns_setup, ipv46both_host):
ipv46both_host.run_command('dnsrecord_add', dnszone,
ipv46both_host.shortname,
arecord=arec2, aaaarecord=aaaarec2)
try:
ipv46both_host.create(force=False)
finally:
command = ipv46both_host.run_command('dnsrecord_del', dnszone,
ipv46both_host.shortname,
arecord=arec2,
aaaarecord=aaaarec2)
def test_add_ipv4_host_from_ip(self, dns_setup, ipv4_fromip_host):
ipv4_fromip_host.ensure_missing()
ipv4_fromip_host.track_create()
command = ipv4_fromip_host.make_create_command(force=False)
result = command(ip_address=ipv4_fromip_ip)
ipv4_fromip_host.check_create(result)
result = ipv4_fromip_host.run_command('dnsrecord_show', dnszone,
ipv4_fromip_host.shortname)
assert_deepequal(dict(
value=ipv4_fromip_dnsname,
summary=None,
result=dict(
dn=ipv4_fromip_dn,
idnsname=[ipv4_fromip_dnsname],
arecord=[ipv4_fromip_arec],
),
), result)
result = ipv4_fromip_host.run_command('dnsrecord_show', revzone,
ipv4_fromip_ptr)
assert_deepequal(dict(
value=ipv4_fromip_ptr_dnsname,
summary=None,
result=dict(
dn=ipv4_fromip_ptr_dn,
idnsname=[ipv4_fromip_ptr_dnsname],
ptrrecord=[ipv4_fromip_host.fqdn + '.'],
),
), result)
def test_add_ipv6_host_from_ip(self, dns_setup, ipv6_fromip_host):
ipv6_fromip_host.ensure_missing()
ipv6_fromip_host.track_create()
command = ipv6_fromip_host.make_create_command(force=False)
result = command(ip_address=ipv6_fromip_ipv6)
ipv6_fromip_host.check_create(result)
result = ipv6_fromip_host.run_command('dnsrecord_show', dnszone,
ipv6_fromip_host.shortname)
assert_deepequal(dict(
value=ipv6_fromip_dnsname,
summary=None,
result=dict(
dn=ipv6_fromip_dn,
idnsname=[ipv6_fromip_dnsname],
aaaarecord=[ipv6_fromip_aaaarec],
),
), result)
result = ipv6_fromip_host.run_command('dnsrecord_show', revipv6zone,
ipv6_fromip_ptr)
assert_deepequal(dict(
value=ipv6_fromip_ptr_dnsname,
summary=None,
result=dict(
dn=ipv6_fromip_ptr_dn,
idnsname=[ipv6_fromip_ptr_dnsname],
ptrrecord=[ipv6_fromip_host.fqdn + '.'],
),
), result)
@pytest.fixture(scope='class')
def allowedto_context(request, host3):
def cleanup():
try:
host3.run_command('user_del', user1, user2, **{'continue': True})
except errors.NotFound:
pass
try:
host3.run_command('group_del', group1, group2,
**{'continue': True})
except errors.NotFound:
pass
try:
host3.run_command('hostgroup_del', hostgroup1)
except errors.NotFound:
pass
cleanup()
request.addfinalizer(cleanup)
host3.ensure_exists()
    host3.run_command('user_add', user1, givenname=u'Test', sn=u'User1')
    host3.run_command('user_add', user2, givenname=u'Test', sn=u'User2')
host3.run_command('group_add', group1)
host3.run_command('group_add', group2)
host3.run_command('hostgroup_add', hostgroup1,
description=u'Test hostgroup 1')
@pytest.mark.tier1
class TestHostAllowedTo(XMLRPC_test):
def test_user_allow_retrieve_keytab(self, allowedto_context, host):
host.ensure_exists()
result = host.run_command('host_allow_retrieve_keytab', host.fqdn,
user=user1)
host.attrs['ipaallowedtoperform_read_keys_user'] = [user1]
assert_deepequal(dict(
failed=dict(
ipaallowedtoperform_read_keys=dict(
group=[], host=[], hostgroup=[], user=[]),
),
completed=1,
result=host.filter_attrs(host.allowedto_keys),
), result)
# Duplicates should not be accepted
result = host.run_command('host_allow_retrieve_keytab', host.fqdn,
user=user1)
assert_deepequal(dict(
failed=dict(
ipaallowedtoperform_read_keys=dict(
group=[], host=[], hostgroup=[],
user=[[user1, u'This entry is already a member']],
),
),
completed=0,
result=host.filter_attrs(host.allowedto_keys),
), result)
def test_group_allow_retrieve_keytab(self, allowedto_context, host, host3):
host.ensure_exists()
host3.ensure_exists()
result = host.run_command('host_allow_retrieve_keytab', host.fqdn,
group=[group1, group2], host=[host3.fqdn],
hostgroup=[hostgroup1])
host.attrs['ipaallowedtoperform_read_keys_group'] = [group1, group2]
host.attrs['ipaallowedtoperform_read_keys_host'] = [host3.fqdn]
host.attrs['ipaallowedtoperform_read_keys_hostgroup'] = [hostgroup1]
assert_deepequal(dict(
failed=dict(
ipaallowedtoperform_read_keys=dict(
group=[], host=[], hostgroup=[], user=[]),
),
completed=4,
result=host.filter_attrs(host.allowedto_keys),
), result)
# Non-members cannot be removed
result = host.run_command('host_disallow_retrieve_keytab', host.fqdn,
user=[user2])
assert_deepequal(dict(
failed=dict(
ipaallowedtoperform_read_keys=dict(
group=[],
host=[],
hostgroup=[],
user=[[user2, u'This entry is not a member']],
),
),
completed=0,
result=host.filter_attrs(host.allowedto_keys),
), result)
# Disallow one of the existing allowed groups
result = host.run_command('host_disallow_retrieve_keytab', host.fqdn,
group=[group2])
host.attrs['ipaallowedtoperform_read_keys_group'] = [group1]
assert_deepequal(dict(
failed=dict(
ipaallowedtoperform_read_keys=dict(
group=[], host=[], hostgroup=[], user=[]),
),
completed=1,
result=host.filter_attrs(host.allowedto_keys),
), result)
host.retrieve()
def test_allow_create(self, allowedto_context, host, host3):
host.ensure_exists()
host3.ensure_exists()
result = host.run_command('host_allow_create_keytab', host.fqdn,
group=[group1, group2], user=[user1],
host=[host3.fqdn],
hostgroup=[hostgroup1])
host.attrs['ipaallowedtoperform_write_keys_user'] = [user1]
host.attrs['ipaallowedtoperform_write_keys_group'] = [group1, group2]
host.attrs['ipaallowedtoperform_write_keys_host'] = [host3.fqdn]
host.attrs['ipaallowedtoperform_write_keys_hostgroup'] = [hostgroup1]
assert_deepequal(dict(
failed=dict(
ipaallowedtoperform_write_keys=dict(
group=[], host=[], hostgroup=[], user=[]),
),
completed=5,
result=host.filter_attrs(host.allowedto_keys),
), result)
# Duplicates should not be accepted
result = host.run_command('host_allow_create_keytab', host.fqdn,
group=[group1], user=[user1],
host=[host3.fqdn], hostgroup=[hostgroup1])
assert_deepequal(dict(
failed=dict(
ipaallowedtoperform_write_keys=dict(
group=[[group1, u'This entry is already a member']],
host=[[host3.fqdn, u'This entry is already a member']],
user=[[user1, u'This entry is already a member']],
hostgroup=[[hostgroup1,
u'This entry is already a member']],
),
),
completed=0,
result=host.filter_attrs(host.allowedto_keys),
), result)
        # Non-members cannot be removed
result = host.run_command('host_disallow_create_keytab', host.fqdn,
user=[user2])
assert_deepequal(dict(
failed=dict(
ipaallowedtoperform_write_keys=dict(
group=[],
host=[],
hostgroup=[],
user=[[user2, u'This entry is not a member']],
),
),
completed=0,
result=host.filter_attrs(host.allowedto_keys),
), result)
# Disallow one of the existing allowed groups
result = host.run_command('host_disallow_create_keytab', host.fqdn,
group=[group2])
host.attrs['ipaallowedtoperform_write_keys_group'] = [group1]
assert_deepequal(dict(
failed=dict(
ipaallowedtoperform_write_keys=dict(
group=[],
host=[],
hostgroup=[],
user=[],
),
),
completed=1,
result=host.filter_attrs(host.allowedto_keys),
), result)
host.retrieve()
def test_host_mod(self, host):
# Done (usually) at the end to ensure the tracking works well
host.update(updates=dict(description=u'desc'),
expected_updates=dict(description=[u'desc']))
|
tbabej/freeipa
|
ipatests/test_xmlrpc/test_host_plugin.py
|
Python
|
gpl-3.0
| 32,242
|
###### code to sample from the parameter posterior p(\phi | data) ########
import numpy
from numpy import *
from data import Datum
from tssb import *
from util import dirichletpdfln
from numpy.random import dirichlet, rand  # rand is used below in sample_cons_params
import subprocess as sp
import util2 as u2
import os
def get_c_fnames(tmp_dir):
def _make_c_fname(name):
fname = 'c_%s.txt' % (name)
return os.path.join(tmp_dir, fname)
FNAME_C_TREE = _make_c_fname('tree')
FNAME_C_DATA_STATES = _make_c_fname('data_states')
FNAME_C_PARAMS = _make_c_fname('params')
FNAME_C_MH_ARATIO = _make_c_fname('mh_ar')
return (FNAME_C_TREE, FNAME_C_DATA_STATES, FNAME_C_PARAMS, FNAME_C_MH_ARATIO)
# done for multi-sample
def metropolis(tssb,iters=1000,std=0.01,burnin=0,n_ssms=0,n_cnvs=0,fin1='',fin2='',rseed=1, ntps=5, tmp_dir='.'):
wts, nodes = tssb.get_mixture()
# file names
FNAME_SSM_DATA = fin1
FNAME_CNV_DATA = fin2
NTPS = str(ntps)
FNAME_C_TREE, FNAME_C_DATA_STATES, FNAME_C_PARAMS, FNAME_C_MH_ARATIO = get_c_fnames(tmp_dir)
## initialize the MH sampler###########
#for tp in arange(ntps):
# sample_cons_params(tssb,tp)
# update_params(tssb,tp)
######################################
## prepare to call the c++ code ###########
u2.set_node_height(tssb)
write_tree(tssb,n_ssms,FNAME_C_TREE) #write the current tree to the disk
u2.map_datum_to_node(tssb)
    write_data_state(tssb,FNAME_C_DATA_STATES) # this is needed for binomial parameter computations
###########################################
MH_ITR = str(iters)
MH_STD = str(std)
N_SSM_DATA = str(n_ssms)
N_CNV_DATA = str(n_cnvs)
NNODES = str(len(nodes))
TREE_HEIGHT = str(max([node.ht for node in nodes])+1)
script_dir = os.path.dirname(os.path.realpath(__file__))
sp.check_call(['%s/mh.o' % script_dir, MH_ITR, MH_STD, N_SSM_DATA, N_CNV_DATA, NNODES, TREE_HEIGHT, FNAME_SSM_DATA, FNAME_CNV_DATA, FNAME_C_TREE, FNAME_C_DATA_STATES, FNAME_C_PARAMS,FNAME_C_MH_ARATIO, NTPS])
ar = str(loadtxt(FNAME_C_MH_ARATIO,dtype='string'))
update_tree_params(tssb,FNAME_C_PARAMS) # update the tree with the new parameters sampled using the c++ code
return ar
# done for multi-sample
def write_tree(tssb,n_ssms,fname):
fh=open(fname,'w')
wts,nodes=tssb.get_mixture()
did_int_dict=dict()
for dat in tssb.data:
if dat.id[0]=='s':
did_int_dict[dat.id]=int(dat.id[1:])
else:
did_int_dict[dat.id]=n_ssms+int(dat.id[1:])
def descend(root):
for child in root.children():
descend(child)
# write data#
cids=''
for child in root.children():cids+=str(child.id)+','
cids=cids.strip(',')
if cids=='': cids=str(-1)
dids=''
for dat in root.get_data():dids+=str(did_int_dict[dat.id])+','
dids=dids.strip(',')
if dids=='': dids=str(-1)
line = str(root.id) + '\t' + list_to_string(root.params) + '\t' + list_to_string(root.pi) + '\t' + str(len(root.children())) + '\t' + cids + '\t' + str(len(root.get_data())) + '\t' + dids + '\t' + str(root.ht)
fh.write(line)
fh.write('\n')
fh.flush()
###############
descend(tssb.root['node'])
fh.flush()
fh.close()
def list_to_string(p):
o=''
for pp in p:o+=str(pp)+','
return o.strip(',')
# no changes for multi-sample
# data/node state format (parameter independent dot-product weights)
# datum_id node_id_1,nr,nv;node_id_2,nr,nv;.... (one "node_id,nr,nv" triple per node)
# these weights are used to compute data log-likelihood
def write_data_state(tssb,fname):
fh = open(fname,'w')
wts,nodes=tssb.get_mixture()
for dat in tssb.data:
if not dat.cnv: continue # nothing to do for CNVs
if not dat.node: continue # todo: this won't happen
poss_n_genomes = dat.compute_n_genomes(0)
for node in nodes:
ssm_node = dat.node.path[-1]
mr_cnv = find_most_recent_cnv(dat,node)
ancestors = node.get_ancestors()
dat.state1 = '' # maternal
dat.state2 = '' # paternal
dat.state3 = '' # maternal
dat.state4 = '' # paternal
if (not ssm_node in ancestors) and (not mr_cnv):
dat.state1 += str(node.id) + ',' + str(2) + ',' + str(0) + ';'
dat.state2 += str(node.id) + ',' + str(2) + ',' + str(0) + ';'
dat.state3 += str(node.id) + ',' + str(2) + ',' + str(0) + ';'
dat.state4 += str(node.id) + ',' + str(2) + ',' + str(0) + ';'
elif ssm_node in ancestors and (not mr_cnv):
dat.state1 += str(node.id) + ',' + str(1) + ',' + str(1) + ';'
dat.state2 += str(node.id) + ',' + str(1) + ',' + str(1) + ';'
dat.state3 += str(node.id) + ',' + str(1) + ',' + str(1) + ';'
dat.state4 += str(node.id) + ',' + str(1) + ',' + str(1) + ';'
elif (not ssm_node in ancestors) and mr_cnv:
dat.state1 += str(node.id) + ',' + str(mr_cnv[1]+mr_cnv[2]) + ',' + str(0) + ';'
dat.state2 += str(node.id) + ',' + str(mr_cnv[1]+mr_cnv[2]) + ',' + str(0) + ';'
dat.state3 += str(node.id) + ',' + str(mr_cnv[1]+mr_cnv[2]) + ',' + str(0) + ';'
dat.state4 += str(node.id) + ',' + str(mr_cnv[1]+mr_cnv[2]) + ',' + str(0) + ';'
elif ssm_node in ancestors and mr_cnv:
dat.state3 += str(node.id) + ',' + str(max(0,mr_cnv[1]+mr_cnv[2]-1)) + ',' + str(min(1,mr_cnv[1]+mr_cnv[2])) + ';'
dat.state4 += str(node.id) + ',' + str(max(0,mr_cnv[1]+mr_cnv[2]-1)) + ',' + str(min(1,mr_cnv[1]+mr_cnv[2])) + ';'
if ssm_node in mr_cnv[0].node.get_ancestors():
dat.state1 += str(node.id) + ',' + str(mr_cnv[1]) + ',' + str(mr_cnv[2]) + ';' # maternal
dat.state2 += str(node.id) + ',' + str(mr_cnv[2]) + ',' + str(mr_cnv[1]) + ';' # paternal
else:
dat.state1 += str(node.id) + ',' + str(max(0,mr_cnv[1]+mr_cnv[2]-1)) + ',' + str(min(1,mr_cnv[1]+mr_cnv[2])) + ';'
dat.state2 += str(node.id) + ',' + str(max(0,mr_cnv[1]+mr_cnv[2]-1)) + ',' + str(min(1,mr_cnv[1]+mr_cnv[2])) + ';'
else:
print "PANIC"
if poss_n_genomes[0][1] == 0:
dat.state1 = dat.state2
elif poss_n_genomes[1][1] == 0:
dat.state2 = dat.state1
if len(poss_n_genomes) == 2:
dat.state3 = dat.state1
dat.state4 = dat.state2
fh.write(str(dat.id[1:]) + '\t' + dat.state1.strip(';') + '\t' + dat.state2.strip(';') +'\t' + dat.state3.strip(';') +'\t' + dat.state4.strip(';') +'\t')
fh.write('\n')
fh.flush()
fh.close()
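# Illustrative example of the serialized state format (values assumed, for intuition
# only): for an SSM with id "s3" and two tree nodes 0 and 1, where node 1 carries the
# SSM and the associated CNV is not ancestral to either node, the written line would
# look roughly like
#   3\t0,2,0;1,1,1\t0,2,0;1,1,1\t0,2,0;1,1,1\t0,2,0;1,1,1
# i.e. the integer datum id followed by four tab-separated allele states, each a
# ';'-joined list of "node_id,nr,nv" triples that mh.o uses to weight the binomial
# likelihood terms.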
# done for multi-sample
def find_most_recent_cnv(dat,nd):
out = None
for n in nd.get_ancestors()[::-1]:
if n in [x[0].node for x in dat.cnv]:
out = [x for x in dat.cnv if x[0].node == n][0]
break
return out
# done for multi sample
def update_tree_params(tssb,fname):
wts, nodes = tssb.get_mixture()
ndict = dict()
for node in nodes: ndict[node.id]=node
fh=open(fname)
params=[line.split() for line in fh.readlines()]
fh.close()
for p in params:
ndict[int(p[0])].params = string_to_list(p[1])
ndict[int(p[0])].pi = string_to_list(p[2])
#params=loadtxt('c_params.txt')
#for p in params:
# ndict[p[0]].params = p[1]
# ndict[p[0]].pi = p[2]
def string_to_list(p):
p=p.strip(',')
return array([float(pp) for pp in p.split(',')])
# done for multi-sample
# tree-structured finite-dimensional stick breaking
def sample_cons_params(tssb,tp):
def descend(root,tp):
if root.parent() is None:
root.params1[tp] = 1
root.pi1[tp] = root.params1[tp]*rand(1) # break nu stick
r = root.params1[tp]-root.pi1[tp] #mass assigned to children
p = rand(len(root.children()));p=r*p*1./sum(p)
index=0
for child in root.children():
child.params1[tp] = p[index]# break psi sticks
child.pi1[tp] = child.params1[tp]*(rand(1)**(len(child.children())>0)) # break nu stick
index+=1
for child in root.children():
descend(child,tp)
descend(tssb.root['node'],tp)
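# Worked illustration (draws assumed, for intuition only): at the root, params1 = 1.
# If the nu draw is 0.6, the root keeps pi1 = 0.6 and the leftover mass r = 0.4 is
# split among its children in proportion to uniform psi draws; with two children and
# draws (0.25, 0.75), the children receive params1 = 0.1 and 0.3 respectively. A
# childless node keeps its entire params1 as pi1, because rand(1)**(len(children)>0)
# evaluates to 1 at a leaf.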
# done for multi-sample
def update_params(tssb,tp):
def descend(root,tp):
for child in root.children():
descend(child,tp)
root.params[tp] = root.params1[tp]
root.pi[tp] = root.pi1[tp]
descend(tssb.root['node'],tp)
###### old code, not in use #############
# data/node state format (parameter independent dot-product weights)
# datum_id node_id_1,nr,nv;node_id_2,nr,nv;....
# these weights are used to compute data log-likelihood
def write_data_state1111(tssb):
fh = open('c_data_states.txt','w')
wts,nodes=tssb.get_mixture()
for dat in tssb.data:
if not dat.cnv: continue # nothing to do for CNVs
if not dat.node: continue # todo: this won't happen
ancestors = dat.node.get_ancestors() # path from root to ssm node
mr_cnv = dat.cnv[0] # CNV corresponding to the SSM
dat.state1 = '' # maternal
dat.state2 = '' # paternal
# do this until we encounter the SSM node,
# i.e., along the path from root to the SSM node
visited_cnv = False
for node in ancestors:
if node != mr_cnv[0].node and visited_cnv==False: # until CNV is encountered
dat.state1 += str(node.id) + ',' + str(2) + ',' + str(0) + ';'
else:
visited_cnv = True
dat.state1 += str(node.id) + ',' + str(mr_cnv[1]+mr_cnv[2]) + ',' + str(0) + ';'
dat.state2=dat.state1
# do this after the SSM node, i.e, for all nodes in the subtree below the SSM node
# [node_id, nr, nv] format
def descend(n,d):
if n == mr_cnv[0].node:
d.state1 += str(n.id) + ',' + str(mr_cnv[1]) + ',' + str(mr_cnv[2]) + ';' # maternal
d.state2 += str(n.id) + ',' + str(mr_cnv[2]) + ',' + str(mr_cnv[1]) + ';' # paternal
else:
d.state1 += str(n.id) + ',' + str(mr_cnv[1]+mr_cnv[2]-1) + ',' + str(1) + ';'
d.state2 = d.state1
for child in n.children():
descend(child,d)
# traverse the tree below the ssm node
for child in node.children(): descend(child,dat)
fh.write(str(dat.id[1:]) + '\t' + dat.state1.strip(';') + '\t' + dat.state2.strip(';'))
fh.write('\n')
fh.flush()
fh.close()
|
morrislab/phylowgs
|
params.py
|
Python
|
gpl-3.0
| 9,595
|
from tkinter import *
from tkinter102 import MyGui
# Application main window
mainwin = Tk()
Label(mainwin, text=__name__).pack()
# Pop-up window
popup = Toplevel()
Label(popup, text='Attach').pack(side=LEFT)
MyGui(popup).pack(side=RIGHT)
mainloop()
|
ViMiao/PythonLearning
|
ProgrammingPython/C01/attachgui.py
|
Python
|
gpl-3.0
| 238
|
# Copyright (C) 2006-2007 Red Hat, Inc.
# Copyright (C) 2010 Collabora Ltd. <http://www.collabora.co.uk/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
from gettext import gettext as _
from gi.repository import Gtk
from gi.repository import Gio
from gi.repository import GObject
import dbus
from sugar3.graphics.palette import Palette
from sugar3.graphics.palettemenu import PaletteMenuItem
from sugar3.graphics.icon import Icon
from sugar3.graphics.alert import TimeoutAlert
from sugar3.graphics import style
from sugar3.bundle.activitybundle import ActivityBundle
from jarabe.model import shell
from jarabe.model import friends
from jarabe.model.session import get_session_manager
from jarabe.controlpanel.gui import ControlPanel
import jarabe.desktop.homewindow
class BuddyMenu(Palette):
def __init__(self, buddy):
self._buddy = buddy
buddy_icon = Icon(icon_name='computer-xo',
xo_color=buddy.get_color(),
pixel_size=style.STANDARD_ICON_SIZE)
nick = buddy.get_nick()
Palette.__init__(self, None, primary_text=nick, icon=buddy_icon)
self.menu_box = Gtk.VBox()
self.set_content(self.menu_box)
self.menu_box.show_all()
self._invite_menu = None
self._active_activity_changed_hid = None
# Fixme: we need to make the widget accessible through the Palette API
self._widget.connect('destroy', self.__destroy_cb)
self._buddy.connect('notify::nick', self.__buddy_notify_nick_cb)
if buddy.is_owner():
self._add_my_items()
else:
self._add_buddy_items()
def __destroy_cb(self, menu):
if self._active_activity_changed_hid is not None:
home_model = shell.get_model()
home_model.disconnect(self._active_activity_changed_hid)
self._buddy.disconnect_by_func(self.__buddy_notify_nick_cb)
def _add_buddy_items(self):
menu_item = None
if friends.get_model().has_buddy(self._buddy):
menu_item = PaletteMenuItem(_('Remove friend'), 'list-remove')
menu_item.connect('activate', self._remove_friend_cb)
else:
menu_item = PaletteMenuItem(_('Make friend'), 'list-add')
menu_item.connect('activate', self._make_friend_cb)
self.menu_box.pack_start(menu_item, True, True, 0)
self._invite_menu = PaletteMenuItem('')
self._invite_menu.connect('activate', self._invite_friend_cb)
self.menu_box.pack_start(self._invite_menu, True, True, 0)
home_model = shell.get_model()
self._active_activity_changed_hid = home_model.connect(
'active-activity-changed', self._cur_activity_changed_cb)
activity = home_model.get_active_activity()
self._update_invite_menu(activity)
def _add_my_items(self):
settings = Gio.Settings('org.sugarlabs')
show_shutdown = settings.get_boolean('show-shutdown')
show_restart = settings.get_boolean('show-restart')
show_logout = settings.get_boolean('show-logout')
if "SUGAR_SHOW_SHUTDOWN" in os.environ:
show_shutdown = os.environ["SUGAR_SHOW_SHUTDOWN"] == "yes"
if "SUGAR_SHOW_RESTART" in os.environ:
show_restart = os.environ["SUGAR_SHOW_RESTART"] == "yes"
if "SUGAR_SHOW_LOGOUT" in os.environ:
show_logout = os.environ["SUGAR_SHOW_LOGOUT"] == "yes"
if show_shutdown:
item = PaletteMenuItem(_('Shutdown'), 'system-shutdown')
item.connect('activate', self.__shutdown_activate_cb)
self.menu_box.pack_start(item, True, True, 0)
if show_restart:
item = PaletteMenuItem(_('Restart'), 'system-restart')
item.connect('activate', self.__reboot_activate_cb)
self.menu_box.pack_start(item, True, True, 0)
item.show()
if show_logout:
item = PaletteMenuItem(_('Logout'), 'system-logout')
item.connect('activate', self.__logout_activate_cb)
self.menu_box.pack_start(item, True, True, 0)
item.show()
item = PaletteMenuItem(_('My Settings'), 'preferences-system')
item.connect('activate', self.__controlpanel_activate_cb)
self.menu_box.pack_start(item, True, True, 0)
item.show()
def _quit(self, action):
jarabe.desktop.homewindow.get_instance().busy()
action()
GObject.timeout_add_seconds(3, self.__quit_timeout_cb)
def __quit_timeout_cb(self):
jarabe.desktop.homewindow.get_instance().unbusy()
alert = TimeoutAlert(30)
alert.props.title = _('An activity is not responding.')
alert.props.msg = _('You may lose unsaved work if you continue.')
alert.connect('response', self.__quit_accept_cb)
jarabe.desktop.homewindow.get_instance().add_alert(alert)
alert.show()
def __quit_accept_cb(self, alert, response_id):
jarabe.desktop.homewindow.get_instance().remove_alert(alert)
if response_id is Gtk.ResponseType.CANCEL:
get_session_manager().cancel_shutdown()
else:
jarabe.desktop.homewindow.get_instance().busy()
get_session_manager().shutdown_completed()
def __logout_activate_cb(self, menu_item):
self._quit(get_session_manager().logout)
def __reboot_activate_cb(self, menu_item):
self._quit(get_session_manager().reboot)
def __shutdown_activate_cb(self, menu_item):
self._quit(get_session_manager().shutdown)
def __controlpanel_activate_cb(self, menu_item):
# hide the frame when control panel is shown
import jarabe.frame
frame = jarabe.frame.get_view()
frame.hide()
# show the control panel
panel = ControlPanel()
panel.show()
def _update_invite_menu(self, activity):
buddy_activity = self._buddy.props.current_activity
if buddy_activity is not None:
buddy_activity_id = buddy_activity.activity_id
else:
buddy_activity_id = None
if activity is not None:
bundle_activity = ActivityBundle(activity.get_bundle_path())
if activity is None or activity.is_journal() or \
activity.get_activity_id() == buddy_activity_id or \
bundle_activity.get_max_participants() <= 1:
self._invite_menu.hide()
else:
title = activity.get_title()
self._invite_menu.set_label(_('Invite to %s') % title)
icon = Icon(file=activity.get_icon_path(),
pixel_size=style.SMALL_ICON_SIZE)
icon.props.xo_color = activity.get_icon_color()
self._invite_menu.set_image(icon)
icon.show()
self._invite_menu.show()
def _cur_activity_changed_cb(self, home_model, activity_model):
self._update_invite_menu(activity_model)
def __buddy_notify_nick_cb(self, buddy, pspec):
self.set_primary_text(buddy.props.nick)
def _make_friend_cb(self, menuitem):
friends.get_model().make_friend(self._buddy)
def _remove_friend_cb(self, menuitem):
friends.get_model().remove(self._buddy)
def _invite_friend_cb(self, menuitem):
activity = shell.get_model().get_active_activity()
service = activity.get_service()
if service:
try:
service.InviteContact(self._buddy.props.account,
self._buddy.props.contact_id)
except dbus.DBusException, e:
expected_exceptions = [
'org.freedesktop.DBus.Error.UnknownMethod',
'org.freedesktop.DBus.Python.NotImplementedError']
if e.get_dbus_name() in expected_exceptions:
logging.warning('Trying deprecated Activity.Invite')
service.Invite(self._buddy.props.key)
else:
raise
else:
            logging.error('Invite failed, activity service not found')
|
icarito/sugar
|
src/jarabe/view/buddymenu.py
|
Python
|
gpl-3.0
| 8,711
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, sathishpy@gmail.com and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestCMAccountMapper(unittest.TestCase):
pass
|
sathishpy/corrugation
|
corrugation/corrugation/doctype/cm_account_mapper/test_cm_account_mapper.py
|
Python
|
gpl-3.0
| 225
|
import tables
from sapphire import CoincidencesESD, HiSPARCStations
STATIONS = [501, 510]
EVENTDATA_PATH = '/Users/arne/Datastore/501_510/e_501_510_141101_150201.h5'
# EVENTDATA_PATH = '/Users/arne/Datastore/501_510/e_501_510_150120_150201.h5'
def analyse_coincidences(data):
"""Find and store coincidences"""
station_groups = ['/s%d' % number for number in STATIONS]
cluster = get_cluster()
coin = CoincidencesESD(data, '/coincidences', station_groups)
coin.search_coincidences(window=2000)
coin.store_coincidences(cluster)
def get_cluster():
"""Get latest position from API"""
return HiSPARCStations(STATIONS)
if __name__ == '__main__':
with tables.open_file(EVENTDATA_PATH, 'a') as data:
analyse_coincidences(data)
|
HiSPARC/topaz
|
150224_501_510/coincidences.py
|
Python
|
gpl-3.0
| 773
|
#!/usr/bin/python3
""" home_dad.py:
"""
# Import Required Libraries (Standard, Third Party, Local) ************************************************************
import copy
import datetime
import logging
import multiprocessing
import time
from .home_general import HomeGeneral
from rpihome.modules.message import Message
# Authorship Info *****************************************************************************************************
__author__ = "Christopher Maue"
__copyright__ = "Copyright 2016, The RPi-Home Project"
__credits__ = ["Christopher Maue"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Christopher Maue"
__email__ = "csmaue@gmail.com"
__status__ = "Development"
# Device class ********************************************************************************************************
class HomeUser3(HomeGeneral):
def __init__(self, msg_out_queue, logger=None):
# Configure logger
self.logger = logger or logging.getLogger(__name__)
super().__init__(self.logger)
self.msg_out_queue = msg_out_queue
def by_schedule(self, **kwargs):
# Process input variables if present
if kwargs is not None:
for key, value in kwargs.items():
if key == "datetime":
self.dt = value
if key == "lastseen":
self.ishome.last_seen = value
# Monday
if self.dt.weekday() == 0:
if 2016 <= self.dt.date().isocalendar()[0] <= 2017:
# If even number week (non-custody week)
if self.dt.date().isocalendar()[1] % 2 == 0:
if self.dt.time() < datetime.time(7,0):
if self.yes is False:
self.logger.info("User3 is now home")
self.yes = True
else:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
else:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
else:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
# Tuesday
elif self.dt.weekday() == 1:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
# Wednesday
elif self.dt.weekday() == 2:
if self.dt.time() >= datetime.time(17,0):
if self.yes is False:
self.logger.info("User3 is now home")
self.yes = True
else:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
# Thursday
elif self.dt.weekday() == 3:
if self.dt.time() < datetime.time(7,0) or self.dt.time() >= datetime.time(17,0):
if self.yes is False:
self.logger.info("User3 is now home")
self.yes = True
else:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
# Friday
elif self.dt.weekday() == 4:
if 2016 <= self.dt.date().isocalendar()[0] <= 2017:
# If odd number week (custody week)
if self.dt.date().isocalendar()[1] % 2 == 1:
# Home before 7am or after 5pm
if self.dt.time() < datetime.time(7,0) or self.dt.time() >= datetime.time(17,0):
if self.yes is False:
self.logger.info("User3 is now home")
self.yes = True
else:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
else:
# Home only before 7am
if self.dt.time() < datetime.time(7,0):
if self.yes is False:
self.logger.info("User3 is now home")
self.yes = True
else:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
else:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
# Saturday
elif self.dt.weekday() == 5:
if 2016 <= self.dt.date().isocalendar()[0] <= 2017:
# If odd number week (custody week)
if self.dt.date().isocalendar()[1] % 2 == 1:
if self.yes is False:
self.logger.info("User3 is now home")
self.yes = True
else:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
# Sunday
elif self.dt.weekday() == 6:
if 2016 <= self.dt.date().isocalendar()[0] <= 2017:
# If odd number week (custody week)
if self.dt.date().isocalendar()[1] % 2 == 1:
if self.yes is False:
self.logger.info("User3 is now home")
self.yes = True
else:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
# Invalid day
else:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
# Return result
return self.yes
def by_mode(self, **kwargs):
# Process input variables if present
if kwargs is not None:
for key, value in kwargs.items():
if key == "datetime":
self.dt = value
if key == "mode":
self.mode = value
if key == "mac":
self.mac = value
if key == "ip":
self.ip = value
# Use correct rule-set based on home/away decision mode
# mode == 0 represents a mode of "force away"
if self.mode == 0:
if self.yes is True:
self.logger.info("User3 is no longer home")
self.yes = False
# mode == 1 represents a mode of "force home"
elif self.mode == 1:
if self.yes is False:
self.logger.info("User3 is home")
self.yes = True
# mode == 2 determines home/away based on each user's typical schedule
elif self.mode == 2:
self.by_schedule(datetime=self.dt)
# mode == 3 determines home/away based on a combination of arp tables and pings
elif self.mode == 3:
self.by_arp_and_ping(datetime=self.dt, mac=self.mac, ip=self.ip)
        # mode == 4 determines home/away based solely on pings but with the 30 minute timeout on "away"
elif self.mode == 4:
self.by_ping_with_delay(ip=self.ip)
# mode == 5 determines home/away based on schedule, but performs periodic pings regardless to capture updates in the "homeTime" register
elif self.mode == 5:
self.by_ping_with_delay(datetime=self.dt, ip=self.ip)
self.by_schedule(datetime=self.dt)
else:
self.logger.error("Cannot make home/away decision based on invalid mode")
# Return result
return self.yes
def command(self):
if self.yes != self.mem:
if self.yes is True:
self.msg_to_send = Message(source="13", dest="11", type="100", name="user3", payload="1")
self.msg_out_queue.put_nowait(self.msg_to_send.raw)
self.logger.debug("Sending 'user3 home' message to logic solver")
else:
self.msg_to_send = Message(source="13", dest="11", type="100", name="user3", payload="0")
self.msg_out_queue.put_nowait(self.msg_to_send.raw)
self.logger.debug("Sending 'user3 NOT home' message to logic solver")
self.mem = copy.copy(self.yes)
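# Hypothetical usage sketch (queue wiring and timestamps assumed, not part of the
# original module): the parent process is expected to drive this class roughly as
#
#   queue = multiprocessing.Queue()
#   user3 = HomeUser3(queue)
#   user3.by_mode(datetime=datetime.datetime.now(), mode=2)  # schedule-based decision
#   user3.command()  # enqueues a "user3 home / not home" Message when the state changes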
|
csm0042/rpihome
|
rpihome/home/home_user3.py
|
Python
|
gpl-3.0
| 8,755
|
from encode2unicode import *
from unicode2encode import *
|
arulalant/txt2unicode
|
txt2unicode/__init__.py
|
Python
|
gpl-3.0
| 58
|
# list all test modules to be run by 'manage.py test ductus'
# (see http://stackoverflow.com/a/5160779)
from modules.flashcards.flashcard_deck import *
from modules.textwiki.textwiki import *
from wiki.new_lesson_page import *
|
wikiotics/ductus1
|
ductus/tests/__init__.py
|
Python
|
gpl-3.0
| 228
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import platform
from decimal import Decimal
from urllib import quote
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
from electrum_nmc_gui.qt.qrcodewidget import QRCodeWidget
from electrum_nmc.i18n import _
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
column_index = 4
class QR_Window(QWidget):
def __init__(self, win):
QWidget.__init__(self)
self.win = win
self.setWindowTitle('Electrum - '+_('Invoice'))
self.setMinimumSize(800, 250)
self.address = ''
self.label = ''
self.amount = 0
self.setFocusPolicy(QtCore.Qt.NoFocus)
main_box = QHBoxLayout()
self.qrw = QRCodeWidget()
main_box.addWidget(self.qrw, 1)
vbox = QVBoxLayout()
main_box.addLayout(vbox)
self.address_label = QLabel("")
#self.address_label.setFont(QFont(MONOSPACE_FONT))
vbox.addWidget(self.address_label)
self.label_label = QLabel("")
vbox.addWidget(self.label_label)
self.amount_label = QLabel("")
vbox.addWidget(self.amount_label)
vbox.addStretch(1)
self.setLayout(main_box)
def set_content(self, address, amount, message, url):
address_text = "<span style='font-size: 18pt'>%s</span>" % address if address else ""
self.address_label.setText(address_text)
if amount:
amount = self.win.format_amount(amount)
amount_text = "<span style='font-size: 21pt'>%s</span> <span style='font-size: 16pt'>%s</span> " % (amount, self.win.base_unit())
else:
amount_text = ''
self.amount_label.setText(amount_text)
label_text = "<span style='font-size: 21pt'>%s</span>" % message if message else ""
self.label_label.setText(label_text)
self.qrw.setData(url)
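# Hypothetical usage sketch (values assumed, not from the original file): the owning
# main window is expected to construct one QR_Window and refresh it per invoice, e.g.
#
#   qr_win = QR_Window(main_window)  # main_window must provide format_amount()/base_unit()
#   qr_win.set_content(address, amount, message, url)
#   qr_win.show()
#
# where `url` is the payment URI rendered into the QR code via QRCodeWidget.setData().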
|
mazaclub/electrum-nmc
|
gui/qt/qrwindow.py
|
Python
|
gpl-3.0
| 2,752
|
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import os
import os.path as op
import logging
from pytest import mark
from hscommon.path import Path
import hscommon.conflict
import hscommon.util
from hscommon.testutil import eq_, log_calls
from hscommon.jobprogress.job import Job
from .base import TestApp
from .results_test import GetTestGroups
from .. import app, fs, engine
from ..scanner import ScanType
def add_fake_files_to_directories(directories, files):
directories.get_files = lambda j=None: iter(files)
directories._dirs.append("this is just so Scan() doesnt return 3")
class TestCaseDupeGuru:
def test_apply_filter_calls_results_apply_filter(self, monkeypatch):
dgapp = TestApp().app
monkeypatch.setattr(
dgapp.results, "apply_filter", log_calls(dgapp.results.apply_filter)
)
dgapp.apply_filter("foo")
eq_(2, len(dgapp.results.apply_filter.calls))
call = dgapp.results.apply_filter.calls[0]
assert call["filter_str"] is None
call = dgapp.results.apply_filter.calls[1]
eq_("foo", call["filter_str"])
def test_apply_filter_escapes_regexp(self, monkeypatch):
dgapp = TestApp().app
monkeypatch.setattr(
dgapp.results, "apply_filter", log_calls(dgapp.results.apply_filter)
)
dgapp.apply_filter("()[]\\.|+?^abc")
call = dgapp.results.apply_filter.calls[1]
eq_("\\(\\)\\[\\]\\\\\\.\\|\\+\\?\\^abc", call["filter_str"])
dgapp.apply_filter(
"(*)"
) # In "simple mode", we want the * to behave as a wilcard
call = dgapp.results.apply_filter.calls[3]
eq_(r"\(.*\)", call["filter_str"])
dgapp.options["escape_filter_regexp"] = False
dgapp.apply_filter("(abc)")
call = dgapp.results.apply_filter.calls[5]
eq_("(abc)", call["filter_str"])
def test_copy_or_move(self, tmpdir, monkeypatch):
# The goal here is just to have a test for a previous blowup I had. I know my test coverage
# for this unit is pathetic. What's done is done. My approach now is to add tests for
# every change I want to make. The blowup was caused by a missing import.
p = Path(str(tmpdir))
p["foo"].open("w").close()
monkeypatch.setattr(
hscommon.conflict,
"smart_copy",
log_calls(lambda source_path, dest_path: None),
)
# XXX This monkeypatch is temporary. will be fixed in a better monkeypatcher.
monkeypatch.setattr(app, "smart_copy", hscommon.conflict.smart_copy)
monkeypatch.setattr(
os, "makedirs", lambda path: None
) # We don't want the test to create that fake directory
dgapp = TestApp().app
dgapp.directories.add_path(p)
[f] = dgapp.directories.get_files()
dgapp.copy_or_move(f, True, "some_destination", 0)
eq_(1, len(hscommon.conflict.smart_copy.calls))
call = hscommon.conflict.smart_copy.calls[0]
eq_(call["dest_path"], op.join("some_destination", "foo"))
eq_(call["source_path"], f.path)
def test_copy_or_move_clean_empty_dirs(self, tmpdir, monkeypatch):
tmppath = Path(str(tmpdir))
sourcepath = tmppath["source"]
sourcepath.mkdir()
sourcepath["myfile"].open("w")
app = TestApp().app
app.directories.add_path(tmppath)
[myfile] = app.directories.get_files()
monkeypatch.setattr(app, "clean_empty_dirs", log_calls(lambda path: None))
app.copy_or_move(myfile, False, tmppath["dest"], 0)
calls = app.clean_empty_dirs.calls
eq_(1, len(calls))
eq_(sourcepath, calls[0]["path"])
def test_Scan_with_objects_evaluating_to_false(self):
class FakeFile(fs.File):
def __bool__(self):
return False
# At some point, any() was used in a wrong way that made Scan() wrongly return 1
app = TestApp().app
f1, f2 = [FakeFile("foo") for i in range(2)]
f1.is_ref, f2.is_ref = (False, False)
assert not (bool(f1) and bool(f2))
add_fake_files_to_directories(app.directories, [f1, f2])
app.start_scanning() # no exception
@mark.skipif("not hasattr(os, 'link')")
def test_ignore_hardlink_matches(self, tmpdir):
# If the ignore_hardlink_matches option is set, don't match files hardlinking to the same
# inode.
tmppath = Path(str(tmpdir))
tmppath["myfile"].open("w").write("foo")
os.link(str(tmppath["myfile"]), str(tmppath["hardlink"]))
app = TestApp().app
app.directories.add_path(tmppath)
app.options["scan_type"] = ScanType.Contents
app.options["ignore_hardlink_matches"] = True
app.start_scanning()
eq_(len(app.results.groups), 0)
def test_rename_when_nothing_is_selected(self):
# Issue #140
# It's possible that rename operation has its selected row swept off from under it, thus
# making the selected row None. Don't crash when it happens.
dgapp = TestApp().app
# selected_row is None because there's no result.
assert not dgapp.result_table.rename_selected("foo") # no crash
class TestCaseDupeGuru_clean_empty_dirs:
def pytest_funcarg__do_setup(self, request):
monkeypatch = request.getfuncargvalue("monkeypatch")
monkeypatch.setattr(
hscommon.util,
"delete_if_empty",
log_calls(lambda path, files_to_delete=[]: None),
)
# XXX This monkeypatch is temporary. will be fixed in a better monkeypatcher.
monkeypatch.setattr(app, "delete_if_empty", hscommon.util.delete_if_empty)
self.app = TestApp().app
def test_option_off(self, do_setup):
self.app.clean_empty_dirs(Path("/foo/bar"))
eq_(0, len(hscommon.util.delete_if_empty.calls))
def test_option_on(self, do_setup):
self.app.options["clean_empty_dirs"] = True
self.app.clean_empty_dirs(Path("/foo/bar"))
calls = hscommon.util.delete_if_empty.calls
eq_(1, len(calls))
eq_(Path("/foo/bar"), calls[0]["path"])
eq_([".DS_Store"], calls[0]["files_to_delete"])
def test_recurse_up(self, do_setup, monkeypatch):
# delete_if_empty must be recursively called up in the path until it returns False
@log_calls
def mock_delete_if_empty(path, files_to_delete=[]):
return len(path) > 1
monkeypatch.setattr(hscommon.util, "delete_if_empty", mock_delete_if_empty)
# XXX This monkeypatch is temporary. will be fixed in a better monkeypatcher.
monkeypatch.setattr(app, "delete_if_empty", mock_delete_if_empty)
self.app.options["clean_empty_dirs"] = True
self.app.clean_empty_dirs(Path("not-empty/empty/empty"))
calls = hscommon.util.delete_if_empty.calls
eq_(3, len(calls))
eq_(Path("not-empty/empty/empty"), calls[0]["path"])
eq_(Path("not-empty/empty"), calls[1]["path"])
eq_(Path("not-empty"), calls[2]["path"])
class TestCaseDupeGuruWithResults:
def pytest_funcarg__do_setup(self, request):
app = TestApp()
self.app = app.app
self.objects, self.matches, self.groups = GetTestGroups()
self.app.results.groups = self.groups
self.dpanel = app.dpanel
self.dtree = app.dtree
self.rtable = app.rtable
self.rtable.refresh()
tmpdir = request.getfuncargvalue("tmpdir")
tmppath = Path(str(tmpdir))
tmppath["foo"].mkdir()
tmppath["bar"].mkdir()
self.app.directories.add_path(tmppath)
def test_GetObjects(self, do_setup):
objects = self.objects
groups = self.groups
r = self.rtable[0]
assert r._group is groups[0]
assert r._dupe is objects[0]
r = self.rtable[1]
assert r._group is groups[0]
assert r._dupe is objects[1]
r = self.rtable[4]
assert r._group is groups[1]
assert r._dupe is objects[4]
def test_GetObjects_after_sort(self, do_setup):
objects = self.objects
groups = self.groups[:] # we need an un-sorted reference
self.rtable.sort("name", False)
r = self.rtable[1]
assert r._group is groups[1]
assert r._dupe is objects[4]
def test_selected_result_node_paths_after_deletion(self, do_setup):
# cases where the selected dupes aren't there are correctly handled
self.rtable.select([1, 2, 3])
self.app.remove_selected()
# The first 2 dupes have been removed. The 3rd one is a ref. it stays there, in first pos.
eq_(self.rtable.selected_indexes, [1]) # no exception
def test_selectResultNodePaths(self, do_setup):
app = self.app
objects = self.objects
self.rtable.select([1, 2])
eq_(len(app.selected_dupes), 2)
assert app.selected_dupes[0] is objects[1]
assert app.selected_dupes[1] is objects[2]
def test_selectResultNodePaths_with_ref(self, do_setup):
app = self.app
objects = self.objects
self.rtable.select([1, 2, 3])
eq_(len(app.selected_dupes), 3)
assert app.selected_dupes[0] is objects[1]
assert app.selected_dupes[1] is objects[2]
assert app.selected_dupes[2] is self.groups[1].ref
def test_selectResultNodePaths_after_sort(self, do_setup):
app = self.app
objects = self.objects
groups = self.groups[:] # To keep the old order in memory
self.rtable.sort("name", False) # 0
# Now, the group order is supposed to be reversed
self.rtable.select([1, 2, 3])
eq_(len(app.selected_dupes), 3)
assert app.selected_dupes[0] is objects[4]
assert app.selected_dupes[1] is groups[0].ref
assert app.selected_dupes[2] is objects[1]
def test_selected_powermarker_node_paths(self, do_setup):
# app.selected_dupes is correctly converted into paths
self.rtable.power_marker = True
self.rtable.select([0, 1, 2])
self.rtable.power_marker = False
eq_(self.rtable.selected_indexes, [1, 2, 4])
def test_selected_powermarker_node_paths_after_deletion(self, do_setup):
# cases where the selected dupes aren't there are correctly handled
app = self.app
self.rtable.power_marker = True
self.rtable.select([0, 1, 2])
app.remove_selected()
eq_(self.rtable.selected_indexes, []) # no exception
def test_selectPowerMarkerRows_after_sort(self, do_setup):
app = self.app
objects = self.objects
self.rtable.power_marker = True
self.rtable.sort("name", False)
self.rtable.select([0, 1, 2])
eq_(len(app.selected_dupes), 3)
assert app.selected_dupes[0] is objects[4]
assert app.selected_dupes[1] is objects[2]
assert app.selected_dupes[2] is objects[1]
def test_toggle_selected_mark_state(self, do_setup):
app = self.app
objects = self.objects
app.toggle_selected_mark_state()
eq_(app.results.mark_count, 0)
self.rtable.select([1, 4])
app.toggle_selected_mark_state()
eq_(app.results.mark_count, 2)
assert not app.results.is_marked(objects[0])
assert app.results.is_marked(objects[1])
assert not app.results.is_marked(objects[2])
assert not app.results.is_marked(objects[3])
assert app.results.is_marked(objects[4])
def test_toggle_selected_mark_state_with_different_selected_state(self, do_setup):
        # When marking selected dupes with a heterogeneous selection, mark all selected dupes. When
        # it's homogeneous, simply toggle.
app = self.app
self.rtable.select([1])
app.toggle_selected_mark_state()
# index 0 is unmarkable, but we throw it in the bunch to be sure that it doesn't make the
        # selection heterogeneous when it shouldn't.
self.rtable.select([0, 1, 4])
app.toggle_selected_mark_state()
eq_(app.results.mark_count, 2)
app.toggle_selected_mark_state()
eq_(app.results.mark_count, 0)
def test_refreshDetailsWithSelected(self, do_setup):
self.rtable.select([1, 4])
eq_(self.dpanel.row(0), ("Filename", "bar bleh", "foo bar"))
self.dpanel.view.check_gui_calls(["refresh"])
self.rtable.select([])
eq_(self.dpanel.row(0), ("Filename", "---", "---"))
self.dpanel.view.check_gui_calls(["refresh"])
def test_makeSelectedReference(self, do_setup):
app = self.app
objects = self.objects
groups = self.groups
self.rtable.select([1, 4])
app.make_selected_reference()
assert groups[0].ref is objects[1]
assert groups[1].ref is objects[4]
def test_makeSelectedReference_by_selecting_two_dupes_in_the_same_group(
self, do_setup
):
app = self.app
objects = self.objects
groups = self.groups
self.rtable.select([1, 2, 4])
# Only [0, 0] and [1, 0] must go ref, not [0, 1] because it is a part of the same group
app.make_selected_reference()
assert groups[0].ref is objects[1]
assert groups[1].ref is objects[4]
def test_removeSelected(self, do_setup):
app = self.app
self.rtable.select([1, 4])
app.remove_selected()
eq_(len(app.results.dupes), 1) # the first path is now selected
app.remove_selected()
eq_(len(app.results.dupes), 0)
def test_addDirectory_simple(self, do_setup):
        # There's already a directory in self.app, so adding another one makes two of them
app = self.app
# any other path that isn't a parent or child of the already added path
otherpath = Path(op.dirname(__file__))
app.add_directory(otherpath)
eq_(len(app.directories), 2)
def test_addDirectory_already_there(self, do_setup):
app = self.app
otherpath = Path(op.dirname(__file__))
app.add_directory(otherpath)
app.add_directory(otherpath)
eq_(len(app.view.messages), 1)
assert "already" in app.view.messages[0]
def test_addDirectory_does_not_exist(self, do_setup):
app = self.app
app.add_directory("/does_not_exist")
eq_(len(app.view.messages), 1)
assert "exist" in app.view.messages[0]
def test_ignore(self, do_setup):
app = self.app
self.rtable.select([4]) # The dupe of the second, 2 sized group
app.add_selected_to_ignore_list()
eq_(len(app.ignore_list), 1)
self.rtable.select([1]) # first dupe of the 3 dupes group
app.add_selected_to_ignore_list()
# BOTH the ref and the other dupe should have been added
eq_(len(app.ignore_list), 3)
def test_purgeIgnoreList(self, do_setup, tmpdir):
app = self.app
p1 = str(tmpdir.join("file1"))
p2 = str(tmpdir.join("file2"))
open(p1, "w").close()
open(p2, "w").close()
dne = "/does_not_exist"
app.ignore_list.Ignore(dne, p1)
app.ignore_list.Ignore(p2, dne)
app.ignore_list.Ignore(p1, p2)
app.purge_ignore_list()
eq_(1, len(app.ignore_list))
assert app.ignore_list.AreIgnored(p1, p2)
assert not app.ignore_list.AreIgnored(dne, p1)
def test_only_unicode_is_added_to_ignore_list(self, do_setup):
def FakeIgnore(first, second):
if not isinstance(first, str):
self.fail()
if not isinstance(second, str):
self.fail()
app = self.app
app.ignore_list.Ignore = FakeIgnore
self.rtable.select([4])
app.add_selected_to_ignore_list()
def test_cancel_scan_with_previous_results(self, do_setup):
# When doing a scan with results being present prior to the scan, correctly invalidate the
# results table.
app = self.app
app.JOB = Job(1, lambda *args, **kw: False) # Cancels the task
add_fake_files_to_directories(
app.directories, self.objects
) # We want the scan to at least start
app.start_scanning() # will be cancelled immediately
eq_(len(app.result_table), 0)
def test_selected_dupes_after_removal(self, do_setup):
# Purge the app's `selected_dupes` attribute when removing dupes, or else it might cause a
# crash later with None refs.
app = self.app
app.results.mark_all()
self.rtable.select([0, 1, 2, 3, 4])
app.remove_marked()
eq_(len(self.rtable), 0)
eq_(app.selected_dupes, [])
def test_dont_crash_on_delta_powermarker_dupecount_sort(self, do_setup):
# Don't crash when sorting by dupe count or percentage while delta+powermarker are enabled.
# Ref #238
self.rtable.delta_values = True
self.rtable.power_marker = True
self.rtable.sort("dupe_count", False)
# don't crash
self.rtable.sort("percentage", False)
# don't crash
class TestCaseDupeGuru_renameSelected:
def pytest_funcarg__do_setup(self, request):
tmpdir = request.getfuncargvalue("tmpdir")
p = Path(str(tmpdir))
fp = open(str(p["foo bar 1"]), mode="w")
fp.close()
fp = open(str(p["foo bar 2"]), mode="w")
fp.close()
fp = open(str(p["foo bar 3"]), mode="w")
fp.close()
files = fs.get_files(p)
for f in files:
f.is_ref = False
matches = engine.getmatches(files)
groups = engine.get_groups(matches)
g = groups[0]
g.prioritize(lambda x: x.name)
app = TestApp()
app.app.results.groups = groups
self.app = app.app
self.rtable = app.rtable
self.rtable.refresh()
self.groups = groups
self.p = p
self.files = files
def test_simple(self, do_setup):
app = self.app
g = self.groups[0]
self.rtable.select([1])
assert app.rename_selected("renamed")
names = [p.name for p in self.p.listdir()]
assert "renamed" in names
assert "foo bar 2" not in names
eq_(g.dupes[0].name, "renamed")
def test_none_selected(self, do_setup, monkeypatch):
app = self.app
g = self.groups[0]
self.rtable.select([])
monkeypatch.setattr(logging, "warning", log_calls(lambda msg: None))
assert not app.rename_selected("renamed")
msg = logging.warning.calls[0]["msg"]
eq_("dupeGuru Warning: list index out of range", msg)
names = [p.name for p in self.p.listdir()]
assert "renamed" not in names
assert "foo bar 2" in names
eq_(g.dupes[0].name, "foo bar 2")
def test_name_already_exists(self, do_setup, monkeypatch):
app = self.app
g = self.groups[0]
self.rtable.select([1])
monkeypatch.setattr(logging, "warning", log_calls(lambda msg: None))
assert not app.rename_selected("foo bar 1")
msg = logging.warning.calls[0]["msg"]
assert msg.startswith("dupeGuru Warning: 'foo bar 1' already exists in")
names = [p.name for p in self.p.listdir()]
assert "foo bar 1" in names
assert "foo bar 2" in names
eq_(g.dupes[0].name, "foo bar 2")
class TestAppWithDirectoriesInTree:
def pytest_funcarg__do_setup(self, request):
tmpdir = request.getfuncargvalue("tmpdir")
p = Path(str(tmpdir))
p["sub1"].mkdir()
p["sub2"].mkdir()
p["sub3"].mkdir()
app = TestApp()
self.app = app.app
self.dtree = app.dtree
self.dtree.add_directory(p)
self.dtree.view.clear_calls()
def test_set_root_as_ref_makes_subfolders_ref_as_well(self, do_setup):
# Setting a node state to something also affect subnodes. These subnodes must be correctly
# refreshed.
node = self.dtree[0]
eq_(len(node), 3) # a len() call is required for subnodes to be loaded
subnode = node[0]
node.state = 1 # the state property is a state index
node = self.dtree[0]
eq_(len(node), 3)
subnode = node[0]
eq_(subnode.state, 1)
self.dtree.view.check_gui_calls(["refresh_states"])
|
mahmutf/dupeguru
|
core/tests/app_test.py
|
Python
|
gpl-3.0
| 20,618
|
hours = float(raw_input("How many hours?"))
rate = float(raw_input("Hourly rate?"))
if hours > 40:
pay = 40 * rate + (hours - 40) * rate * 1.5
else :
pay = hours * rate
print pay
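# Worked example (illustrative): with hours = 45 and rate = 10.0,
# pay = 40 * 10.0 + (45 - 40) * 10.0 * 1.5 = 475.0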
|
RobMcZag/learning-python
|
Chapter3/salary2.py
|
Python
|
gpl-3.0
| 191
|
#coding=utf-8
from collections import defaultdict
import numpy as np
import pandas as pd
import sqlite3,csv
import pandas.io.sql as sql
# package-depend : pygraph, python-graph-core
# pip install pygraph
# pip install python-graph-core
from pygraph.classes.digraph import digraph
from pygraph.algorithms.pagerank import pagerank
from numpy import nan as NA
import math
import os
import glob
import PM
# |paper_index | paper_title | paper_authors | paper_year | paper_publish | paper_reference |
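# A sketch of one input record as load_paper below expects it (field values are
# illustrative; the prefixes and the blank-line terminator are inferred from the parser):
#   #*An Example Paper Title
#   #@Alice,Bob
#   #t2012
#   #cSIGMOD
#   #index 1001
#   #%42
#   #%43
#   <a blank line ends the record>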
def load_paper(src_file,chunck = 20000):
print("- - - - - - - - - - - begin to load " + src_file)
paper_column = {'#i':'paper_index',
'#*':'paper_title',
'#@':'paper_authors',
'#t':'paper_year',
'#c':'paper_conf',
'#%':'paper_reference'}
with open(src_file) as f:
nu = 1 ;
paper = {}
columnId =""
flag = False
paper_data={}
for line in f:
if(line != "\n"):
f2 = line.strip()[0:2]
item = line.strip()[2:]
if (f2== '#i'):
columnId = line[6:].strip()
elif f2 == '#%':
if paper_column['#%'] not in paper.keys():
paper[paper_column['#%']] = str(item)
else:
paper[paper_column['#%']] += ","+str(item)
else:
paper[paper_column[f2]] = line[2:].strip()
else:
flag = True
if flag:
# obj = pd.Series(paper)
paper_data[columnId] = paper;
flag = False
nu +=1
paper={}
if nu % chunck == 0:
dict2dataframe(paper_data,int(nu/chunck))
paper_data={}
dict2dataframe(paper_data,nu)
print("- - - - - - - -- - -finished load " + src_file)
f.close()
def dict2dataframe(paper_data,nu,csv_file=PM.tmp_path):
columns =['paper_title','paper_authors',
'paper_year','paper_conf','paper_reference']
paper_dataframe = pd.DataFrame(paper_data,index=columns)
paper_dataframe = paper_dataframe.T
paper_dataframe.index.name = 'paper_index'
paper_dataframe.reindex(columns = columns)
csv = csv_file+"tmp-"+str(nu)+".csv"
paper_dataframe.to_csv(csv,sep='|',header=0)
print(csv)
def merge_paper(result, paper= PM.tmp_path):
print("- - - - -- - - - begin to merge - - - - - -- - - " + paper)
csv_list = glob.glob(paper+"/*-*.csv")
for p in csv_list:
f = open(p,'r').read()
with open(result,'a') as t:
t.write(f)
print("meger - > " + p)
os.remove(p)
print("- - - - - - - - - - - finished merge, the reusult is ---->"+result)
columns =['paper_index','paper_title','paper_authors',
'paper_year','paper_publish','paper_reference']
papers = pd.read_csv(result,sep=u'|',header=None)
papers.columns = columns;
# papers['paper_index'] = papers['paper_index'].map(lambda x: int(x))
frame = papers.sort_values(by = 'paper_index')
frame.to_csv(result,sep =u'|',header = 1,index = False)
# -----------------------------------------------------------------------------------------
def transform_src(src_file=PM.task3_papers_txt,paper_info_result=PM.paper_info):
if glob.glob(paper_info_result):
os.remove(paper_info_result)
load_paper(src_file)
merge_paper(paper_info_result)
# ----------------------------------------------------------------------------------------
# transform_src()
# ----------------------------------------------------------------------------------------
# remove the duplicated part of reference
def remove_dup_ref(paper_info_result=PM.paper_info):
print('start to read.........')
pi = pd.read_csv(paper_info_result,sep=u'|',header=0,index_col='paper_index')
t = pi['paper_reference'].dropna()
pi['paper_reference'] = t.map(lambda x : ','.join(set(x.split(','))))
print('write to file.........')
pi.to_csv(paper_info_result,sep=u'|',index=True,header=1)
# remove_dup_ref()
# ----------------------------------------------------------------------------------------
# ----> paper_info.csv
# (consumed by build_db_tables.py to make data operations easier)
# ----------------------------------------------------------------------------------------
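# An illustrative row of the pipe-separated paper_info.csv produced above (values are
# made up; the column order follows the `columns` list used in merge_paper):
#   1001|An Example Paper Title|Alice,Bob|2012|SIGMOD|42,43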
# author's feature
#
class task3_feature():
"""docstring for task3"""
def __init__(self):
self.conn = sqlite3.connect(PM.db)
print("="*60)
print('extracting features......')
print("="*60)
# -------------------------------------------------------------------------
def pp_place(self,author_tab,i):
qry = 'select author_name,sum(times) as num_%d\
from PAPER_PLACE_COUNT\
where place = %d\
group by author_name'%(i,i)
tt = sql.read_sql_query(qry,self.conn,index_col='author_name')
author_tab['pp_%d_num'%(i)] = pd.Series(tt['num_%d'%(i)])
def author_paper_place(self,N=10):
self.conn = sqlite3.connect(PM.db)
print("------place feature....---")
query = 'select author_name from author'
author_tab = sql.read_sql_query(query,self.conn,index_col = 'author_name')
# dataframe: author_name as the index
# self.conn.execute('drop table if exists PAPER_PLACE_COUNT')
paper_count_qry='create table If Not Exists PAPER_PLACE_COUNT as\
select author_name,place, count(*) AS times\
from write\
group by author_name, place'
self.conn.execute(paper_count_qry)
print('create table PAPER_PLACE_COUNT ....')
        # total_pp_num: how many papers has the author published in total?
qry = 'select author_name, sum(times) as total_num \
from PAPER_PLACE_COUNT\
group by author_name'
total_pp = sql.read_sql_query(qry,self.conn,index_col='author_name')
author_tab['pp_total_num'] = pd.Series(total_pp['total_num'])
        # pp_often_place | pp_often_place_count: the author's most frequent paper position and how often it occurs
qry = 'select author_name,max(times) As often_times,place AS often_place \
from PAPER_PLACE_COUNT\
group by author_name'
often_place = sql.read_sql_query(qry,self.conn,index_col='author_name')
author_tab['pp_often_place'] = often_place['often_place']
author_tab['pp_often_place_count'] = often_place['often_times']
# pp_last_place: usually, the boss's position is the last one!
qry = 'select author_name, count(*) AS last_place from \
(select paper_index,author_name,max(place) from write group by paper_index)\
group by author_name'
last_place = sql.read_sql_query(qry,self.conn,index_col='author_name')
author_tab['pp_last_place_count'] = last_place['last_place']
# 'select * from write a where (select count(*) from write where paper_index = a.paper_index and place > a.place) < 3 order by a.paper_index,a.place desc;'
qry = 'select author_name,count(*) as last_place_2 from\
()\
group by author_name'
# pp_(i)_num : position= 1,2,...N ?
for i in range(1,int(N),2):
self.pp_place(author_tab,i)
print(author_tab[:2])
author_tab.to_csv(PM.author_feature_path+'place.csv',sep='|',header=1,index = True)
self.conn.commit()
self.conn.close()
# -------------------------------------------------------------------------------------
def place_rank(self,auth_tabl,i,rk='<='):
qry = 'select author_name,sum(times) as num_%d\
from PAPER_PLACE_COUNT\
where place %s %d\
group by author_name'%(i,rk,i)
tt = sql.read_sql_query(qry,self.conn,index_col='author_name')
f ={}
f['<=']='top'
f['>']='under'
auth_tabl['%s_place_%d_num'%(f[rk],i)] = pd.Series(tt['num_%d'%(i)])
print('%s_place_%d_num'%(f[rk],i))
def author_place_rank(self):
self.conn = sqlite3.connect(PM.db)
print("------place ranking feature....---")
query = 'select author_name from author'
author_tab = sql.read_sql_query(query,self.conn,index_col = 'author_name')
for i in range(1,16,1):
self.place_rank(author_tab,i,rk='<=')
# print('rank %d ....'%i)
for i in range(2,21,2):
self.place_rank(author_tab,i,rk='>')
# print('rank %d ....'%i)
print('to csv ........')
author_tab.to_csv(PM.author_feature_path+'toprank.csv',sep='|',header=1,index = True)
# -----------------------------------------------------------------------------------
    # how many papers the author published in each year (2013, 2012, ...):
# def author_paper_year(self):
# self.conn = sqlite3.connect(PM.db)
# print('------paper----year---csv-------')
# qry = 'select author_name,paper_year\
# from author_paper\
# '# author_pi_place
# year = sql.read_sql_query(qry,self.conn)
# year = year[year['paper_year'].astype(np.int64) >= 2000]
# author_year = year.groupby(['author_name','paper_year']).size().unstack()
# print(author_year[:2])
# author_year.to_csv(PM.author_feature_path+'year.csv',sep=u'|',header =1,index = True)
# -----------------------------------------------------------------------------------
def pub_year(self,author_year,year,i,rk = '>=',f = {'>=':'after','<':'before'}):
year['paper_year'] = year['paper_year'].astype(np.int64)
if rk == '>=':
year = year[year['paper_year'] >= i]
elif rk == '<':
year = year[year['paper_year'] < i]
author_year['%s_%d_num'%(f[rk],i)] = year.groupby(['author_name'])['year_count'].agg('sum')
print('%s_%d_num'%(f[rk],i))
def author_year_count(self):
self.conn = sqlite3.connect(PM.db)
print('publish paper for years ........')
query = 'select author_name from author'
author_tab = sql.read_sql_query(query,self.conn,index_col = 'author_name')
qry = 'select author_name,paper_year,count(*) as year_count\
from author_paper group by author_name,paper_year'
year = sql.read_sql_query(qry,self.conn)
for i in range(2013,1999,-1):
self.pub_year(author_tab,year,i,rk='>=')
for i in range(2015,1980,-3):
self.pub_year(author_tab,year,i,rk='<')
print('to csv ..........')
author_tab.to_csv(PM.author_feature_path+'countyear.csv',sep=u'|',header =1,index = True)
# -----------------------------------------------------------------------------------------
    # in how many venues were the author's papers published?
def author_paper_publish(self):
print('------paper----publish---csv-------')
self.conn = sqlite3.connect(PM.db)
qry = 'select author_name,paper_publish,count(*) as pub_count\
from author_paper \
group by author_name,paper_publish\
'# author_pi_place
publish = sql.read_sql_query(qry,self.conn)
print(str(publish.shape)+'<------publish ')
        # publication variety: how many distinct venues has the author published in?
tmp = publish.groupby('author_name')['paper_publish'].unique()
tmp = tmp.groupby('author_name').size()
print('generate pub information .... ')
pub = pd.DataFrame(tmp)
pub.index.name = 'author_name'
pub.columns = ['author_pub_kinds']
# publish statistics information
pub['author_pub_max_times'] = publish.groupby(['author_name'])['pub_count'].agg('max')
pub['author_pub_mean_times'] = publish.groupby(['author_name'])['pub_count'].agg('mean')
pub['author_pub_median_times'] = publish.groupby(['author_name'])['pub_count'].agg('median')
pub.to_csv(PM.author_feature_path+'publish.csv',sep=u'|',header =1,index = True)
# ---------------------------------------------------------------------------------------------------
def author_paper_coauthor(self):
self.conn = sqlite3.connect(PM.db)
print("----coauthor--csv----")
qry = 'select author_name,paper_index\
from write'
ai = sql.read_sql_query(qry,self.conn)
sz = ai.groupby('paper_index').size()
ai['sz'] = ai['paper_index'].map(sz)
coauthor_df = ai.groupby('author_name')['sz'].agg([\
('author_pub_max_coauthor','max'),\
('author_pub_mean_coauthor','mean'),\
('author_pub_median_coauthor','median'),\
('author_pub_min_coauthor','min'),\
('author_pub_sum_coauthor','sum')])
coauthor_df.to_csv(PM.author_feature_path+"coauthor.csv",header=1,index=True,sep=u'|')
print(coauthor_df[:2])
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def maiden_work(self):
print('maiden work............... ')
conn = sqlite3.connect(PM.db)
qry = 'select author_name,paper_index,min(paper_year) as f3_maiden_time\
from author_paper group by author_name'
maiden = sql.read_sql_query(qry,conn,index_col = 'author_name')
print('maiden paper index finished .......!')
#
qry = 'select * from paper_pr_cc_year_pub'
pr = sql.read_sql_query(qry,conn,index_col='paper_index')
#
print('maiden + pagerank = ................')
maiden['paper_index'] = maiden['paper_index'].astype(np.int64)
pr.index = pr.index.astype(np.int64)
maiden = pd.merge(maiden,pr,left_on='paper_index',right_index=True,how = 'left')
print('page rank of maiden work .......')
#
qry = 'select * from publish_rank'
pub = sql.read_sql_query(qry,conn,index_col='paper_publish')
maiden = pd.merge(maiden,pub,left_on = 'paper_publish',right_index=True,how='left')
print('first publishment surely influence the author ....... ')
#
maiden['f3_maiden_time'] = 2017 - maiden['f3_maiden_time'].astype(np.int64)
maiden = maiden.drop(['paper_index','paper_year','paper_publish'],axis=1)
maiden.to_csv(PM.author_feature_path+'f3_maiden.csv',sep='|',header=1,index=True)
print(maiden[:6])
print(' * -*'*20)
def guamin_work(self):
print("gua ming work ............")
conn = sqlite3.connect(PM.db)
qry = 'select author_name,count(*) as gm_last_times from (select author_name, paper_index, max(place) from write \
group by author_name, paper_index) group by author_name'
gm = sql.read_sql_query(qry,conn,index_col='author_name')
print(gm[:6])
gm.to_csv(PM.author_feature_path+'gm_last.csv',header=1,index=True,sep='|')
def place_k_year_n_citation(self,qry1,qry2,filename,k):
print('k place work .......')
conn = sqlite3.connect(PM.db)
author = sql.read_sql_query(qry1,conn)
paper = sql.read_sql_query(qry2,conn)
author['paper_index'] = author['paper_index'].astype(np.int64)
paper['paper_index'] = paper['paper_index'].astype(np.int64)
info = pd.merge(author,paper,left_on = 'paper_index',right_on = 'paper_index',how = 'left')
group = info.groupby('author_name')
df1 = group['paper_citations'].agg([
('max_citation_'+str(k),'max'),
('sum_citation_'+str(k),'sum'),
('median_citation_'+str(k),'median')
])
df2 = 1e8*group['pp_ranking'].agg([
('max_pr_'+str(k),'max'),
('sum_pr_'+str(k),'sum'),
('median_pr_'+str(k),'median')
])
df = pd.concat([df1,df2],axis=1)
df[str(k)+'_count'] = group.size()
df.to_csv(PM.author_feature_path+filename,sep='|',header = 1, index = True)
print(df[:6])
print(' * -*'*20)
def place_1_work_citation(self):
print('place_1_work_citation ..........')
qry1 = 'select author_name, paper_index from write where place=1'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year < 2014'
self.place_k_year_n_citation(qry1,qry2,'f3_1111_citation.csv',1111)
def place_1_work_3y_citation(self):
print('place_1_work_3y_citation ..........')
qry1 = 'select author_name, paper_index from write where place=1'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year < 2014 and paper_year >=2010'
self.place_k_year_n_citation(qry1,qry2,'f3_13y_citation.csv',13)
def place_all_work_citation(self):
print('place_all_work_citation ..............')
qry1 = 'select author_name, paper_index from write'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year < 2014'
self.place_k_year_n_citation(qry1,qry2,'f3_10000_citation.csv',10000)
def place_all_before_2000_citation(self):
print('place_all_before_2000_citation ..........')
qry1 = 'select author_name, paper_index from write'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year < 2000'
self.place_k_year_n_citation(qry1,qry2,'f3_1999_citation.csv',1999)
def place_all_in_2000_2010citation(self):
print('place_all_in_2000_2010citation ..........')
qry1 = 'select author_name, paper_index from write'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year >= 2000 and paper_year < 2010'
self.place_k_year_n_citation(qry1,qry2,'f3_2009_citation.csv',2009)
def place_all_in_2010_2014citation(self):
print('place_all_in_2010_2014citation .........')
qry1 = 'select author_name, paper_index from write'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year >= 2010 and paper_year < 2014'
self.place_k_year_n_citation(qry1,qry2,'f3_2013_citation.csv',2013)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def author_cited_count(self):
self.conn = sqlite3.connect(PM.db)
print('----author---been---cited-----')
qry = 'select p_citer , p_cited from reference'
citation = sql.read_sql_query(qry,self.conn)
sz = citation.dropna().groupby('p_cited').size()
cited_times = pd.DataFrame(sz)
cited_times.columns = ['cited_times']
cited_times.index.name = 'paper_index'
#
author_tab = sql.read_sql_query('select author_name,paper_index from write',\
self.conn, index_col='paper_index')
df = pd.merge(author_tab,cited_times,left_index=True,right_index=True,how='inner')
print(df.columns)
author_cited = df.groupby('author_name')['cited_times'].agg([\
('cited_sum_times','sum'),\
('cited_mean_times','mean'),\
('cited_max_times','max'),\
('cited_median_times','median'),\
('cited_std_times','std'),\
('cited_min_times','min')
])
author_cited.index.name = 'author_name'
author_cited.to_csv(PM.author_feature_path+'citedtimes.csv',header=1,index=True,sep=u'|')
# the quality of the citation ?
def author_paper_cited(self):
self.conn = sqlite3.connect(PM.db)
print("-------paper--citation--info--csv-----")
qry = 'select p_citer , p_cited from reference'
er_ed = sql.read_sql_query(qry,self.conn)
sz = er_ed.dropna().groupby('p_cited').size()
er_ed['sz'] = er_ed['p_cited'].map(sz)
citation_df = er_ed.groupby('p_citer')['sz'].agg([\
('author_pub_max_cited','max'),\
('author_pub_mean_cited','mean'),\
('author_pub_median_cited','median'),\
('author_pub_min_cited','min'),\
('author_pub_sum_cited','sum')])
# citation_df.to_csv(PM.paper_citation,header=1,index=True,sep=u'|')
# def author_paper_quality(self):
# citation_df = pd.read_csv(PM.paper_citation,header=0,index_col='p_citer',sep=u'|')
citation_df.index.name = 'paper_index'
citation_df.index = citation_df.index.astype(np.int64)
qry = 'select author_name,paper_index from write'
author = sql.read_sql_query(qry,self.conn,index_col='paper_index')
author.index = author.index.astype(np.int64)
df = pd.merge(author,citation_df,left_index=True,right_index=True,how='left')
print('start generating information ... ')
df1 = df.groupby('author_name')['author_pub_max_cited'].agg([\
('pubmaxcited_max','max'),\
('pubmaxcited_min','min'),\
('pubmaxcited_mean','mean'),\
('pubmaxcited_median','median'),\
('pubmaxcited_sum','sum'),\
('pubmaxcited_std','std')
])
df2 = df.groupby('author_name')['author_pub_mean_cited'].agg([\
('pubmeancited_max','max'),\
('pubmeancited_min','min'),\
('pubmeancited_mean','mean'),\
('pubmeancited_sum','sum'),\
('pubmeancited_std','std')
])
df3 = df.groupby('author_name')['author_pub_sum_cited'].agg([\
('pubsumcited_max','max'),\
('pubsumcited_min','min'),\
('pubsumcited_mean','mean'),\
('pubsumcited_median','median'),\
('pubsumcited_sum','sum'),\
('pubsumcited_std','std')
])
citation_df = pd.concat([df1,df2,df3],axis=1)
print('to csv ......')
citation_df.to_csv(PM.author_feature_path+"refquality.csv",header=1,index=True,sep=u'|')
# -----------------------------------------------------------------------------------------
# tf = task3_feature()
# tf.author_paper_place()
#
# tf.author_year_count()
# tf.author_paper_publish()
# tf.author_paper_coauthor()
# tf.author_paper_cited()
# tf.author_cited_count()
# -----------------------------------------------------------------------------------------
# paper ranking
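# A minimal sketch of the pygraph PageRank API used in paper_rank below
# (toy three-node cycle, illustrative only):
#   g = digraph()
#   g.add_nodes([1, 2, 3])
#   g.add_edge((1, 2)); g.add_edge((2, 3)); g.add_edge((3, 1))
#   scores = pagerank(g)   # dict of node -> rank; a symmetric cycle gives ~1/3 each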
def paper_rank():
print("start page ranking .....")
dg = digraph()
conn = sqlite3.connect(PM.db)
qry = 'select p_citer,p_cited from reference'
p_id = sql.read_sql_query(qry,conn)
print(str(p_id.shape)+'<---------p_id')
citer = p_id.p_citer.unique()
p_id = p_id.dropna(axis=0)
cited = p_id.p_cited.unique()
nd = set(citer).union(set(cited))
nd = list(nd)
print('node is created .....')
# add nodes
nodes = np.array(nd).astype(np.int64)
dg.add_nodes(nodes)
print("add nodes finished .... ")
# add edges
edges = [x for x in zip(p_id['p_citer'].astype(np.int64),p_id['p_cited'].astype(np.int64))]
for ed in edges:
dg.add_edge(ed)
print('add edges finished ....')
pg = pagerank(dg, damping_factor=0.85, max_iterations=100, min_delta=1e-06)
pprk = pd.DataFrame(pd.Series(pg))
pprk.columns = ['pp_ranking']
pprk.index.name = 'paper_index'
pprk.to_csv(PM.paper_rank,sep=u'|',header=1,index=True)
print(pprk[:2])
# paper_rank()
def author_rank():
qry = 'select author_name,paper_index from write'
conn = sqlite3.connect(PM.db)
author = sql.read_sql_query(qry,conn,index_col='paper_index')
author.index = author.index.astype(np.int64)
pp_rank = pd.read_csv(PM.paper_rank,sep=u'|',header=0,index_col='paper_index')
pp_rank['pp_ranking'] = pp_rank['pp_ranking']*1e8
print('reading finished ---> %s and %s'%(author.shape,pp_rank.shape))
df = pd.merge(author,pp_rank,left_index=True,right_index=True,how='left')
df = df.groupby('author_name')['pp_ranking'].agg([
('pr_sum_value','sum'),\
('pr_mean_value','mean'),\
('pr_max_value','max'),\
('pr_median_value','median'),\
('pr_min_value','min'),\
('pr_std_value','std')
])
df.index.name='author_name'
df.to_csv(PM.author_feature_path+'authorrank.csv',header=1,index=True,sep=u'|')
# author_rank()
def publish_rank():
#
print('publish_rank ...........')
conn = sqlite3.connect(PM.db)
qry = 'select paper_publish, paper_citations \
from paper_pr_cc_year_pub group by paper_publish'
pub = sql.read_sql_query(qry,conn)
    pub_stats = pub.groupby('paper_publish')['paper_citations'].agg([
        ('citation_sums','sum'),
        ('citation_mean','mean'),
        ('citation_median','median'),
        ('citation_max','max')
        ])
    pub_stats['pubpaper_nums'] = pub.groupby('paper_publish').size()
    nx = pub_stats.index.dropna()
    pub_stats = pub_stats.loc[nx]
    pub_stats.to_sql('publish_rank',conn,index=True)
    # pub_stats.to_csv(PM.publish_rank,header=1,index=True,sep='|')
    print('publish_rank ...........!!')
    print(pub_stats[:10])
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# --------------------------^--feature of author ^-----------------------------------
# --------------------------|--------------------|-----------------------------------
# --------------------------|--------------------|-----------------------------------
# -----------------------------------------------------------------------------------
def merge_feature(path = [PM.author_feature_path]):
print('merge feature...')
conn = sqlite3.connect(PM.db)
query = 'select author_name from author'
author_desc = sql.read_sql_query(query,conn,index_col = 'author_name')
#
fea_list = []
for x in path:
fea_list += glob.glob(x+'*.csv')
#
print(fea_list)
for csv in fea_list:
df = pd.read_csv(csv,sep=u'|',header=0,index_col='author_name')
print(csv + '.....')
author_desc = pd.merge(author_desc,df,left_index=True,right_index=True,how='left')
# author_desc.to_csv(PM.author_description_path+'authordesc.csv',header=1,index=True,sep=u'|')
print('feature merge finished, \n start merge label(trainset.csv) ......')
traincsv = pd.read_csv(PM.task3_train_csv,header=0,index_col='AUTHOR')
print('start to merge columns .....')
df = pd.merge(author_desc,traincsv,left_index=True,right_index=True,how='right')
df.index.name='author_name'
df.to_csv(PM.author_train_tabl,header=1,index=True,sep=u'|')
print('train set generated ....')
validationcsv = pd.read_csv(PM.task3_validation_csv,header=0,index_col='AUTHOR')
df = pd.merge(author_desc,validationcsv,left_index=True,right_index=True,how='right')
df.index.name ='author_name'
df.to_csv(PM.author_validation_tabl,header=1,index=True,sep=u'|')
print('validation set generated...')
# merge_feature()
def sub_task3_file():
result = pd.read_csv(PM.author_pred_path+'task3.csv',sep='\t',index_col = 'authorname',header =0)
result.to_csv(PM.sub_task3,sep='\t',header=1,index = True)
class author_feature():
def __init__(self):
print(' - * -'*20)
print('time and not time related feature work........ ')
def maiden_work(self):
print('maiden work............... ')
conn = sqlite3.connect(PM.db)
qry = 'select author_name,paper_index,min(paper_year) as f3_maiden_time\
from author_paper group by author_name'
maiden = sql.read_sql_query(qry,conn,index_col = 'author_name')
print('maiden paper index finished .......!')
#
qry = 'select * from paper_pr_cc_year_pub'
pr = sql.read_sql_query(qry,conn,index_col='paper_index')
#
print('maiden + pagerank = ................')
maiden['paper_index'] = maiden['paper_index'].astype(np.int64)
pr.index = pr.index.astype(np.int64)
maiden = pd.merge(maiden,pr,left_on='paper_index',right_index=True,how = 'left')
print('page rank of maiden work .......')
#
qry = 'select * from publish_rank'
pub = sql.read_sql_query(qry,conn,index_col='paper_publish')
maiden = pd.merge(maiden,pub,left_on = 'paper_publish',right_index=True,how='left')
print('first publishment surely influence the author ....... ')
#
maiden['f3_maiden_time'] = 2017 - maiden['f3_maiden_time'].astype(np.int64)
maiden = maiden.drop(['paper_index','paper_year','paper_publish'],axis=1)
maiden.to_csv(PM.task3_year_related_path+'f3_maiden.csv',sep='|',header=1,index=True)
print(maiden[:5])
print(' * -*'*20)
def place_k_year_n_citation(self,qry1,qry2,filename,k):
print('k place work .......')
conn = sqlite3.connect(PM.db)
author = sql.read_sql_query(qry1,conn)
paper = sql.read_sql_query(qry2,conn)
author['paper_index'] = author['paper_index'].astype(np.int64)
paper['paper_index'] = paper['paper_index'].astype(np.int64)
info = pd.merge(author,paper,left_on = 'paper_index',right_on = 'paper_index',how = 'left')
group = info.groupby('author_name')
df1 = group['paper_citations'].agg([
('max_citation_'+str(k),'max'),
('sum_citation_'+str(k),'sum'),
('median_citation_'+str(k),'median')
])
df2 = 1e8*group['pp_ranking'].agg([
('max_pr_'+str(k),'max'),
('sum_pr_'+str(k),'sum'),
('median_pr_'+str(k),'median')
])
df = pd.concat([df1,df2],axis=1)
df[str(k)+'_count'] = group.size()
df.to_csv(PM.task3_year_related_path+filename,sep='|',header = 1, index = True)
print(df[:10])
print(' * -*'*20)
def place_1_work_citation(self):
print('place_1_work_citation ..........')
qry1 = 'select author_name, paper_index from write where place=1'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year < 2014'
self.place_k_year_n_citation(qry1,qry2,'f3_1111_citation.csv',1111)
def place_1_work_3y_citation(self):
print('place_1_work_3y_citation ..........')
qry1 = 'select author_name, paper_index from write where place=1'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year < 2014 and paper_year >=2010'
self.place_k_year_n_citation(qry1,qry2,'f3_13y_citation.csv',13)
def place_all_work_citation(self):
print('place_all_work_citation ..............')
qry1 = 'select author_name, paper_index from write'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year < 2014'
self.place_k_year_n_citation(qry1,qry2,'f3_10000_citation.csv',10000)
def place_all_before_2000_citation(self):
print('place_all_before_2000_citation ..........')
qry1 = 'select author_name, paper_index from write'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year < 2000'
self.place_k_year_n_citation(qry1,qry2,'f3_1999_citation.csv',1999)
def place_all_in_2000_2010citation(self):
print('place_all_in_2000_2010citation ..........')
qry1 = 'select author_name, paper_index from write'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year >= 2000 and paper_year < 2010'
self.place_k_year_n_citation(qry1,qry2,'f3_2009_citation.csv',2009)
def place_all_in_2010_2014citation(self):
print('place_all_in_2010_2014citation .........')
qry1 = 'select author_name, paper_index from write'
qry2 = 'select * from paper_pr_cc_year_pub where paper_year >= 2010 and paper_year < 2014'
self.place_k_year_n_citation(qry1,qry2,'f3_2013_citation.csv',2013)
|
nemo-tj/biendata
|
source/task3.py
|
Python
|
gpl-3.0
| 28,256
|
#!/usr/bin/python
# (c) 2015, ravellosystems
#
# author zoza
#
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
# TODO
# multiple IPs per nic
# tags/groups
import random, string
try:
from ravello_sdk import *
HAS_RAVELLO_SDK = True
except ImportError:
HAS_RAVELLO_SDK = False
from ravello_cli import get_diskimage
DOCUMENTATION = '''
---
module: ravello_app
short_description: Create/delete/start/stop an application in ravellosystems
description:
- Create/delete/start/stop an application in ravellosystems and wait for it (optionally) to be 'running'
    - list state will return an FQDN list of existing application hosts with their external services
- blueprint state will create a blueprint from an existing app (must provide blueprint_name)
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['design', 'present', 'started', 'absent', 'stopped','list','blueprint']
username:
description:
- ravello username
password:
description:
- ravello password
service_name:
description:
- Supplied Service name for list state
default: ssh
name:
description:
- application name
description:
description:
- application description
blueprint_id:
description:
- create app, based on this blueprint
#publish options
cloud:
description:
- cloud to publish
region:
description:
- region to publish
publish_optimization:
default: cost
choices: ['cost', 'performance']
application_ttl:
description:
- application autostop in mins
default: -1 # never
    wait:
description:
- Wait for the app to be in state 'running' before returning.
default: True
choices: [ True, False ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 600
blueprint_name:
description:
- Specify a name for a new blueprint based on existing app
blueprint_description:
description:
- Description of new blueprint
app_template:
description:
- Path to a YML file that defines an application infrastructure then creates a blueprint for further processing with follow-on playbooks. Must use state=design
cost_bucket:
description:
        - Name of the Ravello cost bucket to associate the published application with
'''
EXAMPLES = '''
# Create app, based on blueprint, start it and wait for started
- local_action:
module: ravello_app
username: user@ravello.com
password: password
name: 'my-application-name'
description: 'app desc'
blueprint_id: '2452'
wait: True
wait_timeout: 600
state: present
# Create app based on blueprint and publish with performance optimization
- local_action:
module: ravello_app
username: user@ravello.com
password: password
name: 'my-application-name'
description: 'app desc'
publish_optimization: performance
    cloud: AMAZON
region: Oregon
state: present
# List application example
- local_action:
module: ravello_app
name: 'my-application-name'
service_name: 'ssh'
state: list
# Delete application example
- local_action:
module: ravello_app
name: 'my-application-name'
state: absent
# Create blueprint from existing app
- local_action:
module: ravello_app
name: 'my-application-name'
blueprint_name: 'my-application-bp'
blueprint_description: 'Blueprint of app xyz'
state: blueprint
# Create blueprint based on app_template.yml
- local_action:
module: ravello_app
name: 'my-new-baseline'
description: 'My new baseline'
app_template: 'app_template.yml'
state: design
register: design_results
'''
import os
import base64
import getpass
import logging
import logging.handlers
##### Ravello API Wrappers #####
def set_cost_bucket(appID, appType, cost_bucket_name, client):
available_cbs = []
cost_buckets = client.get_cost_buckets(permissions='execute')
for cost_bucket in cost_buckets:
available_cbs.append(cost_bucket['name'])
if cost_bucket['name'] == cost_bucket_name:
client.associate_resource_to_cost_bucket(
cost_bucket['id'],
{'resourceId': appID, 'resourceType': appType})
return
if (cost_bucket_name == "Default") and (len(cost_buckets) >= 1):
client.associate_resource_to_cost_bucket(
cost_buckets[0]['id'],
{'resourceId': appID, 'resourceType': appType})
return
raise Exception("Cost Bucket: " + cost_bucket_name + " - not found. Available cost buckets: " + ', '.join(available_cbs))
return
def get_credentials():
with open(os.path.expanduser("~/.ravello_login"),"r") as pf:
username = pf.readline().strip()
encrypted_password = pf.readline().strip()
password = base64.b64decode(encrypted_password).decode()
return username,password
def get_user_credentials(username):
password = None
if username:
password = getpass.getpass('Enter a Password: ')
else:
#read user credentials from .ravello_login file in user HOMEDIR
username,password = get_credentials()
if not username or not password:
log.error('User credentials are not set')
print('Error: User credentials are not set')
return None,None
return username,password
def initlog(log_file):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logpath=os.path.join(os.getcwd(),log_file)
handler = logging.handlers.RotatingFileHandler(logpath, maxBytes=1048576, backupCount=10)
fmt = '%(asctime)s: %(filename)-20s %(levelname)-8s %(message)s'
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
def connect(username, password):
client = RavelloClient()
try:
client.login(username, password)
except Exception as e:
sys.stderr.write('Error: {!s}\n'.format(e))
log.error('Invalid user credentials, username {0}'.format(username))
print('Error: Invalid user credentials, username {0}'.format(username))
return None
return client
def get_app_id(app_name,client):
app_id=0
for app in client.get_applications():
if app['app_name'].lower() == app_name.lower():
app_id = app['id']
break
if app_id == 0:
        module.fail_json(msg = 'ERROR: Could not find app: %s' % app_name)
return app_id
def get_blueprint_id(blueprint_name,client):
blueprint_id=0
for blueprint in client.get_blueprints():
if blueprint['name'].lower() == blueprint_name.lower():
blueprint_id = blueprint['id']
break
if blueprint_id == 0:
        module.fail_json(msg = 'ERROR: Could not find blueprint: %s' % blueprint_name)
return blueprint_id
def get_image_id(image_name,client):
image_id=0
for image in client.get_images():
if image['name'].lower() == image_name.lower():
image_id = image['id']
break
if image_id == 0:
        module.fail_json(msg = 'ERROR: Could not find VM image named: %s' % image_name)
return image_id
def get_image(image_id,client):
try:
image = client.get_image(image_id)
except Exception as e:
        module.fail_json(msg = 'ERROR: Could not find VM image id: %s' % image_id)
return image
def main():
ch = logging.StreamHandler(log_capture_string)
ch.setLevel(logging.DEBUG)
### Optionally add a formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
### Add the console handler to the logger
logger.addHandler(ch)
argument_spec=dict(
# for nested babu only
url=dict(required=False, type='str'),
state=dict(default='present', choices=['design', 'present', 'started', 'absent', 'stopped', 'list', 'test', 'blueprint','blueprint_delete','blueprint_location']),
username=dict(required=False, type='str'),
password=dict(required=False, type='str'),
name=dict(required=False, type='str'),
app_name=dict(required=False, type='str'),
description=dict(required=False, type='str'),
blueprint_id=dict(required=False, type='str'),
app_template=dict(required=False, default=None, type='path'),
cloud=dict(required=False, type='str'),
region=dict(required=False, type='str'),
publish_optimization=dict(default='cost', choices=['cost', 'performance']),
application_ttl=dict(default='-1', type='int'),
service_name=dict(default='ssh', type='str'),
blueprint_description=dict(required=False, type='str'),
blueprint_name=dict(required=False, type='str'),
wait=dict(type='bool', default=True ,choices=BOOLEANS),
wait_timeout=dict(default=1200, type='int'),
cost_bucket=dict(default='Organization', type='str')
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['blueprint', 'app_template']],
# We really really should support this...
# supports_check_mode = True
)
module_fail.attach_ansible_modle(module)
if not HAS_RAVELLO_SDK:
module.fail_json(msg='ravello_sdk required for this module')
# Get User credentials from Ansible (not too secure) or ENV variables (a little more secure)
username = module.params.get('username', os.environ.get('RAVELLO_USERNAME', None))
password = module.params.get('password', os.environ.get('RAVELLO_PASSWORD', None))
if username and password:
try:
client = RavelloClient(username, password, module.params.get('url'))
except Exception, e:
log_contents = log_capture_string.getvalue()
log_capture_string.close()
            module.fail_json(msg = 'ERROR: Failed to authenticate to Ravello using Ansible-provided credentials %s' % e,stdout='%s' % log_contents)
else:
#Get user credentials from SDK auth cache file (better)
try:
username, password = get_user_credentials(None)
except Exception, e:
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.fail_json(msg = 'ERROR: Failed to retrieve credentials from Ravello SDK credentials cache %s' % e,stdout='%s' % log_contents)
if not username or not password:
module.fail_json(msg = 'ERROR: Unable to get any Ravello credentials!')
try:
client = connect(username, password)
except Exception, e:
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.fail_json(msg = 'ERROR: Failed to authenticate to Ravello using Ravello SDK credentials cache %s' % e,stdout='%s' % log_contents)
state_arg = module.params.get('state')
if state_arg == 'design':
create_blueprint_from_template(client, module)
elif state_arg == 'present':
create_app_and_publish(client, module)
elif state_arg == 'absent':
action_on_app(module, client,
client.delete_application,
lambda: None, 'Deleted')
elif state_arg == 'started':
action_on_app(module, client,
client.start_application,
functools.partial(_wait_for_state,
client,'STARTED',module), 'Started')
elif state_arg == 'stopped':
action_on_app(module, client,
client.stop_application,
functools.partial(_wait_for_state,
client,'STOPPED',module), 'Stopped')
elif state_arg == 'list':
list_app(client, module)
elif state_arg == 'blueprint':
create_blueprint_from_existing_app(module,
client, client.create_blueprint)
elif state_arg == 'blueprint_delete':
action_on_blueprint(module, client,
client.delete_blueprint)
elif state_arg == 'blueprint_location':
action_on_blueprint(module, client,
client.get_blueprint_publish_locations)
elif state_arg == 'test':
module.exit_json(msg = 'Authentication to Ravello successful')
def _wait_for_state(client, state, module):
if module.params.get('wait') == False:
return
wait_timeout = module.params.get('wait_timeout')
app_id = 0
wait_till = time.time() + wait_timeout
while wait_till > time.time():
if app_id > 0:
app = client.get_application(app_id)
else:
app = client.get_application_by_name(module.params.get('app_name'))
app_id = app['id']
states = list(set((vm['state'] \
for vm in app.get('deployment', {}).get('vms', []))))
if "ERROR" in states:
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.fail_json(msg = 'Vm got ERROR state',stdout='%s' % log_contents)
if len(states) == 1 and states[0] == state:
return
time.sleep(10)
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.fail_json(msg = 'Timed out waiting for async operation to complete.',
stdout='%s' % log_contents)
def is_wait_for_external_service(supplied_service,module):
return supplied_service['name'].lower() == \
module.params.get('service_name').lower() and \
supplied_service['external'] == True
def get_list_app_vm_result(app, vm, module):
for supplied_service in vm['suppliedServices']:
if is_wait_for_external_service(supplied_service, module):
for network_connection in vm['networkConnections']:
if network_connection['ipConfig']['id'] == \
supplied_service['ipConfigLuid']:
dest = network_connection['ipConfig'].get('fqdn')
port = int(supplied_service['externalPort'].split(",")[0].split("-")[0])
return (dest,port)
def list_app(client, module):
try:
app_name = module.params.get("app_name")
app = client.get_application_by_name(app_name)
results = []
for vm in app['deployment']['vms']:
if vm['state'] != "STARTED":
continue
(dest,port) = get_list_app_vm_result(app, vm, module)
results.append({'host': dest, 'port': port})
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.exit_json(changed=True, app_name='%s' % app_name,
results='%s' % results,stdout='%s' % log_contents)
except Exception, e:
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.fail_json(msg = '%s' % e,stdout='%s' % log_contents)
def action_on_app(module, client, runner_func, waiter_func, action):
try:
app_name = module.params.get("app_name")
app = client.get_application_by_name(app_name)
runner_func(app['id'])
waiter_func()
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.exit_json(changed=True,
app_name='%s application: %s' %(action, app_name),
stdout='%s' % log_contents)
except Exception, e:
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.fail_json(msg = '%s' % e,stdout='%s' % log_contents)
def create_blueprint_from_existing_app(module, client, runner_func):
app_name = module.params.get("app_name")
app = client.get_application_by_name(app_name)
blueprint_name = module.params.get("blueprint_name")
blueprint_description = module.params.get("blueprint_description")
blueprint_dict = {"applicationId":app['id'],
"blueprintName":blueprint_name, "offline": True,
"description":blueprint_description }
try:
blueprint_id=((runner_func(blueprint_dict))['_href'].split('/'))[2]
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.exit_json(changed=True,
app_name='%s' % app_name,
blueprint_name='%s' % blueprint_name,
blueprint_id='%s' % blueprint_id)
except Exception, e:
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.fail_json(msg = '%s' % e,stdout='%s' % log_contents)
def action_on_blueprint(module, client, runner_func):
if module.params.get("blueprint_id"):
blueprint_id = module.params.get("blueprint_id")
elif module.params.get("blueprint_name"):
blueprint_name = module.params.get("blueprint_name")
blueprint_id = get_blueprint_id(blueprint_name, client)
try:
output = runner_func(blueprint_id)
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.exit_json(changed=True, stdout='%s' % log_contents,
blueprint_id='%s' % blueprint_id, output='%s' % output)
except Exception, e:
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.fail_json(msg = '%s' % e,stdout='%s' % log_contents)
def create_blueprint_from_template(client, module):
app_name = module.params.get("app_name")
# Assert app does not exist in ravello
cap = client.get_applications({'name': app_name})
if cap:
module.fail_json(msg='ERROR: Application %s already exists!' % \
app_name, changed=False)
# Assert blueprint does not exist in ravello
blueprint_name = app_name + "-bp"
bp = client.get_blueprints({'name': blueprint_name})
if bp:
module.fail_json(msg='ERROR: Blueprint %s already exists!' % \
blueprint_name, changed=False)
app_description = module.params.get("description")
# Open local app template
if not module.params.get("app_template"):
module.fail_json(msg='Must supply an app_template for design state.', \
changed=False)
app_template = module.params.get("app_template")
with open(app_template, 'r') as data:
try:
read_app = yaml.load(data)
except yaml.YAMLError as exc:
print(exc)
app_request = {}
# Create random name extension token for app
rand_str = lambda n: ''.join([random.choice(string.lowercase) for i in xrange(n)])
app_request['name'] = "tmp-app-build-" + rand_str(10)
if client.get_applications({'name': app_request ['name'] }):
module.fail_json(msg='ERROR: Temporary application build %s already exists!' % \
app_name, changed=False)
# initialize app
ravello_template_set(app_request, 'description', app_description)
ravello_template_set(app_request, 'design.vms', [])
# Check template is valid
for vm in read_app['vms']:
assert_vm_valid(client, module, vm)
app_request['design']['vms'].append(vm)
# Create the tmp-app in ravello
try:
created_app = client.create_application(app_request)
except Exception, e:
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.fail_json(msg = '%s' % e,stdout='%s' % log_contents,
jsonout='%s' % app_request)
appID = created_app['id']
blueprint_dict = {
"applicationId":appID,
"blueprintName":blueprint_name,
"offline": False,
"description":app_description
}
# Remove the Ravello auto-generated subnet
delete_autogenerated_subnet(client, module, appID)
# Generate subnets if they are defined in the template
# Otherwise generate subnets compatible with defined VM IPs
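    # Overlap check sketch (illustrative, same netaddr calls as the loop below):
    #   IPSet(IPNetwork('10.0.0.0/24')).isdisjoint(IPSet(IPNetwork('10.0.1.0/24')))  # True (disjoint)
    #   IPSet(IPNetwork('10.0.0.0/16')).isdisjoint(IPSet(IPNetwork('10.0.1.0/24')))  # False (overlap)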
if check_for_param(read_app, 'network.subnets', required=False):
netlist = []
for subnet in read_app['network']['subnets']:
netlist.append(IPNetwork(subnet))
netlist = sorted(netlist)
for i in range(len(netlist) - 1):
if (not IPSet(netlist[i]).isdisjoint(IPSet(netlist[i + 1]))):
raise Exception('Overlapping Subnets')
else:
create_subnet_with_ip_pool(client, module, appID, netlist[i])
create_subnet_with_ip_pool(client, module, appID, netlist[len(netlist) - 1])
else:
detect_ips_and_and_create_compatible_subnets(client, module, appID, app_request)
# Get the ravello-assigned internal luids to fix assigned IPs and services
update_app_with_internal_luids(client, module, app_request, appID)
try:
# create bp from tmp-app and delete tmp-app
blueprint_id= \
((client.create_blueprint(blueprint_dict))['_href'].split('/'))[2]
client.delete_application(appID)
module.exit_json(changed=True, app_name='%s' % app_name,
blueprint_name='%s' % blueprint_name,
blueprint_id='%s' % blueprint_id)
except Exception, e:
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.fail_json(msg = '%s' % e,stdout='%s' % log_contents)
def create_app_and_publish(client, module):
#validation
if not module.params.get("blueprint_id"):
module.fail_json(msg='Must supply a blueprint_id', changed=False)
if 'performance' == module.params.get("publish_optimization"):
if not module.params.get("cloud"):
module.fail_json(msg=\
'Must supply a cloud when publish optimization is performance',
changed=False)
if not module.params.get("region"):
module.fail_json(msg=\
'Must supply a region when publish optimization is performance',
changed=False)
app = {
'name': module.params.get("app_name"),
'description': module.params.get("description",''),
'baseBlueprintId': module.params.get("blueprint_id")
}
app = client.create_application(app)
req = {}
if 'performance' == module.params.get("publish_optimization"):
req = {
'id': app['id'],
'preferredRegion': module.params.get("region"),
'optimizationLevel': 'PERFORMANCE_OPTIMIZED'
}
ttl=module.params.get("application_ttl")
if ttl != -1:
ttl =ttl * 60
exp_req = {'expirationFromNowSeconds': ttl}
client.set_application_expiration(app,exp_req)
client.publish_application(app, req)
set_cost_bucket(app['id'], 'application',
module.params.get('cost_bucket'), client)
get_vm_hostnames(app['id'], client, module)
_wait_for_state(client,'STARTED',module)
log_contents = log_capture_string.getvalue()
log_capture_string.close()
module.exit_json(changed=True,
app_name='%s' % module.params.get("app_name"),
stdout='%s' % log_contents,
app_id='%s' % app['id'])
def get_vm_hostnames(app_id, client, module):
published_app = client.get_application(app_id, aspect='deployment')
vm_hostname_dict = {}
for vm in ravello_template_get(published_app, 'deployment.vms'):
if len(vm['hostnames']) < 1:
module.fail_json(msg="Could not obtain vm hostname list from app." +
"VMs must contain at least one internal hostname.")
hostname = vm['hostnames'][0]
vm_hostname_dict[hostname] = {}
vm_hostname_dict[hostname]['internal'] = vm['hostnames']
vm_hostname_dict[hostname]['external'] = vm['externalFqdn']
# import module snippets
import ansible
import os
import functools
import logging
import io
import datetime
import sys
import random  # used by the temporary application name generator
import string  # used by the temporary application name generator
import yaml
import json
import re
from netaddr import *
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
log_capture_string = io.BytesIO()
from ansible.module_utils.basic import *
##### Template Error Checking Definitions #####
def assert_hd_valid(client, module, hd):
check_for_param(hd, 'index')
check_for_param(hd, 'type', valid=['DISK','CDROM'],
default_if_missing='DISK')
check_for_param(hd, 'controller', valid=['virtio', 'ide'],
default_if_missing='virtio')
check_for_param(hd, 'boot', valid=[True, False],
default_if_missing='False')
    check_for_param(hd, 'name',
        default_if_missing=('Disk' + str(hd['index'])))
check_for_param(hd, 'size.unit', valid=['GB', 'MB'],
default_if_missing='GB')
check_for_param(hd, 'size.value',
        valid=(lambda n: (isinstance(n, int) and (n > 0))))
assert_hd_image_exists_in_ravello(client, module, hd)
return hd
def assert_hd_image_exists_in_ravello(client, module, hd):
# Check image name or ID exists in ravello
if 'baseDiskImageId' in hd:
image = get_diskimage(client, hd['baseDiskImageId'])
if image is None:
module.fail_json(msg=\
'FATAL ERROR nonexistent baseDiskImageId %s specified!'
% hd['baseDiskImageId'])
elif 'imageName' in hd:
image = get_diskimage(client, hd['imageName'])
if image is None:
module.fail_json(msg=\
'FATAL ERROR nonexistent imageName %s specified!'
% hd['imageName'])
if 'baseDiskImageId' in hd or 'imageName' in hd:
if hd['size']['value'] < image['size']['value']:
module.fail_json(msg=\
'ERROR HD size value (%s) is smaller than the image (%s)'
% (hd['size']['value'], image['size']['value']))
else:
hd['baseDiskImageId'] = image['id']
def assert_nic_valid(client, module, nic):
check_for_param(nic, 'ipConfig')
check_for_param(nic, 'device.index')
check_for_param(nic, 'device.name',
        default_if_missing=('nic' + str(nic['device']['index'])))
check_for_param(nic, 'device.deviceType', valid=['virtio', 'e1000'],
default_if_missing='virtio')
check_for_param(nic, 'device.useAutomaticMac', valid=[True, False],
default_if_missing=True)
if not nic['device']['useAutomaticMac']:
check_for_param(nic, 'device.mac',
fail_msg='ERROR useAutomaticMac set to False but ' + \
'no static mac set for VM %s NIC index')
auto_ip = check_for_param(nic,
'ipConfig.autoIpConfig',
required=False)
static_ip = check_for_param(nic,
'ipConfig.staticIpConfig', required=False)
if static_ip:
check_for_param(nic, 'ipConfig.staticIpConfig.ip')
check_for_param(nic, 'ipConfig.staticIpConfig.mask')
if (auto_ip == static_ip):
module.fail_json(msg=\
'Error: exactly one of [autoIpConfig,staticIpConfig] required')
def assert_vm_valid(client, module, vm):
#* Set vm description if undef'd
if not 'description' in vm:
vm['description'] = ""
# Check Template Valid
check_for_param(vm, 'description',
default_if_missing="")
check_for_param(vm, 'numCpus')
check_for_param(vm, 'memorySize.value')
check_for_param(vm, 'memorySize.unit',
default_if_missing='GB')
check_for_param(vm, 'supportsCloudInit',
fail_msg='Error: Template must support cloudInit')
check_for_param(vm, 'keypairId')
check_for_param(vm, 'keypairName')
check_for_param(vm, 'userData')
check_for_param(vm, 'stopTimeout',
default_if_missing=300)
check_for_param(vm, 'bootOrder',
default_if_missing=['DISK', 'CDROM'])
check_for_param(vm, 'hardDrives')
check_for_param(vm, 'networkConnections')
# Set vm tag if def'd
if 'tag' in vm:
vm['description'] = vm['description'] + "\ntag:" + vm['tag'] + "\n"
# Set new_vm params
ravello_template_set(vm, 'baseVmId', 0)
ravello_template_set(vm, 'os', 'linux_manuel')
#* set hard drives
for hd in vm['hardDrives']:
hd = assert_hd_valid(client, module, hd)
#* set nics
for nic in vm['networkConnections']:
assert_nic_valid(client, module, nic)
if 'suppliedServices' in vm:
for svc in vm['suppliedServices']:
check_for_param(svc, 'name')
check_for_param(svc, 'ip')
check_for_param(svc, 'portRange')
# add vm to app
return vm
def create_subnet_with_ip_pool(client, module, appID, netip):
# create the vlan
created_app = client.get_application(appID)
check_for_param(created_app, 'design.network.switches',
default_if_missing=[])
new_switch_path = path_for_next_item(created_app, 'design.network.switches')
ravello_template_set(created_app,
new_switch_path + '.networkSegments.0.vlanId', 1)
client.update_application(created_app)
created_app = client.get_application(appID)
check_for_param(created_app, 'design.network.subnets',
default_if_missing=[])
new_subnet_path = path_for_next_item(created_app, 'design.network.subnets')
ravello_template_set(created_app, new_subnet_path + '.ipVersion', 'IPV4')
ravello_template_set(created_app, new_subnet_path + '.mask', str(netip.netmask))
ravello_template_set(created_app, new_subnet_path + '.net', str(netip[0]))
new_switch_network_segment_id = \
ravello_template_get(created_app,
new_switch_path + '.networkSegments.0.id')
ravello_template_set(created_app,
new_subnet_path + '.networkSegmentId',
new_switch_network_segment_id)
client.update_application(created_app)
created_app = client.get_application(appID)
check_for_param(created_app, 'design.network.services.networkInterfaces',
default_if_missing=[])
new_l3_nic_path = path_for_next_item(created_app,
'design.network.services.networkInterfaces')
ravello_template_set(created_app,
new_l3_nic_path + \
'.ipConfigurations.0.staticIpConfig',
{
'ip': str(netip[1]),
'mask': str(netip.netmask)
})
ravello_template_set(created_app,
new_l3_nic_path + \
'.ipConfigurations.1.staticIpConfig',
{
'ip': str(netip[2]),
'mask': str(netip.netmask)
})
client.update_application(created_app)
created_app = client.get_application(appID)
check_for_param(created_app,
'design.network.services.routers.0.ipConfigurationIds',
default_if_missing=[])
router_ip_config_ids = ravello_template_get(created_app,
'design.network.services.routers.0.ipConfigurationIds')
router_ip_config_ids.append(ravello_template_get(created_app,
new_l3_nic_path + '.ipConfigurations.1.id'))
if 'ports' not in ravello_template_get(created_app, new_switch_path):
ravello_template_set(created_app, new_switch_path + '.ports', [])
create_port_on_switch(created_app, new_switch_path,
ravello_template_get(created_app,
new_l3_nic_path + '.id'),
'SERVICES')
client.update_application(created_app)
created_app = client.get_application(appID)
check_for_param(created_app, 'design.network.services.dhcpServers',
default_if_missing=[])
new_dhcp_path = path_for_next_item(created_app,
'design.network.services.dhcpServers')
ravello_template_set(created_app, new_dhcp_path + '.mask', str(netip.netmask))
ravello_template_set(created_app, new_dhcp_path + '.poolStart', str(netip[0]))
ravello_template_set(created_app, new_dhcp_path + '.poolEnd', str(netip[-1]))
ravello_template_set(created_app, new_dhcp_path + '.ipConfigurationId',
ravello_template_get(created_app, new_l3_nic_path + '.ipConfigurations.0.id'))
ravello_template_set(created_app, new_dhcp_path + '.gatewayIpConfigurationId',
ravello_template_get(created_app, new_l3_nic_path + '.ipConfigurations.1.id'))
check_for_param(created_app,
'design.network.services.dnsServers.0.ipConfigurationIds',
default_if_missing=[])
dns_ip_config_ids = ravello_template_get(created_app,
'design.network.services.dnsServers.0.ipConfigurationIds')
dns_ip_config_ids.append(ravello_template_get(created_app,
new_l3_nic_path + '.ipConfigurations.0.id'))
ravello_template_set(created_app, new_dhcp_path + '.dnsIpConfigurationId',
ravello_template_get(created_app, new_l3_nic_path + '.ipConfigurations.0.id'))
client.update_application(created_app)
def delete_autogenerated_subnet(client, module, appID):
created_app = client.get_application(appID)
ravello_template_set(created_app, 'design.network.switches', [])
ravello_template_set(created_app, 'design.network.subnets', [])
ravello_template_set(created_app, 'design.network.services.networkInterfaces', [])
ravello_template_set(created_app, 'design.network.services.dhcpServers', [])
client.update_application(created_app)
def create_port_on_switch(created_app, switch_path, device_id, device_type):
port_path = path_for_next_item(created_app,
switch_path + '.ports')
#ravello_template_set(created_app, port_path, {})
ravello_template_set(created_app,
port_path + '.deviceId',
device_id)
ravello_template_set(created_app,
port_path + '.deviceType',
device_type)
ravello_template_set(created_app,
port_path + '.index',
int(port_path.split('.')[-1]) + 1)
ravello_template_set(created_app,
port_path + '.networkSegmentReferences.0.networkSegmentId',
ravello_template_get(created_app,
switch_path + '.networkSegments.0.id'))
ravello_template_set(created_app,
port_path + '.networkSegmentReferences.0.anyNetworksegment',
False)
ravello_template_set(created_app,
port_path + '.networkSegmentReferences.0.egressPolicy',
'UNTAGGED')
return
def path_for_next_item(app_json, jspath):
return jspath + '.' + str(len(ravello_template_get(app_json, jspath)))
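# Illustrative example (not part of the original module):
# path_for_next_item(app, 'design.network.subnets') returns
# 'design.network.subnets.2' when two subnets already exist at that path.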
def path_from_ip(created_app, path_map, ip_addr):
for net_block, path in path_map.iteritems():
if IPAddress(ip_addr) in IPNetwork(net_block):
return path
raise Exception('no subnet for ip: ' + ip_addr + '...' + json.dumps(path_map))
def create_dhcp_ip_map(created_app):
dhcp_servers = ravello_template_get(created_app,
'design.network.services.dhcpServers')
ip_index_map = {}
for i, dhcp in enumerate(dhcp_servers):
cidr_num = IPAddress(dhcp['mask']).netmask_bits()
net_block = dhcp['poolStart'] + '/' + str(cidr_num)
ip_index_map[net_block] = \
'design.network.services.dhcpServers.' + str(i)
return ip_index_map
def create_subnet_ip_map(created_app):
subnets = ravello_template_get(created_app,
'design.network.subnets')
ip_index_map = {}
for i, subnet in enumerate(subnets):
cidr_num = IPAddress(subnet['mask']).netmask_bits()
net_block = subnet['net'] + '/' + str(cidr_num)
ip_index_map[net_block] = \
'design.network.subnets.' + str(i)
return ip_index_map
def switch_path_from_ip(created_app, subnet_ip_map, ip_addr):
network_segment_id = ravello_template_get(created_app,
path_from_ip(created_app, subnet_ip_map, ip_addr) + '.networkSegmentId')
switches = ravello_template_get(created_app,
'design.network.switches')
for i, switch in enumerate(switches):
if switch['networkSegments'][0]['id'] == network_segment_id:
return 'design.network.switches.' + str(i)
raise Exception('Invalid network segment')
def json_path_list_append(json_item, jspath, value):
item_list = ravello_template_get(json_item, jspath)
item_list.append(value)
def update_app_with_internal_luids(client, module, app_request, appID):
# update vms with ravello auto-gen'd luids
created_app = client.get_application(appID)
reserved_entries = []
hostname_ip_mapping = {}
dhcp_ip_mapping = create_dhcp_ip_map(created_app)
original_subnet_config_ids = ravello_template_get(created_app,
'design.network.subnets.0.ipConfigurationIds')
original_switch_ports = ravello_template_get(created_app,
'design.network.switches.0.ports')
subnet_ip_mapping = create_subnet_ip_map(created_app)
for dhcp in created_app['design']['network']['services']['dhcpServers']:
if 'reservedIpEntries' not in dhcp:
dhcp['reservedIpEntries'] = []
for vm in app_request['design']['vms']:
hostname = vm['hostnames'][0]
hostname_ip_mapping[hostname] = {}
for nic in vm['networkConnections']:
if check_for_param(nic, 'ipConfig.autoIpConfig.reservedIp',
required=False):
hostname_ip_mapping[hostname][nic['name']] = \
nic['ipConfig']['autoIpConfig']['reservedIp']
elif check_for_param(nic, 'ipConfig.staticIpConfig.ip',
required=False):
hostname_ip_mapping[hostname][nic['name']] = \
nic['ipConfig']['staticIpConfig']['ip']
for vm in created_app['design']['vms']:
for nic in vm['networkConnections']:
nic_ipconf_id = nic['ipConfig']['id']
nic_id = nic['id']
nic_name = nic['name']
if nic_name in hostname_ip_mapping[vm['hostnames'][0]]:
nic_ip = hostname_ip_mapping[vm['hostnames'][0]][nic_name]
if 'autoIpConfig' in nic['ipConfig']:
item = {
'ipConfigurationId': nic_ipconf_id,
'ip': nic_ip
}
json_path_list_append(created_app,
path_from_ip(created_app,
dhcp_ip_mapping,
nic_ip) + '.reservedIpEntries',
item)
switch_path = switch_path_from_ip(created_app,
subnet_ip_mapping,
nic_ip)
subnet_ipconfig_path = path_from_ip(created_app,
subnet_ip_mapping,
nic_ip)
else:
switch_path = 'design.network.switches.0'
subnet_ipconfig_path = 'design.network.subnets.0'
json_path_list_append(created_app,
subnet_ipconfig_path + '.ipConfigurationIds',
nic_ipconf_id)
create_port_on_switch(created_app,
switch_path,
nic_id,
'VM')
if 'suppliedServices' in vm:
            for svc in vm['suppliedServices']:
svc['useLuidForIpConfig'] = True
svc['ipConfigLuid'] = nic_ipconf_id
client.update_application(created_app)
def detect_ips_and_and_create_compatible_subnets(client, module, appID, app_request):
net_list = []
for vm in app_request['design']['vms']:
for nic in vm['networkConnections']:
if check_for_param(nic, 'ipConfig.autoIpConfig.reservedIp', required=False):
ip = nic['ipConfig']['autoIpConfig']['reservedIp']
subnet_exists = False
for net in net_list:
if ip in net:
subnet_exists = True
if not subnet_exists:
new_net = IPNetwork(ip + '/16')
net_list.append(new_net)
            elif check_for_param(nic, 'ipConfig.staticIpConfig.ip', required=False):
                # Mirror the reservedIp handling above: make sure a /16 subnet
                # also covers statically configured addresses.
                ip = nic['ipConfig']['staticIpConfig']['ip']
                if not any(ip in net for net in net_list):
                    net_list.append(IPNetwork(ip + '/16'))
for net in net_list:
create_subnet_with_ip_pool(client, module, appID, net)
##### Application Json Tools #####
def maybe_digit(item):
if (item.isdigit()):
return int(item)
else:
return item
def json_insert_head(json_slice, key, value):
if type(key) is int:
if len(json_slice) <= key:
json_slice.insert(key, value)
else:
json_slice[key] = value
else:
json_slice[key] = value
return json_slice
def ravello_template_set(json_slice, jspath_str, value):
jspath = re.split(r'(?<!\\)\.', jspath_str)
def recur (json_slice, jspath, value):
if len(jspath) > 1:
if not json_head_contains(json_slice, maybe_digit(jspath[0])):
if jspath[1].isdigit():
json_slice = json_insert_head(json_slice, maybe_digit(jspath[0]), [])
else:
json_slice = json_insert_head(json_slice, maybe_digit(jspath[0]), {})
json_insert_head(json_slice, maybe_digit(jspath[0]),
recur(json_slice[maybe_digit(jspath[0])],
jspath[1:], value))
elif len(jspath) == 1:
json_slice = json_insert_head(json_slice, maybe_digit(jspath[0]), value)
else:
raise Exception("Error: invalid json path string: " + jspath_str)
return json_slice
return recur(json_slice, jspath, value)
# return kwargs[k] if it exists,
# otherwise return default
def from_kwargs(kwargs, k, default):
if k in kwargs:
return kwargs[k]
elif type(default) is Exception:
raise default
else:
return default
def json_head_contains(json_item, key):
if json_item is None:
return False
if type(key) is int:
if len(json_item) <= key:
return False
else:
return True
else:
return (key in json_item)
def ravello_template_get(json_item, jspath_str, **kwargs):
jspath = re.split(r'(?<!\\)\.', jspath_str)
def recur(json_slice, jspath):
if len(jspath) > 1:
if not json_head_contains(json_slice, maybe_digit(jspath[0])):
raise Exception("error: invalid json_path string: " + jspath_str)
return recur(json_slice[maybe_digit(jspath[0])], jspath[1:])
elif len(jspath) == 1:
if not json_head_contains(json_slice, maybe_digit(jspath[0])):
raise Exception("error: invalid json_path string: " + jspath_str)
else:
return json_slice[maybe_digit(jspath[0])]
else:
            raise Exception("error: invalid json_path string: " + jspath_str)
return recur(json_item, jspath)
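# Illustrative example (added for clarity, not present in the original module):
# the dotted-path helpers build nested dicts/lists on demand, e.g.
#
#   app = {}
#   ravello_template_set(app, 'design.vms.0.name', 'vm1')
#   # app is now {'design': {'vms': [{'name': 'vm1'}]}}
#   ravello_template_get(app, 'design.vms.0.name')  # returns 'vm1'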
class ModuleFail:
def __init__(self):
self.module = None
def attach_ansible_modle(self, module):
self.module = module
def __call__(self, msg):
if (self.module == None):
raise Exception(msg)
else:
self.module.fail_json(msg=msg)
def check_for_param(json_item, jspath, **kwargs):
    valid = from_kwargs(kwargs, 'valid', [])
fail_msg = from_kwargs(kwargs, 'fail_msg',
"Template Error: " + jspath + " - Missing or invalid." )
required = from_kwargs(kwargs, 'required', True)
if type(valid) is str:
valid = [valid]
if type(valid) is list:
valid_list = valid
valid = lambda val: val in valid_list
if not callable(valid):
        raise Exception('Error: `valid` kwarg must be of type string, list, or single-argument function')
def recur(json_slice, jspath):
if type(jspath) is str:
jspath = re.split(r'(?<!\\)\.', jspath)
if len(jspath) > 1:
if not json_head_contains(json_slice, maybe_digit(jspath[0])):
if not required:
return False
                if 'default_if_missing' in kwargs:
                    ravello_template_set(json_slice, '.'.join(jspath),
                                         kwargs['default_if_missing'])
                else:
                    module_fail(fail_msg)
return recur(json_slice[maybe_digit(jspath[0])], jspath[1:])
elif len(jspath) == 1:
if not json_head_contains(json_slice, maybe_digit(jspath[0])):
if not required:
return False
if 'default_if_missing' not in kwargs:
module_fail(fail_msg)
else:
json_insert_head(json_slice, maybe_digit(jspath[0]),
kwargs['default_if_missing'])
if 'valid' not in kwargs:
return True
else:
return valid(json_slice[maybe_digit(jspath[0])])
else:
raise Exception("Error: invalid json path string")
return recur(json_item, jspath)
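# Illustrative example (added for clarity, not present in the original module):
#
#   check_for_param(vm, 'memorySize.unit', valid=['GB', 'MB'],
#                   default_if_missing='GB')
#
# returns True and fills in 'GB' when the key is absent; when a required key
# is missing and no default is given, the module fails (or an Exception is
# raised if no AnsibleModule has been attached to module_fail yet).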
module_fail = ModuleFail()
main()
|
dbenoit17/ansible_agnostic_deployer
|
ansible/library/ravello_module.py
|
Python
|
gpl-3.0
| 45,954
|
# Copyright Notice: This code is in Copyright. Any use leading to publication
# or financial gain is prohibited without the permission of the authors
# Simon O'Meara and # David Topping: simon.omeara@manchester.ac.uk. First
# published 2017.
# This file is part of box_model
# box_model is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# box_model is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with box_model (see COPYING.txt). If not, see
# <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------------------
# function to track particle number size distribution using hybrid
# size structure (p. 420 Jacobson 2000).
import numpy as np
from full_stat import full_stat
from full_move import full_move
def hybrid_main(n0, s0, mnv0, msv0, rhonv, rhosv, sbn, Vref, Vsv0):
# ------------------------------------------------------------------
# input:
# n0 - initial particle number concentration per size bin
# (# particle m/3 (air))
# s0 - initial volume bounds per size bin (m^3) (1st dim.)
# mnv0 - initial nonvolatile particle phase mass per size bin
# (1st dim.) (g/m3 (air))
# msv0 - initial semi-volatile particle phase mass per size bin
# (1st dim.) (g/m3 (air))
    # rhonv, rhosv - nonvolatile and semi-volatile particle phase component
    # (2nd dim.) densities (g/m3), repeated across size bins (1st dim.)
# sbn - number of size bins
# Vref - reference volume of single particles in each fixed
# size bin (m3)
# Vsv0 - initial volumes of semi-volatile material per fully moving
# size bin (m3) (summed across all particles in that bin)
# output:
# n1 - end of time step particle number concentration per size bin
# (# particle/m3 (air))
# mnv1 - end of time step mass of involatile components per size bin
# (g/m3 (air))
# msv1 - end of time step mass of semi-volatile components per size bin
# (g/m3 (air))
# notes:
# core (involatile part) of particles treated with full-stationary
# whilst shell (volatile part) treated with full-moving. Numerical
# diffusion a disadvantage. The size bin order of sv corresponds to
# that of the nv, so that mnv1 in bin #n has the msv1 in bin #n
# condensed onto it
# ------------------------------------------------------------------
# call on full-stationary method to find the number concentration of
# particles per size bin (/m3(air)) and mass concentration
# (g/m3 (air)) of involatile components per size bin
    [n1, mnv1] = full_stat(n0, s0, mnv0, rhonv, Vref)
# call on full-moving method to calculate average semi-volatile volume
# per particle per size bin (m3(particle)/m3(air)) and average
# mass of semi-volatile per particle per size bin
# (g(condensed phase)/m3(air)). The size bin order of sv is aligned
# with that of the nv, so that mnv1 in bin #n has the msv1 in bin #n
# condensed onto it
[msv1, nsv1] = full_move(n1, msv0, rhosv)
return n1, mnv1, msv1
|
simonom/box-model
|
hybr_stru.py
|
Python
|
gpl-3.0
| 3,343
|
# -*- coding: utf-8 -*-
#from odoo import api, fields, models
#from datetime import datetime, timedelta
#class PurchaseOrder(models.Model):
# _inherit = "purchase.order"
# mainmenu_id = fields.Char('menu')
# gourmet_report = fields.Text(string='Tabelog')
# companion_id = fields.Many2one('res.users', string='companion')
# date_report = fields.Date(string='Report Date', required=True, readonly=Tru$
# val_del = fields.Boolean(string='Delicious?')
# price_menu = fields.Integer(string='price', default=0)
# image_main = fields.Binary(string='picture', attachment=True,)
# @api.multi
# def function_a(self):
#
# self.ensure_one()
# self.gourmet_report = "You push down the function A button!!"
# return
# @api.multi
# def function_b(self):
# self.ensure_one()
# self.price_menu = 2000
# return
# @api.multi
# def function_c(self):
# self.write({'test':'Can you see?'})
# return
|
ichi23de5/ichi_Repo
|
purchases_test/models/p_test.py
|
Python
|
gpl-3.0
| 986
|
r = "N"
while r != "S":
a = int(input("Digite a quantidade de votos do candidato A: "))
b = int(input("Digite a quantidade de votos do candidato B: "))
c = int(input("Digite a quantidade de votos do candidato C: "))
r = str(input("Para sair Precione [S]. ")).upper()
if a > b and a > c:
print("O candidato \033[30;42mA\033[m Venceu com {} votos.".format(a))
if b > a and b > c:
print("O candidado \033[30;42mB\033[m venceu com {} votos.".format(b))
if c > a and c > b:
print("O candidato \033[30;42mC\033[m venceu com {} votos.".format(c))
print("Fim da eleição.")
|
ronas/PythonGNF
|
Ronaldo/ex004.py
|
Python
|
gpl-3.0
| 632
|
import threading
from sqlalchemy import Column, String, func, distinct
from tg_bot.modules.sql import BASE, SESSION
class GroupLogs(BASE):
__tablename__ = "log_channels"
chat_id = Column(String(14), primary_key=True)
log_channel = Column(String(14), nullable=False)
def __init__(self, chat_id, log_channel):
self.chat_id = str(chat_id)
self.log_channel = str(log_channel)
GroupLogs.__table__.create(checkfirst=True)
LOGS_INSERTION_LOCK = threading.RLock()
CHANNELS = {}
def set_chat_log_channel(chat_id, log_channel):
with LOGS_INSERTION_LOCK:
res = SESSION.query(GroupLogs).get(str(chat_id))
if res:
res.log_channel = log_channel
else:
res = GroupLogs(chat_id, log_channel)
SESSION.add(res)
CHANNELS[str(chat_id)] = log_channel
SESSION.commit()
def get_chat_log_channel(chat_id):
return CHANNELS.get(str(chat_id))
def stop_chat_logging(chat_id):
with LOGS_INSERTION_LOCK:
res = SESSION.query(GroupLogs).get(str(chat_id))
if res:
if str(chat_id) in CHANNELS:
del CHANNELS[str(chat_id)]
log_channel = res.log_channel
SESSION.delete(res)
SESSION.commit()
return log_channel
def num_logchannels():
try:
return SESSION.query(func.count(distinct(GroupLogs.chat_id))).scalar()
finally:
SESSION.close()
def migrate_chat(old_chat_id, new_chat_id):
with LOGS_INSERTION_LOCK:
chat = SESSION.query(GroupLogs).get(str(old_chat_id))
if chat:
chat.chat_id = str(new_chat_id)
SESSION.add(chat)
if str(old_chat_id) in CHANNELS:
CHANNELS[str(new_chat_id)] = CHANNELS.get(str(old_chat_id))
SESSION.commit()
def __load_log_channels():
global CHANNELS
try:
all_chats = SESSION.query(GroupLogs).all()
CHANNELS = {chat.chat_id: chat.log_channel for chat in all_chats}
finally:
SESSION.close()
__load_log_channels()
|
PaulSonOfLars/tgbot
|
tg_bot/modules/sql/log_channel_sql.py
|
Python
|
gpl-3.0
| 2,071
|
from __future__ import print_function
import logging
#Full joinmarket uses its own bitcoin module;
#other implementations (like wallet plugins)
#can optionally include their own, which must
#be implemented as an interface in btc.py
from btc import *
from .support import get_log, calc_cj_fee, debug_dump_object, \
choose_sweep_orders, choose_orders, \
pick_order, cheapest_order_choose, weighted_order_choose, \
rand_norm_array, rand_pow_array, rand_exp_array, joinmarket_alert, core_alert
from .jsonrpc import JsonRpcError, JsonRpcConnectionError, JsonRpc
from .old_mnemonic import mn_decode, mn_encode
from .slowaes import decryptData, encryptData
from .wallet import AbstractWallet, BitcoinCoreInterface, Wallet, \
BitcoinCoreWallet, ElectrumWrapWallet
from .configure import load_program_config, jm_single, get_p2pk_vbyte, \
    get_network, validate_address
from .blockchaininterface import BlockrInterface, BlockchainInterface
from .irc import random_nick, IRCMessageChannel
from .taker import Taker
# Set default logging handler to avoid "No handler found" warnings.
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
|
AdamISZ/joinmarket_core
|
joinmarket_core/__init__.py
|
Python
|
gpl-3.0
| 1,324
|
"""Write initial TrueGrid files for one Sandia blade station.
Usage
-----
start an IPython (qt)console with the pylab flag:
$ ipython qtconsole --pylab
or
$ ipython --pylab
Then, from the prompt, run this script:
|> %run sandia_blade_lib/prep_stnXX_mesh.py
or
|> import sandia_blade_lib/prep_stnXX_mesh
Author: Perry Roth-Johnson
Last updated: April 10, 2014
"""
import matplotlib.pyplot as plt
import lib.blade as bl
import lib.poly_utils as pu
from shapely.geometry import Polygon
# SET THESE PARAMETERS -----------------
station_num = 4
# --------------------------------------
plt.close('all')
# load the Sandia blade
m = bl.MonoplaneBlade('Sandia blade SNL100-00', 'sandia_blade')
# pre-process the station dimensions
station = m.list_of_stations[station_num-1]
station.airfoil.create_polygon()
station.structure.create_all_layers()
station.structure.save_all_layer_edges()
station.structure.write_all_part_polygons()
# plot the parts
station.plot_parts()
# access the structure for this station
st = station.structure
# upper spar cap -----------------------------------------------------------
label = 'upper spar cap'
# create the bounding polygon
points_usc = [
(-0.75, 2.5),
( 0.75, 2.5),
( 0.75, 3.0),
(-0.75, 3.0)
]
bounding_polygon = Polygon(points_usc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# lower spar cap -----------------------------------------------------------
label = 'lower spar cap'
# create the bounding polygon
points_lsc = [
(-0.75,-3.0),
( 0.75,-3.0),
( 0.75,-2.5),
(-0.75,-2.5)
]
bounding_polygon = Polygon(points_lsc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# TE reinforcement ---------------------------------------------------------
label = 'TE reinforcement'
# create the bounding polygon
points_te = [
(1.84700000, 1.99063191),
(1.95, 2.1),
(3.0, 2.0),
(3.0,-2.0),
(1.95,-2.1),
(1.84700000, -1.99063191)
]
bounding_polygon = Polygon(points_te)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# LE -----------------------------------------------------------------------
label = 'LE'
# create the bounding polygon
points_le = [
(-3.00,-3.0),
(-0.75,-3.0),
(-0.75000000, -2.59408279),
(-0.76000000, -2.58050614),
(-0.76000000, 2.58050614),
(-0.75000000, 2.59408279),
(-0.75, 3.0),
(-3.00, 3.0)
]
bounding_polygon = Polygon(points_le)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# upper right --------------------------------------------------------------
label = 'upper right'
# create the bounding polygon
points_ur = [
(0.75, 2.8),
(2.0, 2.8),
(1.95, 2.1),
points_te[0],
(1.83701396, 1.98617431),
(0.8, 2.0),
(0.76000000, 2.58050614),
(0.75000000, 2.59408279)
]
bounding_polygon = Polygon(points_ur)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# lower right --------------------------------------------------------------
label = 'lower right'
# create the bounding polygon
points_lr = [
(0.75, -2.8),
(2.0, -2.8),
(1.95, -2.1),
points_te[-1],
(1.83701396, -1.98617431),
(0.8, -2.0),
(0.76000000, -2.58050614),
(0.75000000, -2.59408279)
]
bounding_polygon = Polygon(points_lr)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# show the plot
plt.show()
# write the TrueGrid input file for mesh generation ---------------------
st.write_truegrid_inputfile(
interrupt_flag=True,
additional_layers=[
st.spar_cap.layer['upper'],
st.spar_cap.layer['lower'],
st.TE_reinforcement.layer['uniax']
])
|
perryjohnson/biplaneblade
|
sandia_blade_lib/prep_stn04_mesh.py
|
Python
|
gpl-3.0
| 6,804
|
#!/usr/bin/env python
"""
@file vissim_parseBusStops.py
@author Daniel Krajzewicz
@date 2009-05-27
@version $Id: vissim_parseBusStops.py 11671 2012-01-07 20:14:30Z behrisch $
Parses bus stops and bus routes given in the Vissim file (first parameter).
The read bus lines are saved as <OUTPUT_PREFIX>_busses.rou.xml
The read routes are saved as <OUTPUT_PREFIX>_stops.add.xml
(Starting?) edges of the route may be renamed by setting them within "edgemap"
variable (see below).
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2009-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
edgemap = {}
edgemap["203"] = "203[0]"
edgemap["78"] = "78[0]"
edgemap["77"] = "77[0]"
edgemap["203"] = "203[0]"
import sys
from random import *
def getName(vals, beg):
name = vals[beg]
while name.count('"')!=2:
beg = beg + 1
name = name + " " + vals[beg]
return name.replace('"', '')
def parseBusStop(bs):
vals = bs.split()
id = int(vals[1])
i = vals.index("NAME")
name = getName(vals, i+1)
i = vals.index("STRECKE", i)
strecke = vals[i+1]
i = vals.index("SPUR", i)
spur = int(vals[i+1]) - 1
i = vals.index("BEI", i)
von = float(vals[i+1])
i = vals.index("LAENGE", i)
bis = von + float(vals[i+1])
return (id, name, strecke, spur, von, bis)
def parseBusRoute(br, stops):
vals = br.split()
id = vals[1]
i = vals.index("NAME")
name = getName(vals, i+1)
i = vals.index("EINFAHRT", i)
startKante = vals[i+2]
i = vals.index("ZIEL", i)
ziel = vals[i+2]
zeiten = []
endI = vals.index("HALTESTELLE", i)
i = vals.index("STARTZEITEN", i)
i = i + 1
while i>0 and i<endI:
zeiten.append(int(float(vals[i])))
i = i + 5
stops = []
while i>0 and i<len(vals):
try:
i = vals.index("HALTESTELLE", i)
i = i + 1
stops.insert(0, int(vals[i]))
except:
i = len(vals) + 1
return (id, name, startKante, ziel, zeiten, stops)
def sorter(idx):
def t(i, j):
if i[idx] < j[idx]:
return -1
elif i[idx] > j[idx]:
return 1
else:
            return 0
    return t
if len(sys.argv) < 3:
print "Usage: " + sys.argv[0] + " <VISSIM_NETWORK> <OUTPUT_PREFIX>"
sys.exit()
print "Parsing Vissim input..."
fd = open(sys.argv[1])
haveStop = False
haveRoute = False
currentItem = ""
stopsL = []
routesL = []
for line in fd:
# process bus stops ("HALTESTELLE")
if line.find("HALTESTELLE")==0:
if haveStop:
stopsL.append(" ".join(currentItem.split()))
haveStop = True
currentItem = ""
elif line[0]!=' ':
if haveStop:
stopsL.append(" ".join(currentItem.split()))
haveStop = False
if haveStop:
currentItem = currentItem + line
# process bus routes ("LINIE")
if line.find(" LINIE")==0:
if haveRoute:
routesL.append(" ".join(currentItem.split()))
haveRoute = True
currentItem = ""
elif len(line)>2 and line[0]!=' ' and line[1]!=' ':
if haveRoute:
routesL.append(" ".join(currentItem.split()))
haveRoute = False
if haveRoute:
currentItem = currentItem + line
# build stops map
sm = {}
for bs in stopsL:
(id, name, strecke, spur, von, bis) = parseBusStop(bs)
sm[id] = (id, name, strecke, spur, von, bis)
# process bus routes
# build departure times
emissions = []
for br in routesL:
(pid, name, startKante, ziel, zeiten, stops) = parseBusRoute(br, sm)
edges = []
edges.append(startKante)
for s in stops:
if sm[s][2] not in edges:
edges.append(sm[s][2])
if ziel not in edges:
edges.append(ziel)
for i in range(0, len(edges)):
if edges[i] in edgemap:
edges[i] = edgemap[edges[i]]
for t in zeiten:
id = str(pid) + "_" + str(t)
emissions.append( ( int(t), id, edges, stops ) )
# sort emissions
print "Sorting routes..."
emissions.sort(sorter(0))
# write routes
print "Writing bus routes..."
fdo = open(sys.argv[2] + "_busses.rou.xml", "w")
fdo.write("<routes>\n")
for emission in emissions:
if len(emission[2])<2:
continue;
fdo.write(' <vehicle id="' + emission[1] + '" depart="' + str(emission[0]) + '" type="bus" color="0,1,0"><route>' + " ".join(emission[2]) + '</route>\n');
for s in emission[3]:
fdo.write(' <stop bus_stop="' + str(s) + '_0" duration="20"/>\n')
fdo.write(' </vehicle>\n');
fdo.write("</routes>\n")
# process bus stops
print "Writing bus stops..."
fdo = open(sys.argv[2] + "_stops.add.xml", "w")
fdo.write("<add>\n")
for bs in stopsL:
(id, name, strecke, spur, von, bis) = parseBusStop(bs)
fdo.write(' <busStop id="' + str(id) + '" lane="' + strecke + "_" + str(spur) + '" from="' + str(von) + '" to="' + str(bis) + '" lines="--"/>\n')
fdo.write("</add>\n")
fdo.close()
|
smendez-hi/SUMO-hib
|
tools/import/vissim/vissim_parseBusStops.py
|
Python
|
gpl-3.0
| 5,021
|
"""
class Esex
"""
ESODO, EKSODO, ESPAG, EKSPAG = 1, 2, 3, 4
TYPEE = {ESODO: 'Έσοδα', EKSODO: 'Έξοδα',
ESPAG: 'Αγορές παγίων', EKSPAG: 'Πωλήσεις παγίων'}
ELLADA, ENDOK, EKSOTERIKO = 10, 20, 30
TYPXO = {ELLADA: 'Εσωτερικού', ENDOK: 'Ενδοκοινοτικό', EKSOTERIKO: 'Εξωτερικό'}
class LineType:
"""Τύπος αναλυτικής γραμμής"""
def __init__(self, per, pfpa=0):
self._per = per
self._pfpa = pfpa
def pfpa(self):
"""Μετατροπή του ΦΠΑ σε δεκαδικό"""
return self._pfpa / 100.0
def per(self):
"""Περιγραφή"""
return self._per
class EsexLine:
"""Αναλυτική γραμμή εσόδων εξόδων"""
def __init__(self, ltype, val=0, fpa=0, tot=0):
self.line_type = ltype
if val != 0:
self.calc_from_val(val)
elif fpa != 0:
self.calc_from_fpa(fpa)
elif tot != 0:
self.calc_from_tot(tot)
else:
self._val = 0
self._fpa = 0
self._tot = 0
    def val(self):
        """Net value"""
        return self._val
    def fpa(self):
        """VAT amount"""
        return self._fpa
    def tot(self):
        """Total value"""
        return self._tot
    def per(self):
        """Line description"""
        return self.line_type.per()
    def check(self):
        """Data integrity checks"""
if self.line_type.pfpa() == 0:
assert self._fpa == 0
assert self._fpa == self._val * self.line_type.pfpa()
def calc_from_val(self, val):
"""Υπολογισμός φπα, συνόλου από αξία"""
self._val = val
self._fpa = self.line_type.pfpa() * val
self._tot = self._val + self._fpa
def calc_from_fpa(self, fpa):
"""Υπολογισμός αξίας, συνόλου από φπα"""
self._val = fpa / self.line_type.pfpa()
self._fpa = fpa
self._tot = self._val + self._fpa
def calc_from_tot(self, tot):
"""Υπολογισμός αξίας, φπα από συνολική αξία"""
self._val = tot / (1.0 + self.line_type.pfpa())
self._fpa = tot - self._val
self._tot = tot
class Esex:
"""Έσοδα-έξοδα"""
def __init__(self, dat, par, typ=ESODO, xora=ELLADA, nor=True):
        self._typ = typ  # Income or expense
        self._nor = nor  # If True: normal entry, otherwise credit
        self._xora = xora
        self._ypma = 1  # Main branch (head office)
        self._dat = dat  # Entry date
        self._par = par  # Source document (voucher)
self._lines = []
self.tval = 0
self.tfpa = 0
self.ttot = 0
def normal(self):
if self._nor:
return 'Normal'
return 'Credit'
def new_line(self, line):
"""Νέα γραμμή"""
self._lines.append(line)
self.tval += line.val()
self.tfpa += line.fpa()
self.ttot += line.tot()
def __repr__(self):
tmpl = '%-40s %12s %10s %13s\n'
ast = 'Έσοδα-έξοδα\n'
ast += '%s\n' % TYPEE[self._typ]
for lin in self._lines:
ast += tmpl % (lin.per(), lin.val(), lin.fpa(), lin.tot())
ast += tmpl % ('Σύνολα', self.tval, self.tfpa, self.ttot)
return ast
if __name__ == '__main__':
pe24 = LineType('Πωλήσεις εμπορευμάτων 24%', 24.0)
pe13 = LineType('Πωλήσεις εμπορευμάτων 13%', 13.0)
ese = Esex('2017-01-01', 'ΤΔΑ23')
ese.new_line(EsexLine(pe24, 100))
ese.new_line(EsexLine(pe13, 100))
print(ese)
|
tedlaz/pyted
|
esex17/ee.py
|
Python
|
gpl-3.0
| 3,878
|
import core
from core.urls import urls_base
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.i18n import i18n_patterns
from django.contrib.sitemaps.views import sitemap
from .sitemaps import RootSitemap
from .api.urls import urlpatterns as api_urls
from .views import (
Index,
FAQ,
Contacts,
Students,
CourseView,
PayView,
Agreement,
TestCallback,
)
courses_urls = ([
path('<slug>', CourseView.as_view(), name="course"),
# path(r'^$', include('core.urls', namespace='core')),
# path(r'^articles/', include('articles.urls', namespace='articles')),
], 'courses')
# Error handlers
#
# https://docs.djangoproject.com/en/dev/ref/urls/#django.conf.urls.handler400
#
# def err404(request):
# if request.method == 'GET':
# return HttpResponseNotFound(render_to_string('404.html', locals()))
# else:
# return HttpResponseNotFound('404')
# def err500(request):
# if request.method == 'GET':
# return HttpResponseNotFound(render_to_string('500.html', locals()))
# else:
# return HttpResponseNotFound('500')
# handler404 = 'proj.urls.err404'
# handler500 = 'pashinin.urls.err500'
# handler404 = IndexView.as_view()
sitemaps = {
'static': RootSitemap,
}
urlpatterns = [
*core.urls.urlpatterns,
path('accounts/', include('allauth.urls')),
# path('accounts/vk/login/callback/', TestCallback.as_view(), name='cb'),
path(
'sitemap.xml',
sitemap,
{'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap'
),
path('articles/', include('articles.urls', namespace='articles')),
path('api/', include(api_urls, namespace='api')),
path('courses/', include(courses_urls, namespace='courses')),
]
urlpatterns += i18n_patterns(
path('', Index.as_view(), name="index"),
# path(r'^$', Index.as_view(), name="lazysignup_convert"),
path('pay', PayView.as_view(), name='pay'),
path('contacts', Contacts.as_view(), name='contacts'),
path('students', Students.as_view(), name='students'),
path('faq', FAQ.as_view(), name="faq"),
path('agreement', Agreement.as_view(), name="agreement"),
# path('login', Login.as_view(), name='login'),
# This really needs to be here, not just in 'core' urls.py.
# Because admin templates are getting reverse urls with "admin:..."
# So if you wrap admin inside some app - reverse will throw an error
# path('_/django/', admin.site.urls),
prefix_default_language=False
)
# urlpatterns += [
# path(r'^cas/', include('mama_cas.urls'))
# ]
|
pashinin-com/pashinin.com
|
src/pashinin/urls.py
|
Python
|
gpl-3.0
| 2,618
|
from .function_button_model import FunctionButtonModel
from .default_path_model import DefaultPathModel
from .default_boolean_model import DefaultBooleanModel
from .string_model import StringModel
|
iLoop2/ResInsight
|
ThirdParty/Ert/devel/python/python/ert_gui/models/mixins/connectorless/__init__.py
|
Python
|
gpl-3.0
| 197
|
#!/usr/bin/python2
import xbmc
import os
import time
import urllib
import urllib2
import json
import xml.etree.ElementTree as ET
import hashlib
import re
from globals import ADDON_PATH_PROFILE, LOG_LEVEL
TAG = 'ESPN3 util: '
def is_file_valid(cache_file, timeout):
if os.path.isfile(cache_file):
modified_time = os.path.getmtime(cache_file)
current_time = time.time()
return current_time - modified_time < timeout
return False
def fetch_file(url, cache_file):
urllib.urlretrieve(url, cache_file)
def load_file(cache_file):
return open(cache_file, mode='r')
def get_url_as_xml_soup_cache(url, cache_file = None, timeout = 300):
if cache_file is None:
cache_file = hashlib.sha224(url).hexdigest()
cache_file = os.path.join(ADDON_PATH_PROFILE, cache_file + '.xml')
if not is_file_valid(cache_file, timeout):
xbmc.log(TAG + 'Fetching config file %s from %s' % (cache_file, url), LOG_LEVEL)
fetch_file(url, cache_file)
else:
xbmc.log(TAG + 'Using cache %s for %s' % (cache_file, url), LOG_LEVEL)
xml_file = open(cache_file)
xml_data = xml_file.read()
xml_file.close()
return load_element_tree(xml_data)
def get_url_as_xml_soup(url):
config_data = urllib2.urlopen(url).read()
return load_element_tree(config_data)
# ESPN files are in iso-8859-1 and sometimes do not have the xml preamble
def load_element_tree(data):
try:
parser = ET.XMLParser(encoding='iso-8859-1')
data_tree = ET.fromstring(data, parser)
except:
if '<?xml version' not in data:
xbmc.log(TAG + 'Fixing up data because of no xml preamble', LOG_LEVEL)
try:
data_tree = ET.fromstring('<?xml version="1.0" encoding="ISO-8859-1" ?>' + data)
except:
try:
data_tree = ET.fromstring('<?xml version="1.0" encoding="windows-1252" ?>' + data)
except:
# One last chance to fix up the data
xbmc.log(TAG + 'removing invalid xml characters', LOG_LEVEL)
data = re.sub('[\\x00-\\x1f]', '', data)
data = re.sub('[\\x7f-\\x9f]', '', data)
data_tree = ET.fromstring('<?xml version="1.0" encoding="ISO-8859-1" ?>' + data)
else:
data_tree = ET.fromstring(data)
return data_tree
def get_url_as_json(url):
response = urllib2.urlopen(url)
return json.load(response)
def get_url_as_json_cache(url, cache_file = None, timeout = 300):
if cache_file is None:
cache_file = hashlib.sha224(url).hexdigest()
cache_file = os.path.join(ADDON_PATH_PROFILE, cache_file + '.json')
if not is_file_valid(cache_file, timeout):
xbmc.log(TAG + 'Fetching config file %s from %s' % (cache_file, url), LOG_LEVEL)
fetch_file(url, cache_file)
else:
xbmc.log(TAG + 'Using cache %s for %s' % (cache_file, url), LOG_LEVEL)
json_file = open(cache_file)
json_data = json_file.read()
json_file.close()
if json_data.startswith('ud='):
json_data = json_data.replace('ud=', '')
json_data = json_data.replace('\'', '"')
return json.loads(json_data)
# espn.page.loadSportPage('url');
# -> url
def parse_url_from_method(method):
http_start = method.find('http')
end = method.find('\')')
return method[http_start:end]
# espn.page.loadMore('loadMoreLiveAndUpcoming', 'nav-0', 'url')
def parse_method_call(method):
p = re.compile('([\\w\\.:/&\\?=%,-]{2,})')
return p.findall(method)
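# Illustrative example (not part of the original file):
#   parse_method_call("espn.page.loadMore('loadMoreLiveAndUpcoming', 'nav-0', "
#                     "'http://example.com/more')")
# returns ['espn.page.loadMore', 'loadMoreLiveAndUpcoming', 'nav-0',
#          'http://example.com/more']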
|
locomot1f/plugin.video.espn_3
|
resources/lib/util.py
|
Python
|
gpl-3.0
| 3,590
|
"""
An integer N is given, representing the area of some rectangle.
The area of a rectangle whose sides are of length A and B is A * B,
and the perimeter is 2 * (A + B).
The goal is to find the minimal perimeter of any rectangle whose area equals N.
The sides of this rectangle should be only integers.
For example, given integer N = 30, rectangles of area 30 are:
(1, 30), with a perimeter of 62,
(2, 15), with a perimeter of 34,
(3, 10), with a perimeter of 26,
(5, 6), with a perimeter of 22.
Write a function:
def solution(N)
that, given an integer N, returns the minimal perimeter of any rectangle
whose area is exactly equal to N.
For example, given an integer N = 30, the function should return 22, as explained above.
Assume that:
N is an integer within the range [1..1,000,000,000].
Complexity:
expected worst-case time complexity is O(sqrt(N));
expected worst-case space complexity is O(1).
"""
def solution(N):
result = 0
if N > 0:
i = 1
perimeters = []
while i * i <= N:
            if N % i == 0:
                perimeters.append(2 * (i + (N // i)))
            i += 1
result = min(perimeters)
return result
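# Example (added for illustration): solution(30) returns 22, because the
# divisor pair of 30 closest to sqrt(30) is (5, 6) and 2 * (5 + 6) = 22.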
|
Dineshkarthik/codility_training
|
Lesson 10 - Prime and composite numbers/min_perimeter_rectangle.py
|
Python
|
gpl-3.0
| 1,229
|
import collections
import typing
from . import _inspect, _iterate, _update
from . import duration as _duration
from . import enums as _enums
from . import format as _format
from . import indicators as _indicators
from . import iterate as iterate_
from . import markups as _markups
from . import parentage as _parentage
from . import pcollections as _pcollections
from . import pitch as _pitch
from . import score as _score
from . import select as _select
from . import tag as _tag
from . import timespan as _timespan
from . import typings as _typings
def after_grace_container(argument):
r"""
Gets after grace containers attached to component.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... container = abjad.get.after_grace_container(component)
... print(f"{repr(component):30} {repr(container)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") None
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') None
Note("c'4") None
BeforeGraceContainer("cs'16") None
Note("cs'16") None
Note("d'4") None
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") None
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") None
Chord("<e' g'>16") None
Note("gs'16") None
Note("a'16") None
Note("as'16") None
Voice("e'4", name='Music_Voice') None
Note("e'4") None
Note("f'4") AfterGraceContainer("fs'16")
AfterGraceContainer("fs'16") None
Note("fs'16") None
"""
return getattr(argument, "_after_grace_container", None)
def annotation(
argument,
annotation: typing.Any,
default: typing.Any = None,
unwrap: bool = True,
) -> typing.Any:
r"""
Gets annotation.
.. container:: example
>>> staff = abjad.Staff("c'4 e' e' f'")
>>> abjad.annotate(staff[0], 'default_instrument', abjad.Cello())
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'4
e'4
e'4
f'4
}
>>> string = 'default_instrument'
>>> abjad.get.annotation(staff[0], string)
Cello()
>>> abjad.get.annotation(staff[1], string) is None
True
>>> abjad.get.annotation(staff[2], string) is None
True
>>> abjad.get.annotation(staff[3], string) is None
True
Returns default when no annotation is found:
>>> abjad.get.annotation(staff[3], string, abjad.Violin())
Violin()
.. container:: example
REGRESSION: annotation is not picked up as effective indicator:
>>> prototype = abjad.Instrument
>>> abjad.get.effective(staff[0], prototype) is None
True
>>> abjad.get.effective(staff[1], prototype) is None
True
>>> abjad.get.effective(staff[2], prototype) is None
True
>>> abjad.get.effective(staff[3], prototype) is None
True
"""
return _inspect._get_annotation(
argument, annotation, default=default, unwrap=unwrap
)
def annotation_wrappers(argument):
r"""
Gets annotation wrappers.
.. container:: example
>>> staff = abjad.Staff("c'4 e' e' f'")
>>> abjad.annotate(staff[0], 'default_instrument', abjad.Cello())
>>> abjad.annotate(staff[0], 'default_clef', abjad.Clef('tenor'))
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'4
e'4
e'4
f'4
}
>>> for wrapper in abjad.get.annotation_wrappers(staff[0]): wrapper
Wrapper(annotation='default_instrument', context=None, deactivate=False, indicator=Cello(), synthetic_offset=None, tag=Tag())
Wrapper(annotation='default_clef', context=None, deactivate=False, indicator=Clef(name='tenor', hide=False), synthetic_offset=None, tag=Tag())
"""
return _inspect._get_annotation_wrappers(argument)
def bar_line_crossing(argument) -> bool:
r"""
Is true when ``argument`` crosses bar line.
.. container:: example
>>> staff = abjad.Staff("c'4 d'4 e'4")
>>> time_signature = abjad.TimeSignature((3, 8))
>>> abjad.attach(time_signature, staff[0])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\time 3/8
c'4
d'4
e'4
}
>>> for note in staff:
... result = abjad.get.bar_line_crossing(note)
... print(note, result)
...
c'4 False
d'4 True
e'4 False
"""
if not isinstance(argument, _score.Component):
raise Exception("can only get indicator on component.")
time_signature = _inspect._get_effective(argument, _indicators.TimeSignature)
if time_signature is None:
time_signature_duration = _duration.Duration(4, 4)
else:
time_signature_duration = time_signature.duration
partial = getattr(time_signature, "partial", 0)
partial = partial or 0
start_offset = timespan(argument).start_offset
shifted_start = start_offset - partial
shifted_start %= time_signature_duration
stop_offset = argument._get_duration() + shifted_start
if time_signature_duration < stop_offset:
return True
return False
def before_grace_container(argument):
r"""
Gets before-grace container attached to leaf.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... container = abjad.get.before_grace_container(component)
... print(f"{repr(component):30} {repr(container)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") None
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') None
Note("c'4") None
BeforeGraceContainer("cs'16") None
Note("cs'16") None
Note("d'4") BeforeGraceContainer("cs'16")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") None
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") None
Chord("<e' g'>16") None
Note("gs'16") None
Note("a'16") None
Note("as'16") None
Voice("e'4", name='Music_Voice') None
Note("e'4") None
Note("f'4") None
AfterGraceContainer("fs'16") None
Note("fs'16") None
"""
return getattr(argument, "_before_grace_container", None)
def contents(argument) -> list[_score.Component]:
r"""
Gets contents.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... contents = abjad.get.contents(component)
... print(f"{repr(component)}:")
... for component_ in contents:
... print(f" {repr(component_)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice'):
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Note("c'4")
Note("d'4")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Note("f'4")
Note("c'4"):
Note("c'4")
BeforeGraceContainer("cs'16"):
BeforeGraceContainer("cs'16")
Note("cs'16")
Note("cs'16"):
Note("cs'16")
Note("d'4"):
Note("d'4")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }"):
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Voice("e'4", name='Music_Voice')
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16"):
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Chord("<e' g'>16")
Note("gs'16")
Note("a'16")
Note("as'16")
Chord("<e' g'>16"):
Chord("<e' g'>16")
Note("gs'16"):
Note("gs'16")
Note("a'16"):
Note("a'16")
Note("as'16"):
Note("as'16")
Voice("e'4", name='Music_Voice'):
Voice("e'4", name='Music_Voice')
Note("e'4")
Note("e'4"):
Note("e'4")
Note("f'4"):
Note("f'4")
AfterGraceContainer("fs'16"):
AfterGraceContainer("fs'16")
Note("fs'16")
Note("fs'16"):
Note("fs'16")
.. container:: example
REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> staff.append("ds'4")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
e'16
}
cs'4
\repeat tremolo 2 {
d'16
f'16
}
ds'4
}
>>> for component in abjad.select.components(staff):
... contents = abjad.get.contents(component)
... print(f"{repr(component)}:")
... for component_ in contents:
... print(f" {repr(component_)}")
Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"):
Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4")
TremoloContainer("c'16 e'16")
Note("cs'4")
TremoloContainer("d'16 f'16")
Note("ds'4")
TremoloContainer("c'16 e'16"):
TremoloContainer("c'16 e'16")
Note("c'16")
Note("e'16")
Note("c'16"):
Note("c'16")
Note("e'16"):
Note("e'16")
Note("cs'4"):
Note("cs'4")
TremoloContainer("d'16 f'16"):
TremoloContainer("d'16 f'16")
Note("d'16")
Note("f'16")
Note("d'16"):
Note("d'16")
Note("f'16"):
Note("f'16")
Note("ds'4"):
Note("ds'4")
"""
if not isinstance(argument, _score.Component):
raise Exception("can only get contents of component.")
result = []
result.append(argument)
result.extend(getattr(argument, "components", []))
return result
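# A quick sketch of what ``contents`` returns (comments only; hypothetical
# staff, not one of the doctests above): the component itself followed by its
# immediate children, without recursing the way ``descendants`` does.
#
#     import abjad
#     staff = abjad.Staff("c'4 d'4 e'4 f'4")
#     result = abjad.get.contents(staff)
#     assert result[0] is staff
#     assert len(result) == 5  # the staff plus its four notes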
def descendants(argument) -> list[_score.Component]:
r"""
Gets descendants.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... descendants = abjad.get.descendants(component)
... print(f"{repr(component)}:")
... for component_ in descendants:
... print(f" {repr(component_)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Note("c'4")
BeforeGraceContainer("cs'16")
Note("cs'16")
Note("d'4")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Chord("<e' g'>16")
Note("gs'16")
Note("a'16")
Note("as'16")
Voice("e'4", name='Music_Voice')
Note("e'4")
Note("f'4")
AfterGraceContainer("fs'16")
Note("fs'16")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice'):
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Note("c'4")
BeforeGraceContainer("cs'16")
Note("cs'16")
Note("d'4")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Chord("<e' g'>16")
Note("gs'16")
Note("a'16")
Note("as'16")
Voice("e'4", name='Music_Voice')
Note("e'4")
Note("f'4")
AfterGraceContainer("fs'16")
Note("fs'16")
Note("c'4"):
Note("c'4")
BeforeGraceContainer("cs'16"):
BeforeGraceContainer("cs'16")
Note("cs'16")
Note("cs'16"):
Note("cs'16")
Note("d'4"):
Note("d'4")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }"):
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Chord("<e' g'>16")
Note("gs'16")
Note("a'16")
Note("as'16")
Voice("e'4", name='Music_Voice')
Note("e'4")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16"):
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Chord("<e' g'>16")
Note("gs'16")
Note("a'16")
Note("as'16")
Chord("<e' g'>16"):
Chord("<e' g'>16")
Note("gs'16"):
Note("gs'16")
Note("a'16"):
Note("a'16")
Note("as'16"):
Note("as'16")
Voice("e'4", name='Music_Voice'):
Voice("e'4", name='Music_Voice')
Note("e'4")
Note("e'4"):
Note("e'4")
Note("f'4"):
Note("f'4")
AfterGraceContainer("fs'16"):
AfterGraceContainer("fs'16")
Note("fs'16")
Note("fs'16"):
Note("fs'16")
"""
if isinstance(argument, _score.Component):
argument = [argument]
components = []
for item in argument:
generator = _iterate._iterate_descendants(item)
for component in generator:
if component not in components:
components.append(component)
return components
def duration(
argument, in_seconds: bool = False, preprolated: bool = False
) -> _duration.Duration:
r"""
Gets duration.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... duration = abjad.get.duration(component)
... print(f"{repr(component):30} {repr(duration)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Duration(1, 1)
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Duration(1, 1)
Note("c'4") Duration(1, 4)
BeforeGraceContainer("cs'16") Duration(1, 16)
Note("cs'16") Duration(1, 16)
Note("d'4") Duration(1, 4)
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Duration(1, 4)
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Duration(1, 4)
Chord("<e' g'>16") Duration(1, 16)
Note("gs'16") Duration(1, 16)
Note("a'16") Duration(1, 16)
Note("as'16") Duration(1, 16)
Voice("e'4", name='Music_Voice') Duration(1, 4)
Note("e'4") Duration(1, 4)
Note("f'4") Duration(1, 4)
AfterGraceContainer("fs'16") Duration(1, 16)
Note("fs'16") Duration(1, 16)
.. container:: example
REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> staff.append("ds'4")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
e'16
}
cs'4
\repeat tremolo 2 {
d'16
f'16
}
ds'4
}
>>> for component in abjad.select.components(staff):
... duration = abjad.get.duration(component)
... print(f"{repr(component):30} {repr(duration)}")
Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") Duration(1, 1)
TremoloContainer("c'16 e'16") Duration(1, 4)
Note("c'16") Duration(1, 8)
Note("e'16") Duration(1, 8)
Note("cs'4") Duration(1, 4)
TremoloContainer("d'16 f'16") Duration(1, 4)
Note("d'16") Duration(1, 8)
Note("f'16") Duration(1, 8)
Note("ds'4") Duration(1, 4)
.. container:: example
REGRESSION. Works with selections:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'4
d'4
e'4
f'4
}
>>> selection = staff[:3]
>>> abjad.get.duration(selection)
Duration(3, 4)
.. container:: example
Gets preprolated duration:
>>> staff = abjad.Staff(r"\times 2/3 { c'4 ~ c' } \times 2/3 { d' ~ d' }")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\tweak edge-height #'(0.7 . 0)
\times 2/3
{
c'4
~
c'4
}
\tweak edge-height #'(0.7 . 0)
\times 2/3
{
d'4
~
d'4
}
}
>>> for lt in abjad.select.logical_ties(staff):
... duration = abjad.get.duration(lt)
... preprolated = abjad.get.duration(lt, preprolated=True)
... lt, duration, preprolated
(LogicalTie(items=[Note("c'4"), Note("c'4")]), Duration(1, 3), Duration(1, 2))
(LogicalTie(items=[Note("d'4"), Note("d'4")]), Duration(1, 3), Duration(1, 2))
"""
return _inspect._get_duration(
argument, in_seconds=in_seconds, preprolated=preprolated
)
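# A hedged sketch of ``in_seconds=True`` (comments only; not covered by the
# doctests above). It assumes an effective metronome mark is attached; the
# exact ``MetronomeMark`` signature may differ between Abjad versions:
#
#     import abjad
#     score = abjad.Score([abjad.Staff("c'4 d'4")])
#     leaf = abjad.select.leaves(score)[0]
#     abjad.attach(abjad.MetronomeMark(abjad.Duration(1, 4), 60), leaf)
#     abjad.get.duration(leaf, in_seconds=True)  # expected: Duration(1, 1), i.e. one second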
def effective(
argument,
prototype: _typings.Prototype,
*,
attributes: typing.Dict = None,
default: typing.Any = None,
n: int = 0,
unwrap: bool = True,
) -> typing.Any:
r"""
Gets effective indicator.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Clef("alto"), container[0])
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\clef "alto"
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... clef = abjad.get.effective(component, abjad.Clef)
... print(f"{repr(component):30} {repr(clef)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") None
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') None
Note("c'4") None
BeforeGraceContainer("cs'16") None
Note("cs'16") None
Note("d'4") None
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Clef(name='alto', hide=False)
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Clef(name='alto', hide=False)
Chord("<e' g'>16") Clef(name='alto', hide=False)
Note("gs'16") Clef(name='alto', hide=False)
Note("a'16") Clef(name='alto', hide=False)
Note("as'16") Clef(name='alto', hide=False)
Voice("e'4", name='Music_Voice') Clef(name='alto', hide=False)
Note("e'4") Clef(name='alto', hide=False)
Note("f'4") Clef(name='alto', hide=False)
AfterGraceContainer("fs'16") Clef(name='alto', hide=False)
Note("fs'16") Clef(name='alto', hide=False)
.. container:: example
REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> abjad.attach(abjad.Clef("alto"), staff[-1][0])
>>> staff.append("ds'4")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
e'16
}
cs'4
\repeat tremolo 2 {
\clef "alto"
d'16
f'16
}
ds'4
}
>>> for component in abjad.select.components(staff):
... clef = abjad.get.effective(component, abjad.Clef)
... print(f"{repr(component):30} {repr(clef)}")
Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") None
TremoloContainer("c'16 e'16") None
Note("c'16") None
Note("e'16") None
Note("cs'4") None
TremoloContainer("d'16 f'16") Clef(name='alto', hide=False)
Note("d'16") Clef(name='alto', hide=False)
Note("f'16") Clef(name='alto', hide=False)
Note("ds'4") Clef(name='alto', hide=False)
.. container:: example
Arbitrary objects (like strings) can be attached with a context:
>>> staff = abjad.Staff("c'8 d'8 e'8 f'8")
>>> abjad.attach('color', staff[1], context='Staff')
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'8
d'8
e'8
f'8
}
>>> for component in abjad.iterate.components(staff):
... string = abjad.get.effective(component, str)
... print(component, repr(string))
...
Staff("c'8 d'8 e'8 f'8") None
c'8 None
d'8 'color'
e'8 'color'
f'8 'color'
.. container:: example
Scans forwards or backwards when ``n`` is set:
>>> staff = abjad.Staff("c'8 d'8 e'8 f'8 g'8")
>>> abjad.attach('red', staff[0], context='Staff')
>>> abjad.attach('blue', staff[2], context='Staff')
>>> abjad.attach('yellow', staff[4], context='Staff')
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'8
d'8
e'8
f'8
g'8
}
>>> for n in (-1, 0, 1):
... color = abjad.get.effective(staff[0], str, n=n)
... print(n, repr(color))
...
-1 None
0 'red'
1 'blue'
>>> for n in (-1, 0, 1):
... color = abjad.get.effective(staff[1], str, n=n)
... print(n, repr(color))
...
-1 None
0 'red'
1 'blue'
>>> for n in (-1, 0, 1):
... color = abjad.get.effective(staff[2], str, n=n)
... print(n, repr(color))
...
-1 'red'
0 'blue'
1 'yellow'
>>> for n in (-1, 0, 1):
... color = abjad.get.effective(staff[3], str, n=n)
... print(n, repr(color))
...
-1 'red'
0 'blue'
1 'yellow'
>>> for n in (-1, 0, 1):
... color = abjad.get.effective(staff[4], str, n=n)
... print(n, repr(color))
...
-1 'blue'
0 'yellow'
1 None
.. container:: example
Use synthetic offsets to hide a clef before the start of a staff
like this:
>>> staff = abjad.Staff("c'4 d'4 e'4 f'4")
>>> abjad.attach(
... abjad.Clef("treble", hide=True),
... staff[0],
... synthetic_offset=-1,
... )
>>> abjad.attach(abjad.Clef("alto"), staff[0])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\clef "alto"
c'4
d'4
e'4
f'4
}
>>> for leaf in staff:
... clef = abjad.get.effective(leaf, abjad.Clef)
... (leaf, clef)
...
(Note("c'4"), Clef(name='alto', hide=False))
(Note("d'4"), Clef(name='alto', hide=False))
(Note("e'4"), Clef(name='alto', hide=False))
(Note("f'4"), Clef(name='alto', hide=False))
>>> abjad.get.effective(staff[0], abjad.Clef)
Clef(name='alto', hide=False)
>>> abjad.get.effective(staff[0], abjad.Clef, n=-1)
Clef(name='treble', hide=True)
>>> abjad.get.effective(staff[0], abjad.Clef, n=-2) is None
True
Note that ``hide=True`` is set on the offset clef to prevent
duplicate clef commands in LilyPond output.
Note also that the order of attachment (offset versus non-offset)
makes no difference.
.. container:: example
Here's how to hide a clef after the end of a staff:
>>> staff = abjad.Staff("c'4 d'4 e'4 f'4")
>>> abjad.attach(abjad.Clef("treble"), staff[0])
>>> abjad.attach(
... abjad.Clef("alto", hide=True),
... staff[-1],
... synthetic_offset=1,
... )
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\clef "treble"
c'4
d'4
e'4
f'4
}
>>> for leaf in staff:
... clef = abjad.get.effective(leaf, abjad.Clef)
... (leaf, clef)
...
(Note("c'4"), Clef(name='treble', hide=False))
(Note("d'4"), Clef(name='treble', hide=False))
(Note("e'4"), Clef(name='treble', hide=False))
(Note("f'4"), Clef(name='treble', hide=False))
>>> abjad.get.effective(staff[-1], abjad.Clef)
Clef(name='treble', hide=False)
>>> abjad.get.effective(staff[-1], abjad.Clef, n=1)
Clef(name='alto', hide=True)
>>> abjad.get.effective(staff[-1], abjad.Clef, n=2) is None
True
.. container:: example
Gets effective time signature:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> leaves = abjad.select.leaves(staff)
>>> abjad.attach(abjad.TimeSignature((3, 8)), leaves[0])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\time 3/8
c'4
d'4
e'4
f'4
}
>>> prototype = abjad.TimeSignature
>>> for component in abjad.iterate.components(staff):
... time_signature = abjad.get.effective(component, prototype)
... print(component, time_signature)
...
Staff("c'4 d'4 e'4 f'4") 3/8
c'4 3/8
d'4 3/8
e'4 3/8
f'4 3/8
.. container:: example
Test attributes like this:
>>> voice = abjad.Voice("c'4 d' e' f'")
>>> staff = abjad.Staff([voice])
>>> start_text_span = abjad.StartTextSpan()
>>> abjad.attach(start_text_span, voice[0])
>>> stop_text_span = abjad.StopTextSpan()
>>> abjad.attach(stop_text_span, voice[2])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\new Voice
{
c'4
\startTextSpan
d'4
e'4
\stopTextSpan
f'4
}
}
>>> for note in abjad.select.notes(staff):
... note, abjad.get.effective(note, abjad.StartTextSpan)
...
(Note("c'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None))
(Note("d'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None))
(Note("e'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None))
(Note("f'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None))
>>> for note in abjad.select.notes(staff):
... note, abjad.get.effective(note, abjad.StopTextSpan)
...
(Note("c'4"), None)
(Note("d'4"), None)
(Note("e'4"), StopTextSpan(command='\\stopTextSpan', leak=False))
(Note("f'4"), StopTextSpan(command='\\stopTextSpan', leak=False))
>>> attributes = {'parameter': 'TEXT_SPANNER'}
>>> for note in abjad.select.notes(staff):
... indicator = abjad.get.effective(
... note,
... object,
... attributes=attributes,
... )
... note, indicator
...
(Note("c'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None))
(Note("d'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None))
(Note("e'4"), StopTextSpan(command='\\stopTextSpan', leak=False))
(Note("f'4"), StopTextSpan(command='\\stopTextSpan', leak=False))
.. container:: example
REGRESSION. Matching start-beam and stop-beam indicators work correctly:
>>> voice = abjad.Voice("c'8 d'8 e'8 f'8 g'4 a'4")
>>> abjad.attach(abjad.StartBeam(direction=None, tweaks=None), voice[0])
>>> abjad.attach(abjad.StopBeam(), voice[3])
>>> abjad.show(voice) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(voice)
>>> print(string)
\new Voice
{
c'8
[
d'8
e'8
f'8
]
g'4
a'4
}
>>> for leaf in abjad.select.leaves(voice):
... start_beam = abjad.get.effective(leaf, abjad.StartBeam)
... stop_beam = abjad.get.effective(leaf, abjad.StopBeam)
... leaf, start_beam, stop_beam
(Note("c'8"), StartBeam(direction=None, tweaks=None), None)
(Note("d'8"), StartBeam(direction=None, tweaks=None), None)
(Note("e'8"), StartBeam(direction=None, tweaks=None), None)
(Note("f'8"), StartBeam(direction=None, tweaks=None), StopBeam(leak=False))
(Note("g'4"), StartBeam(direction=None, tweaks=None), StopBeam(leak=False))
(Note("a'4"), StartBeam(direction=None, tweaks=None), StopBeam(leak=False))
# TODO: make this work.
.. container:: example
REGRESSION. Bar lines work like this:
>>> voice = abjad.Voice("c'2 d'2 e'2 f'2")
>>> score = abjad.Score([voice])
>>> abjad.attach(abjad.BarLine("||"), voice[1])
>>> abjad.show(score) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(score)
>>> print(string)
\new Score
<<
\new Voice
{
c'2
d'2
\bar "||"
e'2
f'2
}
>>
>>> for leaf in abjad.select.leaves(score):
... bar_line = abjad.get.effective(leaf, abjad.BarLine)
... leaf, bar_line
(Note("c'2"), None)
(Note("d'2"), BarLine(abbreviation='||', format_slot='after'))
(Note("e'2"), BarLine(abbreviation='||', format_slot='after'))
(Note("f'2"), BarLine(abbreviation='||', format_slot='after'))
"""
if not isinstance(argument, _score.Component):
raise Exception("can only get effective on components.")
if attributes is not None:
assert isinstance(attributes, dict), repr(attributes)
result = _inspect._get_effective(
argument, prototype, attributes=attributes, n=n, unwrap=unwrap
)
if result is None:
result = default
return result
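# A short sketch of the ``default`` keyword (comments only; hypothetical
# two-note staff). When no indicator of ``prototype`` is in effect, the
# ``default`` value is returned instead of ``None``:
#
#     import abjad
#     staff = abjad.Staff("c'4 d'4")
#     abjad.get.effective(staff[0], abjad.Clef)                                 # None
#     abjad.get.effective(staff[0], abjad.Clef, default=abjad.Clef("treble"))   # the default clef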
def effective_staff(argument) -> typing.Optional["_score.Staff"]:
r"""
Gets effective staff.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... staff = abjad.get.effective_staff(component)
... print(f"{repr(component):30} {repr(staff)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("c'4") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
BeforeGraceContainer("cs'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("cs'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("d'4") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Chord("<e' g'>16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("gs'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("a'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("as'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("e'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("e'4") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("f'4") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
AfterGraceContainer("fs'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("fs'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
"""
if not isinstance(argument, _score.Component):
raise Exception("can only get effective staff on components.")
staff_change = _inspect._get_effective(argument, _indicators.StaffChange)
if staff_change is not None:
for component in argument._get_parentage():
root = component
effective_staff = root[staff_change.staff]
return effective_staff
effective_staff = None
for component in argument._get_parentage():
if isinstance(component, _score.Staff):
effective_staff = component
break
return effective_staff
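# A hedged sketch of the staff-change branch (comments only). The exact
# ``StaffChange`` argument (staff name vs. staff object) varies between Abjad
# versions, so treat this as an illustration rather than a doctest:
#
#     import abjad
#     rh = abjad.Staff("c'8 d'8", name="RH_Staff")
#     lh = abjad.Staff("s8 s8", name="LH_Staff")
#     group = abjad.StaffGroup([rh, lh], lilypond_type="PianoStaff")
#     abjad.attach(abjad.StaffChange("LH_Staff"), rh[1])
#     abjad.get.effective_staff(rh[1])  # expected: the staff named "LH_Staff"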
def effective_wrapper(
argument,
prototype: _typings.Prototype,
*,
attributes: typing.Dict = None,
n: int = 0,
):
r"""
Gets effective wrapper.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Clef("alto"), container[0])
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\clef "alto"
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... wrapper = abjad.get.effective_wrapper(component, abjad.Clef)
... print(f"{repr(component):}")
... print(f" {repr(wrapper)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
None
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
None
Note("c'4")
None
BeforeGraceContainer("cs'16")
None
Note("cs'16")
None
Note("d'4")
None
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag())
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag())
Chord("<e' g'>16")
Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag())
Note("gs'16")
Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag())
Note("a'16")
Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag())
Note("as'16")
Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag())
Voice("e'4", name='Music_Voice')
Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag())
Note("e'4")
Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag())
Note("f'4")
Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag())
AfterGraceContainer("fs'16")
Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag())
Note("fs'16")
Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag())
"""
if attributes is not None:
assert isinstance(attributes, dict), repr(attributes)
return effective(argument, prototype, attributes=attributes, n=n, unwrap=False)
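# A brief sketch showing that ``effective_wrapper`` returns the wrapper rather
# than the indicator itself (comments only; hypothetical two-note staff):
#
#     import abjad
#     staff = abjad.Staff("c'4 d'4")
#     abjad.attach(abjad.Clef("alto"), staff[0])
#     wrapper = abjad.get.effective_wrapper(staff[1], abjad.Clef)
#     wrapper.indicator  # the Clef("alto") carried by the wrapper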
def grace(argument) -> bool:
r"""
Is true when ``argument`` is grace music.
Grace music is defined as before-grace, on-beat grace, and after-grace
containers, together with the contents of those containers.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... result = abjad.get.grace(component)
... print(f"{repr(component):30} {repr(result)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") False
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') False
Note("c'4") False
BeforeGraceContainer("cs'16") True
Note("cs'16") True
Note("d'4") False
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") False
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") True
Chord("<e' g'>16") True
Note("gs'16") True
Note("a'16") True
Note("as'16") True
Voice("e'4", name='Music_Voice') False
Note("e'4") False
Note("f'4") False
AfterGraceContainer("fs'16") True
Note("fs'16") True
"""
return _inspect._get_grace_container(argument)
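# A minimal sketch of ``grace`` (comments only; hypothetical figure): leaves
# inside grace containers answer true, ordinary leaves answer false.
#
#     import abjad
#     voice = abjad.Voice("c'4 d'4")
#     container = abjad.BeforeGraceContainer("cs'16")
#     abjad.attach(container, voice[1])
#     assert abjad.get.grace(container[0])
#     assert not abjad.get.grace(voice[1])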
def has_effective_indicator(
argument,
prototype: _typings.Prototype = None,
*,
attributes: typing.Dict = None,
) -> bool:
r"""
Is true when ``argument`` has effective indicator.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Clef("alto"), container[0])
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\clef "alto"
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... function = abjad.get.has_effective_indicator
... result = function(component, abjad.Clef)
... print(f"{repr(component):30} {repr(result)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") False
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') False
Note("c'4") False
BeforeGraceContainer("cs'16") False
Note("cs'16") False
Note("d'4") False
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") True
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") True
Chord("<e' g'>16") True
Note("gs'16") True
Note("a'16") True
Note("as'16") True
Voice("e'4", name='Music_Voice') True
Note("e'4") True
Note("f'4") True
AfterGraceContainer("fs'16") True
Note("fs'16") True
.. container:: example
REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> abjad.attach(abjad.Clef("alto"), staff[-1][0])
>>> staff.append("ds'4")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
e'16
}
cs'4
\repeat tremolo 2 {
\clef "alto"
d'16
f'16
}
ds'4
}
>>> for component in abjad.select.components(staff):
... function = abjad.get.has_effective_indicator
... result = function(component, abjad.Clef)
... print(f"{repr(component):30} {repr(result)}")
Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") False
TremoloContainer("c'16 e'16") False
Note("c'16") False
Note("e'16") False
Note("cs'4") False
TremoloContainer("d'16 f'16") True
Note("d'16") True
Note("f'16") True
Note("ds'4") True
"""
if not isinstance(argument, _score.Component):
raise Exception("can only get effective indicator on component.")
if attributes is not None:
assert isinstance(attributes, dict), repr(attributes)
indicator = _inspect._get_effective(argument, prototype, attributes=attributes)
return indicator is not None
def has_indicator(
argument,
prototype: typing.Union[str, _typings.Prototype] = None,
*,
attributes: typing.Dict = None,
) -> bool:
r"""
Is true when ``argument`` has one or more indicators.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Clef("alto"), container[0])
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\clef "alto"
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... result = abjad.get.has_indicator(component, abjad.Clef)
... print(f"{repr(component):30} {repr(result)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") False
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') False
Note("c'4") False
BeforeGraceContainer("cs'16") False
Note("cs'16") False
Note("d'4") False
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") False
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") False
Chord("<e' g'>16") True
Note("gs'16") False
Note("a'16") False
Note("as'16") False
Voice("e'4", name='Music_Voice') False
Note("e'4") False
Note("f'4") False
AfterGraceContainer("fs'16") False
Note("fs'16") False
.. container:: example
REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> abjad.attach(abjad.Clef("alto"), staff[-1][0])
>>> staff.append("ds'4")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
e'16
}
cs'4
\repeat tremolo 2 {
\clef "alto"
d'16
f'16
}
ds'4
}
>>> for component in abjad.select.components(staff):
... result = abjad.get.has_indicator(component, abjad.Clef)
... print(f"{repr(component):30} {repr(result)}")
Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") False
TremoloContainer("c'16 e'16") False
Note("c'16") False
Note("e'16") False
Note("cs'4") False
TremoloContainer("d'16 f'16") False
Note("d'16") True
Note("f'16") False
Note("ds'4") False
.. container:: example
Set ``attributes`` dictionary to test indicator attributes:
>>> voice = abjad.Voice("c'4 c'4 c'4 c'4")
>>> abjad.attach(abjad.Clef('treble'), voice[0])
>>> abjad.attach(abjad.Clef('alto'), voice[2])
>>> abjad.show(voice) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(voice)
>>> print(string)
\new Voice
{
\clef "treble"
c'4
c'4
\clef "alto"
c'4
c'4
}
>>> attributes = {'name': 'alto'}
>>> abjad.get.has_indicator(voice[0], abjad.Clef)
True
>>> abjad.get.has_indicator(
... voice[0],
... abjad.Clef,
... attributes=attributes,
... )
False
>>> abjad.get.has_indicator(voice[2], abjad.Clef)
True
>>> abjad.get.has_indicator(
... voice[2],
... abjad.Clef,
... attributes=attributes,
... )
True
"""
if isinstance(prototype, _tag.Tag):
raise Exception("do not attach tags; use tag=None keyword.")
if not isinstance(argument, _score.Component):
raise Exception("can only get indicator on component.")
if attributes is not None:
assert isinstance(attributes, dict), repr(attributes)
return argument._has_indicator(prototype=prototype, attributes=attributes)
def indicator(
argument,
prototype: _typings.Prototype = None,
*,
default: typing.Any = None,
unwrap: bool = True,
) -> typing.Any:
r"""
Gets indicator.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Clef("alto"), container[0])
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\clef "alto"
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... result = abjad.get.indicator(component, abjad.Clef)
... print(f"{repr(component):30} {repr(result)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") None
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') None
Note("c'4") None
BeforeGraceContainer("cs'16") None
Note("cs'16") None
Note("d'4") None
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") None
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") None
Chord("<e' g'>16") Clef(name='alto', hide=False)
Note("gs'16") None
Note("a'16") None
Note("as'16") None
Voice("e'4", name='Music_Voice') None
Note("e'4") None
Note("f'4") None
AfterGraceContainer("fs'16") None
Note("fs'16") None
.. container:: example
REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> abjad.attach(abjad.Clef("alto"), staff[-1][0])
>>> staff.append("ds'4")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
e'16
}
cs'4
\repeat tremolo 2 {
\clef "alto"
d'16
f'16
}
ds'4
}
>>> for component in abjad.select.components(staff):
... result = abjad.get.indicator(component, abjad.Clef)
... print(f"{repr(component):30} {repr(result)}")
Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") None
TremoloContainer("c'16 e'16") None
Note("c'16") None
Note("e'16") None
Note("cs'4") None
TremoloContainer("d'16 f'16") None
Note("d'16") Clef(name='alto', hide=False)
Note("f'16") None
Note("ds'4") None
Raises an exception when more than one indicator of ``prototype`` attaches to
``argument``.
Returns ``default`` when no indicator of ``prototype`` attaches to ``argument``.
"""
return _inspect._get_indicator(argument, prototype, default=default, unwrap=unwrap)
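# A short sketch of the two cases described above (comments only; hypothetical
# note): more than one matching indicator raises, and no match falls back to
# ``default``:
#
#     import abjad
#     note = abjad.Note("c'4")
#     abjad.attach(abjad.Articulation("."), note)
#     abjad.attach(abjad.Articulation(">"), note)
#     abjad.get.indicator(note, abjad.Articulation)        # raises: more than one indicator
#     abjad.get.indicator(note, abjad.Clef)                # None
#     abjad.get.indicator(note, abjad.Clef, default="n/a") # 'n/a'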
def indicators(
argument,
prototype: _typings.Prototype = None,
*,
attributes: typing.Dict = None,
unwrap: bool = True,
) -> typing.List:
r"""
Get indicators.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Clef("alto"), container[0])
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> for note in abjad.select.notes(staff):
... abjad.attach(abjad.Articulation("."), note)
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
- \staccato
\grace {
cs'16
- \staccato
}
d'4
- \staccato
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\clef "alto"
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
- \staccato
a'16
- \staccato
as'16
- \staccato
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
- \staccato
}
>>
\oneVoice
\afterGrace
f'4
- \staccato
{
fs'16
- \staccato
}
}
}
>>> for component in abjad.select.components(staff):
... result = abjad.get.indicators(component)
... print(f"{repr(component):30} {repr(result)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") []
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') []
Note("c'4") [Articulation(name='.', direction=None, tweaks=None)]
BeforeGraceContainer("cs'16") []
Note("cs'16") [Articulation(name='.', direction=None, tweaks=None)]
Note("d'4") [Articulation(name='.', direction=None, tweaks=None)]
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") []
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") [LilyPondLiteral(argument='\\set fontSize = #-3', format_slot='opening', directed=False, tweaks=None)]
Chord("<e' g'>16") [StartBeam(direction=None, tweaks=None), LilyPondLiteral(argument='\\slash', format_slot='opening', directed=False, tweaks=None), StartSlur(direction=None, tweaks=None), LilyPondLiteral(argument='\\voiceOne', format_slot='opening', directed=False, tweaks=None), Clef(name='alto', hide=False), Articulation(name='>', direction=None, tweaks=None)]
Note("gs'16") [Articulation(name='.', direction=None, tweaks=None)]
Note("a'16") [Articulation(name='.', direction=None, tweaks=None)]
Note("as'16") [StopBeam(leak=False), StopSlur(leak=False), Articulation(name='.', direction=None, tweaks=None)]
Voice("e'4", name='Music_Voice') []
Note("e'4") [LilyPondLiteral(argument='\\voiceTwo', format_slot='opening', directed=False, tweaks=None), Articulation(name='.', direction=None, tweaks=None)]
Note("f'4") [LilyPondLiteral(argument='\\oneVoice', format_slot='absolute_before', directed=False, tweaks=None), Articulation(name='.', direction=None, tweaks=None)]
AfterGraceContainer("fs'16") []
Note("fs'16") [Articulation(name='.', direction=None, tweaks=None)]
.. container:: example
REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> abjad.attach(abjad.Clef("alto"), staff[-1][0])
>>> staff.append("ds'4")
>>> for note in abjad.select.notes(staff):
... abjad.attach(abjad.Articulation("."), note)
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
- \staccato
e'16
- \staccato
}
cs'4
- \staccato
\repeat tremolo 2 {
\clef "alto"
d'16
- \staccato
f'16
- \staccato
}
ds'4
- \staccato
}
>>> for component in abjad.select.components(staff):
... result = abjad.get.indicators(component)
... print(f"{repr(component):30} {repr(result)}")
Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") []
TremoloContainer("c'16 e'16") []
Note("c'16") [Articulation(name='.', direction=None, tweaks=None)]
Note("e'16") [Articulation(name='.', direction=None, tweaks=None)]
Note("cs'4") [Articulation(name='.', direction=None, tweaks=None)]
TremoloContainer("d'16 f'16") []
Note("d'16") [Clef(name='alto', hide=False), Articulation(name='.', direction=None, tweaks=None)]
Note("f'16") [Articulation(name='.', direction=None, tweaks=None)]
Note("ds'4") [Articulation(name='.', direction=None, tweaks=None)]
"""
# TODO: extend to any non-None argument
if not isinstance(argument, _score.Component):
message = "can only get indicators on component"
message += f" (not {argument!r})."
raise Exception(message)
if attributes is not None:
assert isinstance(attributes, dict), repr(attributes)
result = argument._get_indicators(
prototype=prototype, attributes=attributes, unwrap=unwrap
)
return list(result)
def leaf(argument, n: int = 0) -> typing.Optional["_score.Leaf"]:
r"""
Gets leaf ``n``.
``n`` is constrained to -1, 0, 1 for the previous, current, or next leaf.
.. container:: example
>>> staff = abjad.Staff()
>>> staff.append(abjad.Voice("c'8 d'8 e'8 f'8"))
>>> staff.append(abjad.Voice("g'8 a'8 b'8 c''8"))
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\new Voice
{
c'8
d'8
e'8
f'8
}
\new Voice
{
g'8
a'8
b'8
c''8
}
}
.. container:: example
Gets leaf **FROM** ``argument`` when ``argument`` is a leaf:
>>> leaf = staff[0][1]
>>> abjad.get.leaf(leaf, -1)
Note("c'8")
>>> abjad.get.leaf(leaf, 0)
Note("d'8")
>>> abjad.get.leaf(leaf, 1)
Note("e'8")
.. container:: example
Gets leaf **IN** ``argument`` when ``argument`` is a container:
>>> voice = staff[0]
>>> abjad.get.leaf(voice, -1)
Note("f'8")
>>> abjad.get.leaf(voice, 0)
Note("c'8")
>>> abjad.get.leaf(voice, 1)
Note("d'8")
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Clef("alto"), container[0])
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\clef "alto"
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for current_leaf in abjad.select.leaves(staff):
... previous_leaf = abjad.get.leaf(current_leaf, -1)
... next_leaf = abjad.get.leaf(current_leaf, 1)
... print(f"previous leaf: {repr(previous_leaf)}")
... print(f"current leaf: {repr(current_leaf)}")
... print(f"next leaf: {repr(next_leaf)}")
... print("---")
previous leaf: None
current leaf: Note("c'4")
next leaf: Note("cs'16")
---
previous leaf: Note("c'4")
current leaf: Note("cs'16")
next leaf: Note("d'4")
---
previous leaf: Note("cs'16")
current leaf: Note("d'4")
next leaf: Chord("<e' g'>16")
---
previous leaf: Note("d'4")
current leaf: Chord("<e' g'>16")
next leaf: Note("gs'16")
---
previous leaf: Chord("<e' g'>16")
current leaf: Note("gs'16")
next leaf: Note("a'16")
---
previous leaf: Note("gs'16")
current leaf: Note("a'16")
next leaf: Note("as'16")
---
previous leaf: Note("a'16")
current leaf: Note("as'16")
next leaf: Note("e'4")
---
previous leaf: Note("as'16")
current leaf: Note("e'4")
next leaf: Note("f'4")
---
previous leaf: Note("e'4")
current leaf: Note("f'4")
next leaf: Note("fs'16")
---
previous leaf: Note("f'4")
current leaf: Note("fs'16")
next leaf: None
---
.. container:: example
REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> staff.append("ds'4")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
e'16
}
cs'4
\repeat tremolo 2 {
d'16
f'16
}
ds'4
}
>>> for current_leaf in abjad.select.leaves(staff):
... previous_leaf = abjad.get.leaf(current_leaf, -1)
... next_leaf = abjad.get.leaf(current_leaf, 1)
... print(f"previous leaf: {repr(previous_leaf)}")
... print(f"current leaf: {repr(current_leaf)}")
... print(f"next leaf: {repr(next_leaf)}")
... print("---")
previous leaf: None
current leaf: Note("c'16")
next leaf: Note("e'16")
---
previous leaf: Note("c'16")
current leaf: Note("e'16")
next leaf: Note("cs'4")
---
previous leaf: Note("e'16")
current leaf: Note("cs'4")
next leaf: Note("d'16")
---
previous leaf: Note("cs'4")
current leaf: Note("d'16")
next leaf: Note("f'16")
---
previous leaf: Note("d'16")
current leaf: Note("f'16")
next leaf: Note("ds'4")
---
previous leaf: Note("f'16")
current leaf: Note("ds'4")
next leaf: None
---
"""
return _iterate._get_leaf(argument, n=n)
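# A small sketch that walks a staff forward one leaf at a time (comments only;
# hypothetical three-note staff), combining the "IN" and "FROM" behaviors
# documented above:
#
#     import abjad
#     staff = abjad.Staff("c'8 d'8 e'8")
#     leaf = abjad.get.leaf(staff, 0)       # first leaf IN the container
#     while leaf is not None:
#         print(leaf)
#         leaf = abjad.get.leaf(leaf, 1)    # next leaf FROM the current leaf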
def lineage(argument) -> "Lineage":
r"""
Gets lineage.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... lineage = abjad.get.lineage(component)
... print(f"{repr(component)}:")
... for component_ in lineage:
... print(f" {repr(component_)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Note("c'4")
BeforeGraceContainer("cs'16")
Note("cs'16")
Note("d'4")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Chord("<e' g'>16")
Note("gs'16")
Note("a'16")
Note("as'16")
Voice("e'4", name='Music_Voice')
Note("e'4")
Note("f'4")
AfterGraceContainer("fs'16")
Note("fs'16")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice'):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Note("c'4")
BeforeGraceContainer("cs'16")
Note("cs'16")
Note("d'4")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Chord("<e' g'>16")
Note("gs'16")
Note("a'16")
Note("as'16")
Voice("e'4", name='Music_Voice')
Note("e'4")
Note("f'4")
AfterGraceContainer("fs'16")
Note("fs'16")
Note("c'4"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Note("c'4")
BeforeGraceContainer("cs'16"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
BeforeGraceContainer("cs'16")
Note("cs'16")
Note("cs'16"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
BeforeGraceContainer("cs'16")
Note("cs'16")
Note("d'4"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Note("d'4")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Chord("<e' g'>16")
Note("gs'16")
Note("a'16")
Note("as'16")
Voice("e'4", name='Music_Voice')
Note("e'4")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Chord("<e' g'>16")
Note("gs'16")
Note("a'16")
Note("as'16")
Chord("<e' g'>16"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Chord("<e' g'>16")
Note("gs'16"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Note("gs'16")
Note("a'16"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Note("a'16")
Note("as'16"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Note("as'16")
Voice("e'4", name='Music_Voice'):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Voice("e'4", name='Music_Voice')
Note("e'4")
Note("e'4"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Voice("e'4", name='Music_Voice')
Note("e'4")
Note("f'4"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Note("f'4")
AfterGraceContainer("fs'16"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
AfterGraceContainer("fs'16")
Note("fs'16")
Note("fs'16"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
AfterGraceContainer("fs'16")
Note("fs'16")
"""
if not isinstance(argument, _score.Component):
raise Exception("can only get lineage on component.")
return Lineage(argument)
def logical_tie(argument) -> "_select.LogicalTie":
r"""
Gets logical tie.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for leaf in abjad.select.leaves(staff):
... lt = abjad.get.logical_tie(leaf)
... print(f"{repr(leaf):30} {repr(lt)}")
Note("c'4") LogicalTie(items=[Note("c'4")])
Note("cs'16") LogicalTie(items=[Note("cs'16")])
Note("d'4") LogicalTie(items=[Note("d'4")])
Chord("<e' g'>16") LogicalTie(items=[Chord("<e' g'>16")])
Note("gs'16") LogicalTie(items=[Note("gs'16")])
Note("a'16") LogicalTie(items=[Note("a'16")])
Note("as'16") LogicalTie(items=[Note("as'16")])
Note("e'4") LogicalTie(items=[Note("e'4")])
Note("f'4") LogicalTie(items=[Note("f'4")])
Note("fs'16") LogicalTie(items=[Note("fs'16")])
.. container:: example
        REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> staff.append("ds'4")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
e'16
}
cs'4
\repeat tremolo 2 {
d'16
f'16
}
ds'4
}
>>> for leaf in abjad.select.leaves(staff):
... lt = abjad.get.logical_tie(leaf)
... print(f"{repr(leaf):30} {repr(lt)}")
Note("c'16") LogicalTie(items=[Note("c'16")])
Note("e'16") LogicalTie(items=[Note("e'16")])
Note("cs'4") LogicalTie(items=[Note("cs'4")])
Note("d'16") LogicalTie(items=[Note("d'16")])
Note("f'16") LogicalTie(items=[Note("f'16")])
Note("ds'4") LogicalTie(items=[Note("ds'4")])
.. container:: example
        REGRESSION. Omits spurious rest when user ties from note to rest:
>>> staff = abjad.Staff("c'4 r4")
>>> # user error; shouldn't tie note to rest:
>>> abjad.attach(abjad.Tie(), staff[0])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'4
~
r4
}
>>> abjad.get.logical_tie(staff[0])
LogicalTie(items=[Note("c'4")])
>>> abjad.get.logical_tie(staff[1])
LogicalTie(items=[Rest('r4')])
Omits spurious rest when user repeat-ties into rest from note:
>>> staff = abjad.Staff("r4 c'4")
>>> # user error; shouldn't tie note to rest:
>>> abjad.attach(abjad.RepeatTie(), staff[1])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
r4
c'4
\repeatTie
}
>>> abjad.get.logical_tie(staff[0])
LogicalTie(items=[Rest('r4')])
>>> abjad.get.logical_tie(staff[1])
LogicalTie(items=[Note("c'4")])
"""
if not isinstance(argument, _score.Leaf):
raise Exception("can only get logical tie on leaf.")
leaves = _iterate._get_logical_tie_leaves(argument)
return _select.LogicalTie(leaves)
def markup(
argument, *, direction: _enums.VerticalAlignment = None
) -> typing.List[_markups.Markup]:
"""
Gets markup.
"""
# TODO: extend to any non-none argument
if not isinstance(argument, _score.Component):
raise Exception("can only get markup on component.")
result = argument._get_markup(direction=direction)
return list(result)
def measure_number(argument) -> int:
r"""
Gets measure number.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... measure_number = abjad.get.measure_number(component)
... print(f"{repr(component):30} {measure_number}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") 1
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') 1
Note("c'4") 1
BeforeGraceContainer("cs'16") 1
Note("cs'16") 1
Note("d'4") 1
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") 1
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") 1
Chord("<e' g'>16") 1
Note("gs'16") 1
Note("a'16") 1
Note("as'16") 1
Voice("e'4", name='Music_Voice') 1
Note("e'4") 1
Note("f'4") 1
AfterGraceContainer("fs'16") 1
Note("fs'16") 1
.. container:: example
REGRESSION. Measure number of score-initial grace notes is set equal to 0:
>>> voice = abjad.Voice("c'4 d' e' f'")
>>> container = abjad.BeforeGraceContainer("b16")
>>> abjad.attach(container, voice[0])
>>> abjad.show(voice) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(voice)
>>> print(string)
\new Voice
{
\grace {
b16
}
c'4
d'4
e'4
f'4
}
>>> for component in abjad.select.components(voice):
... measure_number = abjad.get.measure_number(component)
... print(f"{repr(component):30} {measure_number}")
Voice("c'4 d'4 e'4 f'4") 1
BeforeGraceContainer('b16') 0
Note('b16') 0
Note("c'4") 1
Note("d'4") 1
Note("e'4") 1
Note("f'4") 1
.. container:: example
        REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> staff.append("ds'4")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
e'16
}
cs'4
\repeat tremolo 2 {
d'16
f'16
}
ds'4
}
>>> for component in abjad.select.components(staff):
... measure_number = abjad.get.measure_number(component)
... print(f"{repr(component):30} {measure_number}")
Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") 1
TremoloContainer("c'16 e'16") 1
Note("c'16") 1
Note("e'16") 1
Note("cs'4") 1
TremoloContainer("d'16 f'16") 1
Note("d'16") 1
Note("f'16") 1
Note("ds'4") 1
"""
if not isinstance(argument, _score.Component):
raise Exception("can only get measure number on component.")
_update._update_measure_numbers(argument)
assert isinstance(argument._measure_number, int)
return argument._measure_number
def parentage(argument) -> "_parentage.Parentage":
r"""
Gets parentage.
.. container:: example
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... parentage = abjad.get.parentage(component)
... print(f"{repr(component)}:")
... for component_ in parentage[:]:
... print(f" {repr(component_)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }"):
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice'):
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("c'4"):
Note("c'4")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
BeforeGraceContainer("cs'16"):
BeforeGraceContainer("cs'16")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("cs'16"):
Note("cs'16")
BeforeGraceContainer("cs'16")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("d'4"):
Note("d'4")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }"):
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16"):
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Chord("<e' g'>16"):
Chord("<e' g'>16")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("gs'16"):
Note("gs'16")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("a'16"):
Note("a'16")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("as'16"):
Note("as'16")
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16")
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Voice("e'4", name='Music_Voice'):
Voice("e'4", name='Music_Voice')
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("e'4"):
Note("e'4")
Voice("e'4", name='Music_Voice')
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("f'4"):
Note("f'4")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
AfterGraceContainer("fs'16"):
AfterGraceContainer("fs'16")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
Note("fs'16"):
Note("fs'16")
AfterGraceContainer("fs'16")
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice')
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }")
.. container:: example
        REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> staff.append("ds'4")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
e'16
}
cs'4
\repeat tremolo 2 {
d'16
f'16
}
ds'4
}
>>> for component in abjad.select.components(staff):
... parentage = abjad.get.parentage(component)
... print(f"{repr(component)}:")
... print(f" {repr(parentage[:])}")
Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"):
(Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"),)
TremoloContainer("c'16 e'16"):
(TremoloContainer("c'16 e'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"))
Note("c'16"):
(Note("c'16"), TremoloContainer("c'16 e'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"))
Note("e'16"):
(Note("e'16"), TremoloContainer("c'16 e'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"))
Note("cs'4"):
(Note("cs'4"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"))
TremoloContainer("d'16 f'16"):
(TremoloContainer("d'16 f'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"))
Note("d'16"):
(Note("d'16"), TremoloContainer("d'16 f'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"))
Note("f'16"):
(Note("f'16"), TremoloContainer("d'16 f'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"))
Note("ds'4"):
(Note("ds'4"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"))
"""
if not isinstance(argument, _score.Component):
message = "can only get parentage on component"
message += f" (not {argument})."
raise Exception(message)
return _parentage.Parentage(argument)
def pitches(argument) -> typing.Optional[_pcollections.PitchSet]:
r"""
Gets pitches.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... pitches = abjad.get.pitches(component)
... print(f"{repr(component):30} {pitches!s}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") {c', cs', d', e', f', fs', g', gs', a', as'}
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') {c', cs', d', e', f', fs', g', gs', a', as'}
Note("c'4") {c'}
BeforeGraceContainer("cs'16") {cs'}
Note("cs'16") {cs'}
Note("d'4") {d'}
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") {e', g', gs', a', as'}
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") {e', g', gs', a', as'}
Chord("<e' g'>16") {e', g'}
Note("gs'16") {gs'}
Note("a'16") {a'}
Note("as'16") {as'}
Voice("e'4", name='Music_Voice') {e'}
Note("e'4") {e'}
Note("f'4") {f'}
AfterGraceContainer("fs'16") {fs'}
Note("fs'16") {fs'}
"""
if not argument:
return None
generator = iterate_.pitches(argument)
return _pcollections.PitchSet.from_pitches(generator)
def report_modifications(argument) -> str:
r"""
Reports modifications.
.. container:: example
Reports container modifications:
>>> container = abjad.Container("c'8 d'8 e'8 f'8")
>>> abjad.override(container).NoteHead.color = "#red"
>>> abjad.override(container).NoteHead.style = "#'harmonic"
>>> abjad.show(container) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(container)
>>> print(string)
{
\override NoteHead.color = #red
\override NoteHead.style = #'harmonic
c'8
d'8
e'8
f'8
\revert NoteHead.color
\revert NoteHead.style
}
>>> report = abjad.get.report_modifications(container)
>>> print(report)
{
\override NoteHead.color = #red
\override NoteHead.style = #'harmonic
%%% 4 components omitted %%%
\revert NoteHead.color
\revert NoteHead.style
}
.. container:: example
Reports leaf modifications:
>>> container = abjad.Container("c'8 d'8 e'8 f'8")
>>> abjad.attach(abjad.Clef('alto'), container[0])
>>> abjad.override(container[0]).NoteHead.color = "#red"
>>> abjad.override(container[0]).Stem.color = "#red"
>>> abjad.show(container) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(container)
>>> print(string)
{
\once \override NoteHead.color = #red
\once \override Stem.color = #red
\clef "alto"
c'8
d'8
e'8
f'8
}
>>> report = abjad.get.report_modifications(container[0])
>>> print(report)
slot "absolute before":
slot "before":
grob overrides:
\once \override NoteHead.color = #red
\once \override Stem.color = #red
slot "opening":
commands:
\clef "alto"
slot "contents slot":
leaf body:
c'8
slot "closing":
slot "after":
slot "absolute after":
"""
if isinstance(argument, _score.Container):
bundle = _format.bundle_format_contributions(argument)
result: typing.List[str] = []
for slot in ("before", "open brackets", "opening"):
lines = argument._get_format_contributions_for_slot(slot, bundle)
result.extend(lines)
line = f" %%% {len(argument)} components omitted %%%"
result.append(line)
for slot in ("closing", "close brackets", "after"):
lines = argument._get_format_contributions_for_slot(slot, bundle)
result.extend(lines)
return "\n".join(result)
elif isinstance(argument, _score.Leaf):
return _format._report_leaf_format_contributions(argument)
else:
return f"only defined for components: {argument}."
def sounding_pitch(argument) -> _pitch.NamedPitch:
r"""
Gets sounding pitch of note.
.. container:: example
>>> staff = abjad.Staff("d''8 e''8 f''8 g''8")
>>> piccolo = abjad.Piccolo()
>>> abjad.attach(piccolo, staff[0])
>>> abjad.iterpitches.transpose_from_sounding_pitch(staff)
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
d'8
e'8
f'8
g'8
}
>>> for note in abjad.select.notes(staff):
... pitch = abjad.get.sounding_pitch(note)
... print(f"{repr(note):10} {repr(pitch)}")
Note("d'8") NamedPitch("d''")
Note("e'8") NamedPitch("e''")
Note("f'8") NamedPitch("f''")
Note("g'8") NamedPitch("g''")
"""
if not isinstance(argument, _score.Note):
raise Exception("can only get sounding pitch of note.")
return _inspect._get_sounding_pitch(argument)
def sounding_pitches(argument) -> _pcollections.PitchSet:
r"""
Gets sounding pitches.
.. container:: example
>>> staff = abjad.Staff("<c''' e'''>4 <d''' fs'''>4")
>>> glockenspiel = abjad.Glockenspiel()
>>> abjad.attach(glockenspiel, staff[0])
>>> abjad.iterpitches.transpose_from_sounding_pitch(staff)
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
<c' e'>4
<d' fs'>4
}
>>> for chord in abjad.select.chords(staff):
... pitches = abjad.get.sounding_pitches(chord)
... print(f"{repr(chord):20} {pitches!s}")
Chord("<c' e'>4") {c''', e'''}
Chord("<d' fs'>4") {d''', fs'''}
"""
# TODO: extend to any non-none argument
if not isinstance(argument, _score.Chord):
raise Exception("can only get sounding pitches of chord.")
result = _inspect._get_sounding_pitches(argument)
return _pcollections.PitchSet(result)
def sustained(argument) -> bool:
r"""
Is true when ``argument`` is sustained.
.. container:: example
>>> tuplet = abjad.Tuplet((3, 2), "c'4 ~ c' ~ c'")
>>> abjad.show(tuplet) # doctest: +SKIP
.. container:: example
>>> string = abjad.lilypond(tuplet)
>>> print(string)
\tweak text #tuplet-number::calc-fraction-text
\times 3/2 {
c'4
~
c'4
~
c'4
}
>>> abjad.get.sustained(tuplet)
True
"""
lt_head_count = 0
leaves = _select.leaves(argument)
for leaf in leaves:
lt = logical_tie(leaf)
if lt.head is leaf:
lt_head_count += 1
if lt_head_count == 0:
return True
lt = logical_tie(leaves[0])
if lt.head is leaves[0] and lt_head_count == 1:
return True
return False
def timespan(argument, in_seconds: bool = False) -> _timespan.Timespan:
r"""
Gets timespan.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
\grace {
cs'16
}
d'4
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
a'16
as'16
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
}
>>
\oneVoice
\afterGrace
f'4
{
fs'16
}
}
}
>>> for component in abjad.select.components(staff):
... timespan = abjad.get.timespan(component)
... print(f"{repr(component):30} {repr(timespan)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Timespan(Offset((0, 1)), Offset((1, 1)))
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Timespan(Offset((0, 1)), Offset((1, 1)))
Note("c'4") Timespan(Offset((0, 1)), Offset((1, 4)))
BeforeGraceContainer("cs'16") Timespan(Offset((1, 4), displacement=Duration(-1, 16)), Offset((1, 4)))
Note("cs'16") Timespan(Offset((1, 4), displacement=Duration(-1, 16)), Offset((1, 4)))
Note("d'4") Timespan(Offset((1, 4)), Offset((1, 2)))
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Timespan(Offset((1, 2)), Offset((3, 4)))
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Timespan(Offset((1, 2)), Offset((1, 2), displacement=Duration(1, 4)))
Chord("<e' g'>16") Timespan(Offset((1, 2)), Offset((1, 2), displacement=Duration(1, 16)))
Note("gs'16") Timespan(Offset((1, 2), displacement=Duration(1, 16)), Offset((1, 2), displacement=Duration(1, 8)))
Note("a'16") Timespan(Offset((1, 2), displacement=Duration(1, 8)), Offset((1, 2), displacement=Duration(3, 16)))
Note("as'16") Timespan(Offset((1, 2), displacement=Duration(3, 16)), Offset((1, 2), displacement=Duration(1, 4)))
Voice("e'4", name='Music_Voice') Timespan(Offset((1, 2)), Offset((3, 4)))
Note("e'4") Timespan(Offset((1, 2), displacement=Duration(1, 4)), Offset((3, 4)))
Note("f'4") Timespan(Offset((3, 4)), Offset((1, 1)))
AfterGraceContainer("fs'16") Timespan(Offset((1, 1), displacement=Duration(-1, 16)), Offset((1, 1)))
Note("fs'16") Timespan(Offset((1, 1), displacement=Duration(-1, 16)), Offset((1, 1)))
.. container:: example
        REGRESSION. Works with tremolo containers:
>>> staff = abjad.Staff()
>>> staff.append(abjad.TremoloContainer(2, "c'16 e'"))
>>> staff.append("cs'4")
>>> staff.append(abjad.TremoloContainer(2, "d'16 f'"))
>>> staff.append("ds'4")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\repeat tremolo 2 {
c'16
e'16
}
cs'4
\repeat tremolo 2 {
d'16
f'16
}
ds'4
}
>>> for component in abjad.select.components(staff):
... timespan = abjad.get.timespan(component)
... print(f"{repr(component):30} {repr(timespan)}")
Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") Timespan(Offset((0, 1)), Offset((1, 1)))
TremoloContainer("c'16 e'16") Timespan(Offset((0, 1)), Offset((1, 4)))
Note("c'16") Timespan(Offset((0, 1)), Offset((1, 8)))
Note("e'16") Timespan(Offset((1, 8)), Offset((1, 4)))
Note("cs'4") Timespan(Offset((1, 4)), Offset((1, 2)))
TremoloContainer("d'16 f'16") Timespan(Offset((1, 2)), Offset((3, 4)))
Note("d'16") Timespan(Offset((1, 2)), Offset((5, 8)))
Note("f'16") Timespan(Offset((5, 8)), Offset((3, 4)))
Note("ds'4") Timespan(Offset((3, 4)), Offset((1, 1)))
.. container:: example
        REGRESSION. Works with selection:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'4
d'4
e'4
f'4
}
>>> abjad.get.timespan(staff[:3])
Timespan(Offset((0, 1)), Offset((3, 4)))
"""
return _inspect._get_timespan(argument, in_seconds=in_seconds)
def wrapper(
argument,
prototype: _typings.Prototype = None,
*,
attributes: typing.Dict = None,
):
r"""
Gets wrapper.
.. container:: example
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Clef("alto"), container[0])
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> for note in abjad.select.notes(staff):
... abjad.attach(abjad.Articulation("."), note)
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
- \staccato
\grace {
cs'16
- \staccato
}
d'4
- \staccato
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\clef "alto"
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
- \staccato
a'16
- \staccato
as'16
- \staccato
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
- \staccato
}
>>
\oneVoice
\afterGrace
f'4
- \staccato
{
fs'16
- \staccato
}
}
}
REGRESSION. Works with grace notes (and containers):
>>> for component in abjad.select.components(staff):
... wrapper = abjad.get.wrapper(component, abjad.Articulation)
... print(f"{repr(component):30} {repr(wrapper)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") None
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') None
Note("c'4") Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())
BeforeGraceContainer("cs'16") None
Note("cs'16") Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())
Note("d'4") Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") None
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") None
Chord("<e' g'>16") Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='>', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())
Note("gs'16") Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())
Note("a'16") Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())
Note("as'16") Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())
Voice("e'4", name='Music_Voice') None
Note("e'4") Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())
Note("f'4") Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())
AfterGraceContainer("fs'16") None
Note("fs'16") Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())
    Raises exception when more than one indicator of ``prototype`` attaches to
``argument``.
"""
if attributes is not None:
assert isinstance(attributes, dict), repr(attributes)
return indicator(argument, prototype=prototype, unwrap=False)
def wrappers(
argument,
prototype: _typings.Prototype = None,
*,
attributes: typing.Dict = None,
):
r"""
Gets wrappers.
.. container:: example
REGRESSION. Works with grace notes (and containers):
>>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice")
>>> container = abjad.BeforeGraceContainer("cs'16")
>>> abjad.attach(container, music_voice[1])
>>> container = abjad.on_beat_grace_container(
... "g'16 gs' a' as'", music_voice[2:3]
... )
>>> abjad.attach(abjad.Clef("alto"), container[0])
>>> abjad.attach(abjad.Articulation(">"), container[0])
>>> container = abjad.AfterGraceContainer("fs'16")
>>> abjad.attach(container, music_voice[3])
>>> staff = abjad.Staff([music_voice])
>>> for note in abjad.select.notes(staff):
... abjad.attach(abjad.Articulation("."), note)
>>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff])
>>> abjad.show(lilypond_file) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\context Voice = "Music_Voice"
{
c'4
- \staccato
\grace {
cs'16
- \staccato
}
d'4
- \staccato
<<
\context Voice = "On_Beat_Grace_Container"
{
\set fontSize = #-3
\clef "alto"
\slash
\voiceOne
<
\tweak font-size 0
\tweak transparent ##t
e'
g'
>16
- \accent
[
(
gs'16
- \staccato
a'16
- \staccato
as'16
- \staccato
)
]
}
\context Voice = "Music_Voice"
{
\voiceTwo
e'4
- \staccato
}
>>
\oneVoice
\afterGrace
f'4
- \staccato
{
fs'16
- \staccato
}
}
}
>>> for component in abjad.select.components(staff):
... result = abjad.get.wrappers(component, abjad.Articulation)
... print(f"{repr(component):30} {repr(result)}")
Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") []
Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') []
Note("c'4") [Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())]
BeforeGraceContainer("cs'16") []
Note("cs'16") [Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())]
Note("d'4") [Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())]
Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") []
OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") []
Chord("<e' g'>16") [Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='>', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())]
Note("gs'16") [Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())]
Note("a'16") [Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())]
Note("as'16") [Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())]
Voice("e'4", name='Music_Voice') []
Note("e'4") [Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())]
Note("f'4") [Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())]
AfterGraceContainer("fs'16") []
Note("fs'16") [Wrapper(annotation=None, context=None, deactivate=False, indicator=Articulation(name='.', direction=None, tweaks=None), synthetic_offset=None, tag=Tag())]
"""
if attributes is not None:
assert isinstance(attributes, dict), repr(attributes)
return indicators(argument, prototype=prototype, unwrap=False)
class Lineage(collections.abc.Sequence):
r'''
Lineage of a component.
.. container:: example
>>> score = abjad.Score()
>>> staff = abjad.Staff(
... r"""\new Voice = "Treble_Voice" { c'4 }""",
... name='Treble_Staff',
... )
>>> score.append(staff)
>>> bass = abjad.Staff(
... r"""\new Voice = "Bass_Voice" { b,4 }""",
... name='Bass_Staff',
... )
>>> score.append(bass)
.. docs::
>>> string = abjad.lilypond(score)
>>> print(string)
\new Score
<<
\context Staff = "Treble_Staff"
{
\context Voice = "Treble_Voice"
{
c'4
}
}
\context Staff = "Bass_Staff"
{
\context Voice = "Bass_Voice"
{
b,4
}
}
>>
>>> for component in abjad.get.lineage(score):
... component
...
Score("{ { c'4 } } { { b,4 } }", simultaneous=True)
Staff("{ c'4 }", name='Treble_Staff')
Voice("c'4", name='Treble_Voice')
Note("c'4")
Staff('{ b,4 }', name='Bass_Staff')
Voice('b,4', name='Bass_Voice')
Note('b,4')
>>> bass_voice = score['Bass_Voice']
>>> for component in abjad.get.lineage(bass_voice):
... component
...
Score("{ { c'4 } } { { b,4 } }", simultaneous=True)
Staff('{ b,4 }', name='Bass_Staff')
Voice('b,4', name='Bass_Voice')
Note('b,4')
'''
__slots__ = ("_component", "_components")
def __init__(self, component=None):
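        # Lineage is the component's ancestors (root first), the component itself,
        # then all of its descendants, flattened into one ordered list.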
if component is not None:
assert hasattr(component, "_timespan"), repr(component)
self._component = component
components = []
if component is not None:
components.extend(reversed(parentage(component)[1:]))
components.append(component)
components.extend(descendants(component)[1:])
self._components = components
def __getitem__(self, argument):
"""
Gets ``argument``.
Returns component or tuple.
"""
return self.components.__getitem__(argument)
def __len__(self):
"""
Gets length of lineage.
Returns int.
"""
return len(self._components)
@property
def component(self):
"""
The component from which the lineage was derived.
"""
return self._component
@property
def components(self):
"""
Gets components.
Returns tuple.
"""
return self._components
|
Abjad/abjad
|
abjad/get.py
|
Python
|
gpl-3.0
| 156,692
|
#!/usr/local/bin/python3.5
#coding: utf-8
#Author: Wu Xi <xi_wu@dell.com>
#Purpose: LifeCycle (contained in TSR log) log analyzer
#from tabulate import tabulate as T
import xml.etree.ElementTree as ET
import argparse
import datetime
from tabulate import tabulate as T
#Create parser
parser = argparse.ArgumentParser(description='LifeCycle events analyzer')
#Add arguments
parser.add_argument('-f','--logfile', dest='logfile', help='Specify LC event log in xml format', required=True)
parser.add_argument('-s','--severity', dest='severity', help='Specify Severity level of the logs: c: critical|w: warning|i: info', required=True)
parser.add_argument('-d','--dcim', dest='dcim', help='Specify name of DCIM XML file', required=True)
args = vars(parser.parse_args())
lc = args['logfile']
sev = args['severity']
dcim = args['dcim']
tree = ET.parse(lc)
severity_dict = {'c': 'Critical', 'w': 'Warning', 'i': 'Informational'}
#Elements list declaration
ctrl_list = ['ProductName', 'ControllerFirmwareVersion']
sys_list = ['Model', 'ChassisServiceTag', 'BIOSVersionString', 'LifecycleControllerVersion']
#ins_dict = {'DCIM_SystemView': sys_list, 'DCIM_ControllerView': ctrl_list}
ins_dict = {'DCIM_SystemView': sys_list}
attrib_list = ['Category', 'Timestamp']
keys_list = ['MessageID', 'Message']
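#Accumulators filled below: sysinfo gets one row of system identifiers from the
#DCIM inventory XML; messages gets one row per LifeCycle event. Both are printed
#as tables via tabulate in __main__.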
messages = []
sysinfo = []
#Fetch values from element
def getValue(instances, property):
s = []
for i in instances:
for p in property:
p_xpath = './PROPERTY[@NAME="{0}"]'.format(p)
v = i.find(p_xpath)
s.append(v.find('VALUE').text)
#print("{0}: {1}".format(p, v.find('VALUE').text))
sysinfo.append(s)
#Define a function to get Version Strings from TSR log
def getVer(dcim):
tree = ET.parse(dcim)
for k in ins_dict.keys():
ins_xpath = './/MESSAGE/SIMPLEREQ/VALUE.NAMEDINSTANCE/INSTANCE[@CLASSNAME="{0}"]'.format(k)
instances = tree.findall(ins_xpath)
getValue(instances, ins_dict[k])
return 0
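#Flatten one <Event> element into a row: [Category, Timestamp, MessageID, Message]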
def getEvent(messageElement):
i = []
[ i.append(messageElement.attrib[a]) for a in attrib_list ]
[ i.append(messageElement.find(k).text) for k in keys_list ]
messages.append(i)
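#Sort key: parse the event's ISO-8601 timestamp (with numeric UTC offset) into a datetime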
def timeFormat(message):
ds = message[1]
return datetime.datetime.strptime(ds, '%Y-%m-%dT%H:%M:%S%z')
if __name__ == "__main__":
if sev not in severity_dict.keys():
parser.print_help()
exit()
getVer(dcim)
xpath_str = './Event[@Severity="{0}"]'.format(severity_dict[sev])
elist = tree.findall(xpath_str)
[ getEvent(e) for e in elist ]
messages_sorted = sorted(messages, key = timeFormat, reverse=True)
print(T(sysinfo, headers=['Model', 'Service Tag', 'BIOS', 'iDRAC'], tablefmt="psql"))
print(T(messages_sorted, headers=['Category', 'Timestamp', 'MessageID', 'Message'], tablefmt="psql"))
|
harpsichord/wuxi-scripts
|
Dell/lcAnalyzer.py
|
Python
|
gpl-3.0
| 2,844
|
import sys
import time
from datetime import datetime
import tornado.ioloop
import tornado.web
from tornado.log import enable_pretty_logging
from commons.sql_class import SQLMaxMilhas
import MySQLdb
from routes import ROUTES
PORT = 5000
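# Application wires the route table with debug enabled, records start-up time,
# and opens the MySQL-backed SQLMaxMilhas connection once at process start
# (SQLMaxMilhas presumably keeps that connection in shared state, since the
# return value is not stored here).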
class Application(tornado.web.Application):
def __init__(self, ROUTES):
settings = dict(
debug=True
)
super(Application, self).__init__(ROUTES, **settings)
self.up_time = int(time.time())
self.up_time_iso = datetime.now().isoformat(' ')
self.request_counter = 0
SQLMaxMilhas(MySQLdb.connect(host="ec2-18-231-63-241.sa-east-1.compute.amazonaws.com", user="gustavo",
passwd="123456", db="maxmilhas"))
if __name__ == "__main__":
enable_pretty_logging()
args = sys.argv
args.append("--log_file_prefix=max-milhas.log")
tornado.options.parse_command_line(args)
app = Application(ROUTES)
app.listen(PORT)
tornado.ioloop.IOLoop.current().start()
|
gunasper/test_mm
|
max_milhas/app.py
|
Python
|
gpl-3.0
| 1,018
|
__author__ = 'beto'
|
b-cube/restparql
|
api/db/__init__.py
|
Python
|
gpl-3.0
| 20
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pac.py
#
# Copyright (C) 2013 Antergos
#
# This code is based on previous work by Rémy Oudompheng <remy@archlinux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
""" Module interface to pyalpm """
import traceback
import sys
import math
import logging
try:
import pyalpm
except ImportError:
logging.error(_("pyalpm not found! This installer won't work."))
try:
import pacman.config as config
except ImportError:
import config
import queue
class Pac(object):
""" Comunicates with libalpm using pyalpm """
def __init__(self, conf_path="/etc/pacman.conf", callback_queue=None):
self.callback_queue = callback_queue
self.conflict_to_remove = None
# Some download indicators (used in cb_dl callback)
self.last_dl_filename = None
self.last_dl_progress = 0
self.last_dl_total = 0
# Store package total download size
self.total_download_size = 0
self.last_event = {}
        if conf_path is not None:
self.config = config.PacmanConfig(conf_path)
self.handle = self.config.initialize_alpm()
# Set callback functions
self.handle.logcb = self.cb_log
self.handle.dlcb = self.cb_dl
self.handle.totaldlcb = self.cb_totaldl
self.handle.eventcb = self.cb_event
self.handle.questioncb = self.cb_conv
self.handle.progresscb = self.cb_progress
def finalize(self, t):
""" Commit a transaction """
try:
t.prepare()
t.commit()
except pyalpm.error:
line = traceback.format_exc()
logging.error(line)
t.release()
return False
t.release()
return True
def init_transaction(self, options={}):
""" Transaction initialization """
try:
t = self.handle.init_transaction(
cascade = options.get('cascade', False),
nodeps = options.get('nodeps', False),
force = options.get('force', False),
dbonly = options.get('dbonly', False),
downloadonly = options.get('downloadonly', False),
needed = options.get('needed', False),
nosave = options.get('nosave', False),
recurse = (options.get('recursive', 0) > 0),
recurseall = (options.get('recursive', 0) > 1),
unneeded = options.get('unneeded', False),
alldeps = (options.get('mode', None) == pyalpm.PKG_REASON_DEPEND),
allexplicit = (options.get('mode', None) == pyalpm.PKG_REASON_EXPLICIT))
except pyalpm.error:
line = traceback.format_exc()
logging.error(line)
t = None
finally:
return t
def do_refresh(self):
""" Sync databases like pacman -Sy """
force = True
for db in self.handle.get_syncdbs():
t = self.init_transaction()
db.update(force)
t.release()
return 0
def do_install(self, pkgs, conflicts=[], options={}):
""" Install a list of packages like pacman -S """
logging.debug(_("Cnchi will install a list of packages like pacman -S"))
repos = dict((db.name, db) for db in self.handle.get_syncdbs())
targets = self.get_targets(pkgs, conflicts)
if len(targets) == 0:
logging.error(_("No targets found"))
return 1
t = self.init_transaction(options)
if t is None:
return 1
pkg_names = []
for pkg in targets:
# Avoid duplicates
if pkg.name not in pkg_names:
logging.debug(_("Adding %s to transaction"), pkg.name)
t.add_pkg(pkg)
pkg_names.append(pkg.name)
logging.debug(_("Finalize transaction..."))
ok = self.finalize(t)
return (0 if ok else 1)
def get_targets(self, pkgs, conflicts=[]):
""" Get the list of packages needed to install package list 'pkgs' """
if len(pkgs) == 0:
return []
repos = dict((db.name, db) for db in self.handle.get_syncdbs())
targets = []
for name in pkgs:
ok, pkg = self.find_sync_package(name, repos)
if ok:
# Check that added package is not in our conflicts list
# Ex: gnome-extra adds brasero, then we don't want xfburn (which is a default) to be installed
if pkg.name not in conflicts:
targets.append(pkg)
else:
# Can't find this one, check if it's a group
group_pkgs = self.get_group_pkgs(name)
                if group_pkgs is not None:
# It's a group
for pkg in group_pkgs:
# Check that added package is not in our conflicts list
# Ex: connman conflicts with netctl(openresolv),
# which is installed by default with base group
if pkg.name not in conflicts and pkg.name not in pkgs:
targets.append(pkg)
else:
# No, it wasn't neither a package nor a group. Show error message and continue.
logging.error(_("Can't find a package or group called '%s'"), name)
return targets
def find_sync_package(self, pkgname, syncdbs):
""" Finds a package name in a list of DBs """
for db in syncdbs.values():
pkg = db.get_pkg(pkgname)
if pkg is not None:
return True, pkg
return False, "Package '%s' was not found." % pkgname
def get_group_pkgs(self, group):
""" Get group packages """
for repo in self.handle.get_syncdbs():
grp = repo.read_grp(group)
if grp is None:
continue
else:
name, pkgs = grp
return pkgs
return None
def queue_event(self, event_type, event_text=""):
""" Queues events to the event list in the GUI thread """
if event_type in self.last_event:
if self.last_event[event_type] == event_text:
# Do not enqueue the same event twice
return
self.last_event[event_type] = event_text
if event_type == "error":
# Format message to show file, function, and line where the error was issued
import inspect
# Get the previous frame in the stack, otherwise it would be this function
f = inspect.currentframe().f_back.f_code
# Dump the message + the name of this function to the log.
event_text = "%s: %s in %s:%i" % (event_text, f.co_name, f.co_filename, f.co_firstlineno)
if self.callback_queue is None:
print(event_type, event_text)
if event_type == "error":
sys.exit(1)
else:
return
try:
self.callback_queue.put_nowait((event_type, event_text))
except queue.Full as err:
pass
if event_type == "error":
# We've queued a fatal event so we must exit installer_process process
# wait until queue is empty (is emptied in slides.py, in the GUI thread), then exit
self.callback_queue.join()
sys.exit(1)
def get_version(self):
return "Cnchi running on pyalpm v%s - libalpm v%s" % (pyalpm.version(), pyalpm.alpmversion())
def get_versions(self):
return (pyalpm.version(), pyalpm.alpmversion())
# Callback functions
def cb_conv(self, *args):
pass
def cb_totaldl(self, total_size):
""" Stores total download size for use in cb_progress """
self.total_download_size = total_size
def cb_event(self, ID, event, tupel):
""" Converts action ID to descriptive text and enqueues it to the events queue """
action = ""
        if ID == 1:
            action = _('Checking dependencies...')
        elif ID == 3:
            action = _('Checking file conflicts...')
        elif ID == 5:
            action = _('Resolving dependencies...')
        elif ID == 7:
            action = _('Checking inter conflicts...')
        elif ID == 9:
            # action = _('Installing...')
            action = ""
        elif ID == 11:
            action = _('Removing...')
        elif ID == 13:
            action = _('Upgrading...')
        elif ID == 15:
            action = _('Checking integrity...')
        elif ID == 17:
            action = _('Loading packages files...')
        elif ID == 26:
            action = _('Configuring...')
        elif ID == 27:
            action = _('Downloading a file')
else:
action = ""
if len(action) > 0:
self.queue_event('info', action)
def cb_log(self, level, line):
""" Log pyalpm warning and error messages """
_logmask = pyalpm.LOG_ERROR | pyalpm.LOG_WARNING
# Only manage error and warning messages
if not (level & _logmask):
return
if level & pyalpm.LOG_ERROR:
logging.error(line)
elif level & pyalpm.LOG_WARNING:
logging.warning(line)
#elif level & pyalpm.LOG_DEBUG:
# logging.debug(line)
#elif level & pyalpm.LOG_FUNCTION:
# pass
def cb_progress(self, target, percent, n, i):
""" Shows install progress """
if target:
msg = _("Installing %s (%d/%d)") % (target, i, n)
percent = i / n
else:
msg = _("Checking and loading packages... (%d targets)") % n
percent = percent / 100
self.queue_event('info', msg)
self.queue_event('percent', percent)
def cb_dl(self, filename, tx, total):
""" Shows downloading progress """
# Check if a new file is coming
if filename != self.last_dl_filename or self.last_dl_total != total:
self.last_dl_filename = filename
self.last_dl_total = total
self.last_dl_progress = 0
# If pacman is just updating databases total_download_size will be zero
if self.total_download_size == 0:
ext = ".db"
if filename.endswith(ext):
filename = filename[:-len(ext)]
text = _("Updating %s database") % filename
else:
ext = ".pkg.tar.xz"
if filename.endswith(ext):
filename = filename[:-len(ext)]
text = _("Downloading %s...") % filename
self.queue_event('info', text)
self.queue_event('percent', 0)
else:
# Compute a progress indicator
if self.last_dl_total > 0:
progress = tx / self.last_dl_total
else:
# If total is unknown, use log(kBytes)²/2
progress = (math.log(1 + tx / 1024) ** 2 / 2) / 100
# Update progress only if it has grown
if progress > self.last_dl_progress:
#logging.debug("filename [%s], tx [%d], total [%d]", filename, tx, total)
self.last_dl_progress = progress
self.queue_event('percent', progress)
''' Test case '''
if __name__ == "__main__":
import gettext
_ = gettext.gettext
try:
alpm = Pac("/etc/pacman.conf")
except Exception as err:
logging.error(err)
        raise RuntimeError("Can't initialize pyalpm: %s" % err)
#alpm.do_refresh()
pacman_options = {}
pacman_options["downloadonly"] = True
alpm.do_install(pkgs=["base"], conflicts=[], options=pacman_options)
|
prescott66/Cnchi
|
src/pacman/pac.py
|
Python
|
gpl-3.0
| 12,667
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011, 2012 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
# $Revision$
# $Author$
# $Date$
#
"""Unit test module weewx.stats"""
from __future__ import with_statement
import os
import sys
import syslog
import time
import unittest
import math
import configobj
import weeutil.weeutil
import weewx.stats
import gen_fake_data
import user.schemas
config_path = "testgen.conf"
cwd = None
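# Common carries the shared test logic: setUp loads testgen.conf and (re)generates
# the fake archive/stats databases; the concrete database bindings come from the
# TestSqlite/TestMySQL subclasses at the bottom of the file.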
class Common(unittest.TestCase):
def setUp(self):
global config_path
global cwd
weewx.debug = 1
syslog.openlog('test_stats', syslog.LOG_CONS)
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
# Save and set the current working directory in case some service changes it.
if not cwd:
cwd = os.getcwd()
else:
os.chdir(cwd)
        try:
            config_dict = configobj.ConfigObj(config_path, file_error=True)
        except IOError:
            sys.stderr.write("Unable to open configuration file %s" % config_path)
# Reraise the exception (this will eventually cause the program to exit)
raise
except configobj.ConfigObjError:
sys.stderr.write("Error while parsing configuration file %s" % config_path)
raise
self.archive_db_dict = config_dict['Databases'][self.archive_db]
self.stats_db_dict = config_dict['Databases'][self.stats_db]
# This will generate the test databases if necessary:
gen_fake_data.configDatabases(self.archive_db_dict, self.stats_db_dict)
def tearDown(self):
pass
def test_create_stats(self):
with weewx.stats.StatsDb.open(self.stats_db_dict) as stats:
self.assertItemsEqual(sorted(stats.statsTypes), sorted([stat_tuple[0] for stat_tuple in user.schemas.defaultStatsSchema]))
self.assertEqual(stats.connection.columnsOf('barometer'), ['dateTime', 'min', 'mintime', 'max', 'maxtime', 'sum', 'count'])
self.assertEqual(stats.connection.columnsOf('wind'), ['dateTime', 'min', 'mintime', 'max', 'maxtime', 'sum', 'count', 'gustdir', 'xsum', 'ysum', 'squaresum', 'squarecount'])
def testScalarTally(self):
with weewx.stats.StatsDb.open(self.stats_db_dict) as stats:
with weewx.archive.Archive.open(self.archive_db_dict) as archive:
# Pick a random day, say 15 March:
start_ts = int(time.mktime((2010,3,15,0,0,0,0,0,-1)))
stop_ts = int(time.mktime((2010,3,16,0,0,0,0,0,-1)))
# Sanity check that this is truly the start of day:
self.assertEqual(start_ts, weeutil.weeutil.startOfDay(start_ts))
allStats = stats._getDayStats(start_ts)
# Test it against some types
# Should also test monthly, yearly summaries
for stats_type in ['barometer', 'outTemp', 'rain']:
# Now test all the aggregates:
for aggregate in ['min', 'max', 'sum', 'count', 'avg']:
# Compare to the main archive:
res = archive.getSql("SELECT %s(%s) FROM archive WHERE dateTime>? AND dateTime <=?;" % (aggregate, stats_type), (start_ts, stop_ts))
# From StatsDb:
allStats_res = getattr(allStats[stats_type], aggregate)
self.assertAlmostEqual(allStats_res, res[0], msg="Value check. Failing type %s, aggregate: %s" % (stats_type, aggregate))
# Check the times of min and max as well:
if aggregate in ['min','max']:
res2 = archive.getSql("SELECT dateTime FROM archive WHERE %s = ? AND dateTime>? AND dateTime <=?" % (stats_type,), (res[0], start_ts, stop_ts))
stats_time = getattr(allStats[stats_type], aggregate+'time')
self.assertEqual(stats_time, res2[0], "Time check. Failing type %s, aggregate: %s" % (stats_type, aggregate))
def testWindTally(self):
with weewx.stats.StatsDb.open(self.stats_db_dict) as stats:
with weewx.archive.Archive.open(self.archive_db_dict) as archive:
# Pick a random day, say 15 March:
start_ts = int(time.mktime((2010,3,15,0,0,0,0,0,-1)))
stop_ts = int(time.mktime((2010,3,16,0,0,0,0,0,-1)))
# Sanity check that this is truly the start of day:
self.assertEqual(start_ts, weeutil.weeutil.startOfDay(start_ts))
allStats = stats._getDayStats(start_ts)
# Test all the aggregates:
for aggregate in ['min', 'max', 'sum', 'count', 'avg']:
if aggregate == 'max':
res = archive.getSql("SELECT MAX(windGust) FROM archive WHERE dateTime>? AND dateTime <=?;", (start_ts, stop_ts))
else:
res = archive.getSql("SELECT %s(windSpeed) FROM archive WHERE dateTime>? AND dateTime <=?;" % (aggregate, ), (start_ts, stop_ts))
# From StatsDb:
allStats_res = getattr(allStats['wind'], aggregate)
self.assertAlmostEqual(allStats_res, res[0])
# Check the times of min and max as well:
if aggregate == 'min':
resmin = archive.getSql("SELECT dateTime FROM archive WHERE windSpeed = ? AND dateTime>? AND dateTime <=?", (res[0], start_ts, stop_ts))
self.assertEqual(allStats['wind'].mintime, resmin[0])
elif aggregate == 'max':
resmax = archive.getSql("SELECT dateTime FROM archive WHERE windGust = ? AND dateTime>? AND dateTime <=?", (res[0], start_ts, stop_ts))
self.assertEqual(allStats['wind'].maxtime, resmax[0])
# Check RMS:
(squaresum, count) = archive.getSql("SELECT SUM(windSpeed*windSpeed), COUNT(windSpeed) from archive where dateTime>? AND dateTime<=?;", (start_ts, stop_ts))
rms = math.sqrt(squaresum/count) if count else None
self.assertAlmostEqual(allStats['wind'].rms, rms)
def testTags(self):
with weewx.stats.StatsDb.open(self.stats_db_dict) as stats:
with weewx.archive.Archive.open(self.archive_db_dict) as archive:
spans = {'day' : weeutil.weeutil.TimeSpan(time.mktime((2010,3,15,0,0,0,0,0,-1)),
time.mktime((2010,3,16,0,0,0,0,0,-1))),
'week' : weeutil.weeutil.TimeSpan(time.mktime((2010,3,14,0,0,0,0,0,-1)),
time.mktime((2010,3,21,0,0,0,0,0,-1))),
'month': weeutil.weeutil.TimeSpan(time.mktime((2010,3,01,0,0,0,0,0,-1)),
time.mktime((2010,4,01,0,0,0,0,0,-1))),
'year' : weeutil.weeutil.TimeSpan(time.mktime((2010,1,01,0,0,0,0,0,-1)),
time.mktime((2011,1,01,0,0,0,0,0,-1)))}
# This may not necessarily execute in the order specified above:
for span in spans:
start_ts = spans[span].start
stop_ts = spans[span].stop
tagStats = weewx.stats.TaggedStats(stats, stop_ts,
rain_year_start=1,
heatbase=(65.0, 'degree_F', 'group_temperature'),
coolbase=(65.0, 'degree_F', 'group_temperature'))
# Cycle over the statistical types:
for stats_type in ('barometer', 'outTemp', 'rain'):
# Now test all the aggregates:
for aggregate in ('min', 'max', 'sum', 'count', 'avg'):
# Compare to the main archive:
res = archive.getSql("SELECT %s(%s) FROM archive WHERE dateTime>? AND dateTime <=?;" % (aggregate, stats_type), (start_ts, stop_ts))
archive_result = res[0]
# This is how you form a tag such as tagStats.month.barometer.avg when
# all you have is strings holding the attributes:
value_helper = getattr(getattr(getattr(tagStats, span), stats_type), aggregate)
self.assertAlmostEqual(float(str(value_helper.formatted)), archive_result, places=1)
# Check the times of min and max as well:
if aggregate in ('min','max'):
res2 = archive.getSql("SELECT dateTime FROM archive WHERE %s = ? AND dateTime>? AND dateTime <=?" % (stats_type,), (archive_result, start_ts, stop_ts))
stats_value_helper = getattr(getattr(getattr(tagStats, span), stats_type), aggregate +'time')
self.assertEqual(stats_value_helper.raw, res2[0])
self.assertEqual(str(tagStats.day.barometer.avg), "30.675 inHg")
self.assertEqual(str(tagStats.day.barometer.min), "30.065 inHg")
self.assertEqual(str(tagStats.day.barometer.max), "31.000 inHg")
self.assertEqual(str(tagStats.day.barometer.mintime), "00:00")
self.assertEqual(str(tagStats.day.barometer.maxtime), "01:00")
self.assertEqual(str(tagStats.week.barometer.avg), "29.904 inHg")
self.assertEqual(str(tagStats.week.barometer.min), "29.000 inHg")
self.assertEqual(str(tagStats.week.barometer.max), "31.000 inHg")
self.assertEqual(str(tagStats.week.barometer.mintime), "01:00 on Monday")
self.assertEqual(str(tagStats.week.barometer.maxtime), "01:00 on Wednesday")
self.assertEqual(str(tagStats.month.barometer.avg), "30.021 inHg")
self.assertEqual(str(tagStats.month.barometer.min), "29.000 inHg")
self.assertEqual(str(tagStats.month.barometer.max), "31.000 inHg")
self.assertEqual(str(tagStats.month.barometer.mintime), "05-Mar-2010 00:00")
self.assertEqual(str(tagStats.month.barometer.maxtime), "03-Mar-2010 00:00")
self.assertEqual(str(tagStats.year.barometer.avg), "30.002 inHg")
self.assertEqual(str(tagStats.year.barometer.min), "29.000 inHg")
self.assertEqual(str(tagStats.year.barometer.max), "31.000 inHg")
self.assertEqual(str(tagStats.year.barometer.mintime), "04-Jan-2010 00:00")
self.assertEqual(str(tagStats.year.barometer.maxtime), "02-Jan-2010 00:00")
self.assertEqual(str(tagStats.day.outTemp.avg), "38.8°F")
self.assertEqual(str(tagStats.day.outTemp.min), "18.6°F")
self.assertEqual(str(tagStats.day.outTemp.max), "59.0°F")
self.assertEqual(str(tagStats.day.outTemp.mintime), "07:00")
self.assertEqual(str(tagStats.day.outTemp.maxtime), "19:00")
self.assertEqual(str(tagStats.week.outTemp.avg), "38.8°F")
self.assertEqual(str(tagStats.week.outTemp.min), "16.6°F")
self.assertEqual(str(tagStats.week.outTemp.max), "61.0°F")
self.assertEqual(str(tagStats.week.outTemp.mintime), "07:00 on Sunday")
self.assertEqual(str(tagStats.week.outTemp.maxtime), "19:00 on Saturday")
self.assertEqual(str(tagStats.month.outTemp.avg), "28.7°F")
self.assertEqual(str(tagStats.month.outTemp.min), "-0.9°F")
self.assertEqual(str(tagStats.month.outTemp.max), "59.0°F")
self.assertEqual(str(tagStats.month.outTemp.mintime), "01-Mar-2010 06:00")
self.assertEqual(str(tagStats.month.outTemp.maxtime), "31-Mar-2010 19:00")
self.assertEqual(str(tagStats.year.outTemp.avg), "40.0°F")
self.assertEqual(str(tagStats.year.outTemp.min), "-20.0°F")
self.assertEqual(str(tagStats.year.outTemp.max), "100.0°F")
self.assertEqual(str(tagStats.year.outTemp.mintime), "01-Jan-2010 06:00")
self.assertEqual(str(tagStats.year.outTemp.maxtime), "02-Jul-2010 19:00")
# Check the special aggregate types "exists" and "has_data":
self.assertTrue(tagStats.year.barometer.exists)
self.assertTrue(tagStats.year.barometer.has_data)
self.assertFalse(tagStats.year.bar.exists)
self.assertFalse(tagStats.year.bar.has_data)
self.assertTrue(tagStats.year.inHumidity.exists)
self.assertFalse(tagStats.year.inHumidity.has_data)
def test_rainYear(self):
with weewx.stats.StatsDb.open(self.stats_db_dict) as stats:
stop_ts = time.mktime((2011,1,01,0,0,0,0,0,-1))
# Check for a rain year starting 1-Jan
tagStats = weewx.stats.TaggedStats(stats, stop_ts,
rain_year_start=1)
self.assertEqual(str(tagStats.rainyear.rain.sum), "86.59 in")
# Do it again, for starting 1-Oct:
tagStats = weewx.stats.TaggedStats(stats, stop_ts,
rain_year_start=10)
self.assertEqual(str(tagStats.rainyear.rain.sum), "21.89 in")
def test_heatcool(self):
with weewx.stats.StatsDb.open(self.stats_db_dict) as stats:
#Test heating and cooling degree days:
stop_ts = time.mktime((2011,1,01,0,0,0,0,0,-1))
tagStats = weewx.stats.TaggedStats(stats, stop_ts,
heatbase=(65.0, 'degree_F', 'group_temperature'),
coolbase=(65.0, 'degree_F', 'group_temperature'))
self.assertEqual(str(tagStats.year.heatdeg.sum), "10150.7°F-day")
self.assertEqual(str(tagStats.year.cooldeg.sum), "1026.2°F-day")
class TestSqlite(Common):
def __init__(self, *args, **kwargs):
self.archive_db = "archive_sqlite"
self.stats_db = "stats_sqlite"
super(TestSqlite, self).__init__(*args, **kwargs)
class TestMySQL(Common):
def __init__(self, *args, **kwargs):
self.archive_db = "archive_mysql"
self.stats_db = "stats_mysql"
super(TestMySQL, self).__init__(*args, **kwargs)
def suite():
tests = ['test_create_stats', 'testScalarTally', 'testWindTally',
'testTags', 'test_rainYear', 'test_heatcool']
return unittest.TestSuite(map(TestSqlite, tests) + map(TestMySQL, tests))
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
crmorse/weewx-waterflow
|
bin/weewx/test/test_stats.py
|
Python
|
gpl-3.0
| 15,410
|
#!/usr/bin/python
# Copyright 2002, 2003 Dave Abrahams
# Copyright 2002, 2003, 2005 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", "")
t.write("lib/c.cpp", "int bar() { return 0; }\n")
t.write("lib/jamfile.jam", """\
static-lib auxilliary1 : c.cpp ;
lib auxilliary2 : c.cpp ;
""")
def reset():
t.rm("lib/bin")
t.run_build_system(subdir='lib')
t.expect_addition("lib/bin/$toolset/debug*/" * BoostBuild.List("c.obj "
"auxilliary1.lib auxilliary2.dll"))
t.expect_nothing_more()
reset()
t.run_build_system(["link=shared"], subdir="lib")
t.expect_addition("lib/bin/$toolset/debug*/" * BoostBuild.List("c.obj "
"auxilliary1.lib auxilliary2.dll"))
t.expect_nothing_more()
reset()
t.run_build_system(["link=static"], subdir="lib")
t.expect_addition("lib/bin/$toolset/debug/link-static*/" * BoostBuild.List(
"c.obj auxilliary1.lib auxilliary2.lib"))
t.expect_nothing_more()
t.cleanup()
|
TGAC/KAT
|
deps/boost/tools/build/test/static_and_shared_library.py
|
Python
|
gpl-3.0
| 1,097
|
import socket          # import the socket module
s = socket.socket()    # create a socket object
host = '192.168.1.94'  # host IP address
port = 12397           # reserve a port for your service
s.connect((host, port))
print s.recv(1024)
s.close()              # close the connection
|
Hellrungj/CSC-412-Networking
|
client_server-project/client3.py
|
Python
|
gpl-3.0
| 210
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import glob
import json
import logging
import os
import pdb
import shlex
import sys
import time
from ..base import Utils
from ..base.Constants import *
log = logging.getLogger(__name__)
class DB_Obj(object):
'''
    basic db object, representing one row of table x;
    easy getters/setters for db objects, with no automatic write-through
    '''
    identName = '"no identifier set"'
def getDB(self, key):
assert(key in self.FIELDS)
return self.dbValues[key]
def setDB(self, key, value):
assert(key in self.FIELDS)
if not(hasattr(self, 'dbValues')):
self.dbValues = {}
#if __debug__:
# obj = self.identName if self.identName!=None else self
# log.info('Object "{0}" stores new value for key "{1}": {2}'.format(obj, key, value))
self.dbValues[key] = value
def getAllValues(self):
return self.dbValues
@classmethod
def getTableName(self):
return self.TABLE
def dbID(self):
return self.getDB('id')
def __repr__(self):
return str(self.identName)
def __copy__(self):
newObj = object.__new__(type(self))
newObj.__dict__ = self.__dict__.copy()
newObj.dbValues = self.dbValues.copy()
return newObj
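# Editor's sketch (not part of the original module): minimal usage of DB_Obj,
# assuming a hypothetical subclass that defines the TABLE and FIELDS that the
# getters/setters expect.
#
#   class User(DB_Obj):
#       TABLE = 'users'
#       FIELDS = ('id', 'name')
#
#   u = User()
#   u.setDB('name', 'alice')
#   u.getDB('name')          # -> 'alice'
#   u.getTableName()         # -> 'users'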
|
DVS-P2P/bubblestorm
|
testbed/src/testbed/models/Base.py
|
Python
|
gpl-3.0
| 1,187
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Fabric_PyPOA14.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
voliveirajr/Fabric_PyPOA14
|
manage.py
|
Python
|
gpl-3.0
| 257
|
def maximo(datos):
    """Return the largest value in the list (maximo = maximum)."""
    maximo = 0
    for i in datos:
        if i > maximo:
            maximo = i
    return maximo
def minimo(datos):
    """Return the smallest value in the list (minimo = minimum)."""
    minimo = 999999
    for i in datos:
        if i < minimo:
            minimo = i
    return minimo
def promedio(datos):
    """Return the average of the list (promedio = mean)."""
    suma = 0
    for i in datos:
        suma+=i
    return (suma/len(datos))
#datos = [6,7,7,7,6,6,7]
def moda(datos):
    """Return a list with the most frequent value(s) in the list (moda = mode)."""
    repeticiones = 0
    for i in datos:
        apariciones = datos.count(i)
        if apariciones>repeticiones:
            repeticiones = apariciones
    modas = []
    for i in datos:
        apariciones = datos.count(i)
        if apariciones == repeticiones and i not in modas:
            modas.append(i)
    return modas
def DobleDe(n):
    """Return twice the given number."""
    return (2*n)
lista = [7,3,4.5,6.2,5.5,5,3,5,4,1.7,6.5,7,7,7]
print("Lista original: ",lista)
print("El maximo es: ",maximo(lista))
print("El minimo es: ",minimo(lista))
print("El promedio es: ",promedio(lista))
print("La moda es: ",moda(lista))
print("El doble de",lista[0],"es: ",DobleDe(lista[0]))
|
xbash/LabUNAB
|
06_funciones/funciones-max-min-moda-promedio.py
|
Python
|
gpl-3.0
| 1,079
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import bmesh
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.utils.sv_bmesh_utils import bmesh_from_pydata
from sverchok.data_structure import (dataCorrect, updateNode,
SvSetSocketAnyType, SvGetSocketAnyType,
Vector_generate)
class SvVolumeNode(bpy.types.Node, SverchCustomTreeNode):
''' Volume '''
bl_idname = 'SvVolumeNode'
bl_label = 'Volume Node'
bl_icon = 'OUTLINER_OB_EMPTY'
def draw_buttons(self, context, layout):
pass
def sv_init(self, context):
self.inputs.new('VerticesSocket', 'Vers', 'Vers')
self.inputs.new('StringsSocket', "Pols", "Pols")
self.outputs.new('StringsSocket', "Volume", "Volume")
def process(self):
if self.outputs['Volume'].is_linked and self.inputs['Vers'].is_linked:
vertices = Vector_generate(dataCorrect(SvGetSocketAnyType(self, self.inputs['Vers'])))
faces = dataCorrect(SvGetSocketAnyType(self, self.inputs['Pols']))
out = []
for verts_obj, faces_obj in zip(vertices, faces):
# this is for one object
bme = bmesh_from_pydata(verts_obj, [], faces_obj)
geom_in = bme.verts[:]+bme.edges[:]+bme.faces[:]
bmesh.ops.recalc_face_normals(bme, faces=bme.faces[:])
# calculation itself
out.append(bme.calc_volume())
bme.clear()
bme.free()
if self.outputs['Volume'].is_linked:
SvSetSocketAnyType(self, 'Volume', out)
'''
A solution that blew my mind; do not delete.
I have to investigate it here.
def Volume(self, bme):
verts = obj_data.vertices # array of vertices
obj_data.calc_tessface()
faces = obj_data.tessfaces # array of faces
VOLUME = 0; # VOLUME OF THE OBJECT
for f in faces:
fverts = f.vertices # getting face's vertices
ab = verts[fverts[0]].co
ac = verts[fverts[1]].co
ad = verts[fverts[2]].co
# calculating determinator
det = (ab[0]*ac[1]*ad[2]) - (ab[0]*ac[2]*ad[1]) - \
(ab[1]*ac[0]*ad[2]) + (ab[1]*ac[2]*ad[0]) + \
(ab[2]*ac[0]*ad[1]) - (ab[2]*ac[1]*ad[0])
VOLUME += det/6
'''
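# Editor's note: the snippet quoted in the docstring above sums signed
# tetrahedron volumes. For each (triangulated) face (a, b, c) of a closed mesh,
# det([a, b, c]) / 6 is the signed volume of the tetrahedron spanned by the
# origin and that face, so summing it over all faces gives the mesh volume.
# A stand-alone sketch (hypothetical helper, not part of this node):
#
#   def signed_volume(verts, tris):
#       total = 0.0
#       for i0, i1, i2 in tris:
#           a, b, c = verts[i0], verts[i1], verts[i2]
#           det = (a[0]*b[1]*c[2] - a[0]*b[2]*c[1]
#                  - a[1]*b[0]*c[2] + a[1]*b[2]*c[0]
#                  + a[2]*b[0]*c[1] - a[2]*b[1]*c[0])
#           total += det / 6.0
#       return total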
def register():
bpy.utils.register_class(SvVolumeNode)
def unregister():
bpy.utils.unregister_class(SvVolumeNode)
if __name__ == '__main__':
register()
|
kilon/sverchok
|
nodes/analyzer/volume.py
|
Python
|
gpl-3.0
| 3,399
|
# Copyright (C) 2015 FreeIPA Project Contributors, see 'COPYING' for license.
from __future__ import print_function, absolute_import
import enum
import logging
from ipalib import api
from ipaserver.secrets.kem import IPAKEMKeys, KEMLdap
from ipaserver.secrets.client import CustodiaClient
from ipaplatform.paths import paths
from ipaplatform.constants import constants
from ipaserver.install.service import SimpleServiceInstance
from ipapython import ipautil
from ipapython import ipaldap
from ipapython.certdb import NSSDatabase
from ipaserver.install import ldapupdate
from ipaserver.install import sysupgrade
from base64 import b64decode
from jwcrypto.common import json_decode
import os
import stat
import time
import pwd
logger = logging.getLogger(__name__)
class CustodiaModes(enum.Enum):
# peer must have a CA
CA_PEER = 'Custodia CA peer'
# peer must have a CA, KRA preferred
KRA_PEER = 'Custodia KRA peer'
# any master will do
MASTER_PEER = 'Custodia master peer'
# local instance (first master)
FIRST_MASTER = 'Custodia on first master'
def get_custodia_instance(config, mode):
"""Create Custodia instance
:param config: configuration/installer object
:param mode: CustodiaModes member
:return: CustodiaInstance object
The config object must have the following attribute
*host_name*
FQDN of the new replica/master
*realm_name*
Kerberos realm
*master_host_name* (for *CustodiaModes.MASTER_PEER*)
hostname of a master (may not have a CA)
*ca_host_name* (for *CustodiaModes.CA_PEER*)
hostname of a master with CA
*kra_host_name* (for *CustodiaModes.KRA_PEER*)
hostname of a master with KRA or CA
For replicas, the instance will upload new keys and retrieve secrets
to the same host. Therefore it uses *ca_host_name* instead of
*master_host_name* to create a replica with CA.
"""
assert isinstance(mode, CustodiaModes)
logger.debug(
"Custodia client for '%r' with promotion %s.",
mode, 'yes' if mode != CustodiaModes.FIRST_MASTER else 'no'
)
if mode == CustodiaModes.CA_PEER:
# In case we install replica with CA, prefer CA host as source for
# all Custodia secret material.
custodia_peer = config.ca_host_name
elif mode == CustodiaModes.KRA_PEER:
custodia_peer = config.kra_host_name
elif mode == CustodiaModes.MASTER_PEER:
custodia_peer = config.master_host_name
elif mode == CustodiaModes.FIRST_MASTER:
custodia_peer = None
else:
raise RuntimeError("Unknown custodia mode %s" % mode)
if custodia_peer is None:
# use ldapi with local dirsrv instance
logger.debug("Custodia uses LDAPI.")
else:
logger.info("Custodia uses '%s' as master peer.", custodia_peer)
return CustodiaInstance(
host_name=config.host_name,
realm=config.realm_name,
custodia_peer=custodia_peer
)
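# Editor's sketch (not part of the original module): typical call flow,
# assuming a hypothetical installer 'config' object carrying the attributes
# described in the docstring above (host_name, realm_name, ca_host_name):
#
#   custodia = get_custodia_instance(config, CustodiaModes.CA_PEER)
#   custodia.create_instance()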
class CustodiaInstance(SimpleServiceInstance):
def __init__(self, host_name=None, realm=None, custodia_peer=None):
super(CustodiaInstance, self).__init__("ipa-custodia")
self.config_file = paths.IPA_CUSTODIA_CONF
self.server_keys = paths.IPA_CUSTODIA_KEYS
self.custodia_peer = custodia_peer
self.fqdn = host_name
self.realm = realm
@property
def ldap_uri(self):
if self.custodia_peer is None:
return ipaldap.realm_to_ldapi_uri(self.realm)
else:
return "ldap://{}".format(self.custodia_peer)
def __config_file(self):
template_file = os.path.basename(self.config_file) + '.template'
template = os.path.join(paths.USR_SHARE_IPA_DIR, template_file)
httpd_info = pwd.getpwnam(constants.HTTPD_USER)
sub_dict = dict(
IPA_CUSTODIA_CONF_DIR=paths.IPA_CUSTODIA_CONF_DIR,
IPA_CUSTODIA_KEYS=paths.IPA_CUSTODIA_KEYS,
IPA_CUSTODIA_SOCKET=paths.IPA_CUSTODIA_SOCKET,
IPA_CUSTODIA_AUDIT_LOG=paths.IPA_CUSTODIA_AUDIT_LOG,
LDAP_URI=ipaldap.realm_to_ldapi_uri(self.realm),
UID=httpd_info.pw_uid,
GID=httpd_info.pw_gid
)
conf = ipautil.template_file(template, sub_dict)
with open(self.config_file, "w") as f:
f.write(conf)
ipautil.flush_sync(f)
def create_instance(self):
if self.ldap_uri.startswith('ldapi://'):
# local case, ensure container exists
self.step("Making sure custodia container exists",
self.__create_container)
self.step("Generating ipa-custodia config file", self.__config_file)
self.step("Generating ipa-custodia keys", self.__gen_keys)
super(CustodiaInstance, self).create_instance(
gensvc_name='KEYS',
fqdn=self.fqdn,
ldap_suffix=ipautil.realm_to_suffix(self.realm),
realm=self.realm
)
sysupgrade.set_upgrade_state('custodia', 'installed', True)
def uninstall(self):
super(CustodiaInstance, self).uninstall()
keystore = IPAKEMKeys({
'server_keys': self.server_keys,
'ldap_uri': self.ldap_uri
})
keystore.remove_server_keys_file()
ipautil.remove_file(self.config_file)
sysupgrade.set_upgrade_state('custodia', 'installed', False)
def __gen_keys(self):
keystore = IPAKEMKeys({
'server_keys': self.server_keys,
'ldap_uri': self.ldap_uri
})
keystore.generate_server_keys()
def upgrade_instance(self):
installed = sysupgrade.get_upgrade_state("custodia", "installed")
if installed:
if (not os.path.isfile(self.server_keys)
or not os.path.isfile(self.config_file)):
logger.warning(
"Custodia server keys or config are missing, forcing "
"reinstallation of ipa-custodia."
)
installed = False
if not installed:
logger.info("Custodia service is being configured")
self.create_instance()
else:
old_config = open(self.config_file).read()
self.__config_file()
new_config = open(self.config_file).read()
if new_config != old_config:
logger.info("Restarting Custodia")
self.restart()
mode = os.stat(self.server_keys).st_mode
if stat.S_IMODE(mode) != 0o600:
logger.info("Secure server.keys mode")
os.chmod(self.server_keys, 0o600)
def __create_container(self):
"""
Runs the custodia update file to ensure custodia container is present.
"""
sub_dict = {
'SUFFIX': self.suffix,
}
updater = ldapupdate.LDAPUpdate(sub_dict=sub_dict)
updater.update([os.path.join(paths.UPDATES_DIR, '73-custodia.update')])
def import_ra_key(self):
cli = self._get_custodia_client()
# please note that ipaCert part has to stay here for historical
# reasons (old servers expect you to ask for ra/ipaCert during
# replication as they store the RA agent cert in an NSS database
# with this nickname)
cli.fetch_key('ra/ipaCert')
def import_dm_password(self):
cli = self._get_custodia_client()
cli.fetch_key('dm/DMHash')
def _wait_keys(self):
timeout = api.env.replication_wait_timeout
deadline = int(time.time()) + timeout
logger.debug("Waiting up to %s seconds to see our keys "
"appear on host %s", timeout, self.ldap_uri)
konn = KEMLdap(self.ldap_uri)
saved_e = None
while True:
try:
return konn.check_host_keys(self.fqdn)
except Exception as e:
# Print message to console only once for first error.
if saved_e is None:
# FIXME: Change once there's better way to show this
# message in installer output,
print(
" Waiting for keys to appear on host: {}, please "
"wait until this has completed.".format(
self.ldap_uri)
)
# log only once for the same error
if not isinstance(e, type(saved_e)):
logger.debug(
"Transient error getting keys: '%s'", e)
saved_e = e
if int(time.time()) > deadline:
raise RuntimeError("Timed out trying to obtain keys.")
time.sleep(1)
def _get_custodia_client(self):
if self.custodia_peer is None:
raise ValueError("Can't replicate secrets without Custodia peer")
# Before we attempt to fetch keys from this host, make sure our public
# keys have been replicated there.
self._wait_keys()
return CustodiaClient(
client_service='host@{}'.format(self.fqdn),
keyfile=self.server_keys, keytab=paths.KRB5_KEYTAB,
server=self.custodia_peer, realm=self.realm
)
def _get_keys(self, cacerts_file, cacerts_pwd, data):
# Fetch all needed certs one by one, then combine them in a single
# PKCS12 file
prefix = data['prefix']
certlist = data['list']
cli = self._get_custodia_client()
with NSSDatabase(None) as tmpdb:
tmpdb.create_db()
# Cert file password
crtpwfile = os.path.join(tmpdb.secdir, 'crtpwfile')
with open(crtpwfile, 'w+') as f:
f.write(cacerts_pwd)
for nickname in certlist:
value = cli.fetch_key(os.path.join(prefix, nickname), False)
v = json_decode(value)
pk12pwfile = os.path.join(tmpdb.secdir, 'pk12pwfile')
with open(pk12pwfile, 'w+') as f:
f.write(v['export password'])
pk12file = os.path.join(tmpdb.secdir, 'pk12file')
with open(pk12file, 'wb') as f:
f.write(b64decode(v['pkcs12 data']))
tmpdb.run_pk12util([
'-k', tmpdb.pwd_file,
'-n', nickname,
'-i', pk12file,
'-w', pk12pwfile
])
# Add CA certificates
self.export_ca_certs_nssdb(tmpdb, True)
# Now that we gathered all certs, re-export
ipautil.run([
paths.PKCS12EXPORT,
'-d', tmpdb.secdir,
'-p', tmpdb.pwd_file,
'-w', crtpwfile,
'-o', cacerts_file
])
def get_ca_keys(self, cacerts_file, cacerts_pwd):
certlist = ['caSigningCert cert-pki-ca',
'ocspSigningCert cert-pki-ca',
'auditSigningCert cert-pki-ca',
'subsystemCert cert-pki-ca']
data = {'prefix': 'ca',
'list': certlist}
self._get_keys(cacerts_file, cacerts_pwd, data)
def get_kra_keys(self, cacerts_file, cacerts_pwd):
certlist = ['auditSigningCert cert-pki-kra',
'storageCert cert-pki-kra',
'subsystemCert cert-pki-ca',
'transportCert cert-pki-kra']
data = {'prefix': 'ca',
'list': certlist}
self._get_keys(cacerts_file, cacerts_pwd, data)
def __start(self):
super(CustodiaInstance, self).__start()
def __enable(self):
super(CustodiaInstance, self).__enable()
|
encukou/freeipa
|
ipaserver/install/custodiainstance.py
|
Python
|
gpl-3.0
| 11,748
|
"""
Dragon 64 config
================
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014-2015 by the DragonPy team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import logging
from dragonpy import constants
from dragonpy.Dragon32.config import Dragon32Cfg
from dragonpy.Dragon64.Dragon64_rom import Dragon64RomIC17, Dragon64RomIC18
from dragonpy.Dragon64.mem_info import get_dragon_meminfo
class Dragon64Cfg(Dragon32Cfg):
CONFIG_NAME = constants.DRAGON64
MACHINE_NAME = "Dragon 64"
RAM_START = 0x0000
    # 1 KB RAM is not runnable and raises an error
    # 2-8 KB  - the BASIC interpreter will be initialized, but every
    #           statement will end with an OM ERROR (Out of Memory)
    # 16 KB   - is usable
    # RAM_END = 0x03FF # 1KB
    # RAM_END = 0x07FF # 2KB # BASIC will always raise an OM ERROR!
    # RAM_END = 0x0FFF # 4KB # BASIC will always raise an OM ERROR!
    # RAM_END = 0x1FFF # 8KB # BASIC will always raise an OM ERROR!
# RAM_END = 0x3FFF # 16KB # usable
RAM_END = 0x7FFF # 32KB
ROM_START = 0x8000
ROM_END = 0xFFFF
# ROM size: 0x8000 == 32768 Bytes
"""
$8000-$bfff - d64_ic17.rom - size: $3fff (dez.: 16383) Bytes
$c000-$ffff - d64_ic18.rom - size: $3fff (dez.: 16383) Bytes
"""
DEFAULT_ROMS = (
Dragon64RomIC17(address=0x8000, max_size=0x4000),
Dragon64RomIC18(address=0xC000, max_size=0x4000),
)
def __init__(self, cmd_args):
super().__init__(cmd_args)
if self.verbosity <= logging.ERROR:
self.mem_info = get_dragon_meminfo()
self.periphery_class = None # Dragon32Periphery
def get_initial_RAM(self):
"""
init the Dragon RAM
See: http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=5&t=4444
"""
mem_FF = [0xff for _ in range(4)]
mem_00 = [0x00 for _ in range(4)]
mem = []
for _ in range(self.RAM_SIZE // 8):
mem += mem_FF
mem += mem_00
return mem
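    # Editor's note: the loop above fills RAM with alternating blocks of four
    # 0xff and four 0x00 bytes, so the first 16 bytes look like:
    #   [0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
    #    0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00]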
config = Dragon64Cfg
|
jedie/DragonPy
|
dragonpy/Dragon64/config.py
|
Python
|
gpl-3.0
| 2,105
|
import numpy as np
from stcad.source_dev.utilities import *
from stcad.source_dev.objects import *
from stcad.source_dev.chip import *
import gdsCAD as cad
from stcad.source_dev.drums import *
chipsize = 10000
chip = Base_Chip('TEST', chipsize, chipsize,label=False)
# --------------------------------------------- Array -----------------------------------
start_pos = [[100,0]]
# start_pos = [[-4000,4000]]
array_separation = 300
array_indicators = ["A","B","C","D"]
for i in range(len(start_pos)):
array_indicator = array_indicators[i]
# Array = simple_drum_Array(drum_sizes=drum_sizes,drum_gaps=drum_gaps,tether_widths=tether_widths, separation=100)
# for i in range(len(Array.get_dependencies())):
# chip.add(Array.get_dependencies()[i]._objects)
position = start_pos[i]
drum_sizes = [1,2,4,10,15]
drum_gaps = [2,4,10, 30]
tether_widths = [0.5,2,4,10,15]
board_array = diving_board_Array(drum_sizes, drum_gaps, tether_widths, separation = 50)
board_array.translate(position=position)
board_array.add_to_chip(Base_Chip=chip)
position = [board_array._bounding_box[1,0]+array_separation,start_pos[i][1]]
drum_sizes = [20, 10, 5, 3]
tether_widths = [0.5, 1, 2, 4]
numbers_of_tethers = [5,6,7]
circ_array1 = circ_gap_drum_Array(drum_sizes=drum_sizes,tether_widths=tether_widths,numbers_of_tethers=numbers_of_tethers,array_indicator=array_indicators[i])
circ_array1.translate(position=position)
# Array1.add_labl()
print(circ_array1._bounding_box)
circ_array1.add_to_chip(Base_Chip=chip)
position = [board_array._bounding_box[0,0], board_array._bounding_box[0,1]-array_separation]
drum1 = circ_gap_drum(drum_size=20,tether_width=1,number_of_tethers=7)
circuit_drum1 = circuit_drum(drum=drum1, oversize = 80, lead_length = 100, lead_width = 70, cut_out_height = 150, cut_out_width = 150)
circuit_drum1.translate(position=position)
circuit_drum1.add_to_chip(Base_Chip=chip)
position = [circuit_drum1._bounding_box[0,0], circuit_drum1._bounding_box[0,1]-array_separation]
drum2 = circ_gap_drum(drum_size=20,tether_width=2,number_of_tethers=7)
circuit_drum2 = circuit_drum(drum=drum2, oversize = 80, lead_length = 100, lead_width = 70, cut_out_height = 150, cut_out_width = 150)
circuit_drum2.translate(position=position)
circuit_drum2.add_to_chip(Base_Chip=chip)
position = [circuit_drum1._bounding_box[1,0]+array_separation, circuit_drum1._bounding_box[1,1]]
circ_array2 = circ_gap_drum_Array(drum_sizes=drum_sizes,tether_widths=tether_widths,numbers_of_tethers=numbers_of_tethers,array_indicator=array_indicators[i])
circ_array2.translate(position=position)
# Array1.add_labl()
print(circ_array2._bounding_box)
circ_array2.add_to_chip(Base_Chip=chip)
position = [circ_array2._bounding_box[1,0]+array_separation, circ_array2._bounding_box[1,1]-array_separation]
drum3 = circ_gap_drum(drum_size=20,tether_width=3,number_of_tethers=7)
circuit_drum3 = circuit_drum(drum=drum3, oversize = 80, lead_length = 100, lead_width = 50, cut_out_height = 150, cut_out_width = 150)
circuit_drum3.translate(position=position)
circuit_drum3.add_to_chip(Base_Chip=chip)
position = [circuit_drum3._bounding_box[0,0], circuit_drum3._bounding_box[0,1]-array_separation]
drum4 = circ_gap_drum(drum_size=20,tether_width=4,number_of_tethers=7)
circuit_drum4 = circuit_drum(drum=drum4, oversize = 80, lead_length = 100, lead_width = 50, cut_out_height = 150, cut_out_width = 150)
circuit_drum4.translate(position=position)
circuit_drum4.add_to_chip(Base_Chip=chip)
position = [circuit_drum3._bounding_box[1,0]+array_separation, circuit_drum3._bounding_box[1,1]]
drum5 = circ_gap_drum(drum_size=10,tether_width=2,number_of_tethers=7)
circuit_drum5 = circuit_drum(drum=drum5, oversize = 80, lead_length = 100, lead_width = 40, cut_out_height = 150, cut_out_width = 150)
circuit_drum5.translate(position=position)
circuit_drum5.add_to_chip(Base_Chip=chip)
position = [circuit_drum5._bounding_box[0,0], circuit_drum5._bounding_box[0,1]-array_separation]
drum6 = circ_gap_drum(drum_size=10,tether_width=2,number_of_tethers=8)
circuit_drum6 = circuit_drum(drum=drum6, oversize = 80, lead_length = 100, lead_width = 40, cut_out_height = 150, cut_out_width = 150)
circuit_drum6.translate(position=position)
circuit_drum6.add_to_chip(Base_Chip=chip)
position = [circuit_drum5._bounding_box[1,0]+array_separation, -1000]
drum_sizes = [1,2,4,10,15]
drum_gaps = [2,4,10, 30]
tether_widths = [0.5,2,4,10,15]
board_array2 = diving_board_Array(drum_sizes, drum_gaps, tether_widths, separation = 50)
board_array2.translate(position=position)
board_array2.add_to_chip(Base_Chip=chip)
position = [circ_array1._bounding_box[1,0]+array_separation+500, 0]
drum7 = circ_gap_drum(drum_size=30,tether_width=4,number_of_tethers=10)
circuit_drum7 = circuit_drum(drum=drum7, oversize = 80, lead_length = 150, lead_width = 100, cut_out_height = 150, cut_out_width = 150)
circuit_drum7.translate(position=position)
circuit_drum7.add_to_chip(Base_Chip=chip)
position = [circuit_drum7._bounding_box[0,0], circuit_drum7._bounding_box[0,1]-array_separation]
drum8 = circ_gap_drum(drum_size=10,tether_width=4,number_of_tethers=10)
circuit_drum8 = circuit_drum(drum=drum8, oversize = 80, lead_length = 150, lead_width = 50, cut_out_height = 150, cut_out_width = 150)
circuit_drum8.translate(position=position)
circuit_drum8.add_to_chip(Base_Chip=chip)
position = [circuit_drum7._bounding_box[1,0]+array_separation, circuit_drum7._bounding_box[1,1]]
drum9 = circ_gap_drum(drum_size=10,tether_width=2,number_of_tethers=10)
circuit_drum9 = circuit_drum(drum=drum9, oversize = 80, lead_length = 150, lead_width = 50, cut_out_height = 150, cut_out_width = 150)
circuit_drum9.translate(position=position)
circuit_drum9.add_to_chip(Base_Chip=chip)
position = [circuit_drum9._bounding_box[0,0], circuit_drum9._bounding_box[0,1]-array_separation]
drum10 = circ_gap_drum(drum_size=10,tether_width=2,number_of_tethers=5)
circuit_drum10 = circuit_drum(drum=drum10, oversize = 80, lead_length = 150, lead_width = 50, cut_out_height = 150, cut_out_width = 150)
circuit_drum10.translate(position=position)
circuit_drum10.add_to_chip(Base_Chip=chip)
points_square = [[0,0], [1000,0],[1000,-1000],[0,-1000],[0,0]]
square = cad.core.Boundary(points_square)
square.translate([1500,-1500])
chip.add(square)
chip.save_to_gds(show=True, save=True,loc='')
|
feschmidt/stcad
|
testing_scripts/rick_drum_array2.py
|
Python
|
gpl-3.0
| 6,388
|
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
    def __add__(self, oth):
        """ Returns a new point containing the sum of the two points' coordinates """
        return Point(self.x+oth.x,self.y+oth.y)
    def __iadd__(self, oth):
        self.x += oth.x
        self.y += oth.y
        return self
def __sub__(self, i):
return Point(self.x-i.x, self.y-i.y)
def __mul__(self, i):
if isinstance(i, Point):
return Point(self.x*i.x, self.y*i.y)
else:
return Point(self.x*i, self.y*i)
def __truediv__(self, i):
if isinstance(i, Point):
return Point(self.x/i.x, self.y/i.y)
else:
return Point(self.x/i, self.y/i)
def __floordiv__(self, i):
if isinstance(i, Point):
return Point(self.x//i.x, self.y//i.y)
else:
return Point(self.x//i, self.y//i)
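if __name__ == "__main__":
    # Editor's sketch: a quick, self-contained demonstration of the operator
    # overloads defined above (illustrative values only).
    a = Point(1, 2)
    b = Point(3, 4)
    c = a + b            # Point(4, 6)
    d = b - a            # Point(2, 2)
    e = a * 2            # Point(2, 4)
    f = b / 2            # Point(1.5, 2.0)
    print(c.x, c.y, d.x, d.y, e.x, e.y, f.x, f.y)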
|
TheUnderscores/time-robot
|
src/point.py
|
Python
|
gpl-3.0
| 913
|
import os
from PyQt4 import QtCore, QtGui
import common
class FileList(QtGui.QListWidget):
files_dropped = QtCore.pyqtSignal()
def __init__(self, parent=None):
super(FileList, self).__init__(parent)
self.setAcceptDrops(True)
self.setIconSize(QtCore.QSize(32, 32))
# drag and drop label
self.drop_label = QtGui.QLabel(QtCore.QString('Drag and drop\nfiles here'), parent=self)
self.drop_label.setAlignment(QtCore.Qt.AlignCenter)
self.drop_label.setStyleSheet('background: url({0}/drop_files.png) no-repeat center center; color: #999999;'.format(common.supercipher_gui_dir))
self.drop_label.hide()
self.filenames = []
self.update()
def update(self):
# file list should have a background image if empty
if len(self.filenames) == 0:
self.drop_label.show()
else:
self.drop_label.hide()
def resizeEvent(self, event):
self.drop_label.setGeometry(0, 0, self.width(), self.height())
def dragEnterEvent(self, event):
if event.mimeData().hasUrls:
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
for url in event.mimeData().urls():
filename = str(url.toLocalFile())
self.add_file(filename)
else:
event.ignore()
self.files_dropped.emit()
def add_file(self, filename):
if filename not in self.filenames:
self.filenames.append(filename)
basename = os.path.basename(filename)
fileinfo = QtCore.QFileInfo(filename)
ip = QtGui.QFileIconProvider()
icon = ip.icon(fileinfo)
if os.path.isfile(filename):
size = self.human_readable_filesize(fileinfo.size())
item = QtGui.QListWidgetItem('{0} ({1})'.format(basename, size))
item.setToolTip(QtCore.QString(size))
else:
item = QtGui.QListWidgetItem(basename)
item.setIcon(icon)
self.addItem(item)
def human_readable_filesize(self, b):
thresh = 1024.0
if b < thresh:
return '{0} B'.format(b)
units = ['KiB','MiB','GiB','TiB','PiB','EiB','ZiB','YiB']
u = 0
b /= thresh
while b >= thresh:
b /= thresh
u += 1
return '{0} {1}'.format(round(b, 1), units[u])
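    # Editor's note: worked examples for the conversion above:
    #   human_readable_filesize(500)      -> '500 B'
    #   human_readable_filesize(1536)     -> '1.5 KiB'
    #   human_readable_filesize(1048576)  -> '1.0 MiB'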
class FileSelection(QtGui.QVBoxLayout):
def __init__(self):
super(FileSelection, self).__init__()
# file list
self.file_list = FileList()
self.file_list.currentItemChanged.connect(self.update)
self.file_list.files_dropped.connect(self.update)
# buttons
self.add_files_button = QtGui.QPushButton('Add Files')
self.add_files_button.clicked.connect(self.add_files)
self.add_dir_button = QtGui.QPushButton('Add Folder')
self.add_dir_button.clicked.connect(self.add_dir)
self.delete_button = QtGui.QPushButton('Delete')
self.delete_button.clicked.connect(self.delete_file)
button_layout = QtGui.QHBoxLayout()
button_layout.addWidget(self.add_files_button)
button_layout.addWidget(self.add_dir_button)
button_layout.addWidget(self.delete_button)
# add the widgets
self.addWidget(self.file_list)
self.addLayout(button_layout)
self.update()
def update(self):
# delete button should be disabled if item isn't selected
current_item = self.file_list.currentItem()
if not current_item:
self.delete_button.setEnabled(False)
else:
self.delete_button.setEnabled(True)
# update the file list
self.file_list.update()
def add_files(self):
filenames = QtGui.QFileDialog.getOpenFileNames(caption='Choose files', options=QtGui.QFileDialog.ReadOnly)
if filenames:
for filename in filenames:
self.file_list.add_file(str(filename))
self.update()
def add_dir(self):
filename = QtGui.QFileDialog.getExistingDirectory(caption='Choose folder', options=QtGui.QFileDialog.ReadOnly)
if filename:
self.file_list.add_file(str(filename))
self.update()
def delete_file(self):
current_row = self.file_list.currentRow()
self.file_list.filenames.pop(current_row)
self.file_list.takeItem(current_row)
self.update()
|
micahflee/supercipher
|
supercipher_gui/file_selection.py
|
Python
|
gpl-3.0
| 4,842
|
# -*- coding: utf-8 -*-
from flask import Flask
app = Flask(__name__)
app.config.from_object('config')
from app import views
#app.debug = config.DEBUG
#app.secret_key = config.SECRET_KEY
#app.csrf_enabled = config.CSRF_ENABLED
|
OpenGrow/OpenGrow
|
app/__init__.py
|
Python
|
gpl-3.0
| 230
|
# Copyright (C) 2011-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Additional tests for the hold chain."""
from __future__ import absolute_import, unicode_literals
__metaclass__ = type
__all__ = [
]
import unittest
from zope.component import getUtility
from mailman.app.lifecycle import create_list
from mailman.chains.hold import autorespond_to_sender
from mailman.interfaces.autorespond import IAutoResponseSet, Response
from mailman.interfaces.usermanager import IUserManager
from mailman.testing.helpers import configuration, get_queue_messages
from mailman.testing.layers import ConfigLayer
class TestAutorespond(unittest.TestCase):
"""Test autorespond_to_sender()"""
layer = ConfigLayer
def setUp(self):
self._mlist = create_list('test@example.com')
# Python 2.7 has assertMultiLineEqual. Let this work without bounds.
self.maxDiff = None
self.eq = getattr(self, 'assertMultiLineEqual', self.assertEqual)
@configuration('mta', max_autoresponses_per_day=1)
def test_max_autoresponses_per_day(self):
# The last one we sent was the last one we should send today. Instead
# of sending an automatic response, send them the "no more today"
# message. Start by simulating a response having been sent to an
# address already.
anne = getUtility(IUserManager).create_address('anne@example.com')
response_set = IAutoResponseSet(self._mlist)
response_set.response_sent(anne, Response.hold)
# Trigger the sending of a "last response for today" using the default
# language (i.e. the mailing list's preferred language).
autorespond_to_sender(self._mlist, 'anne@example.com')
# So first, there should be one more hold response sent to the user.
self.assertEqual(response_set.todays_count(anne, Response.hold), 2)
# And the virgin queue should have the message in it.
messages = get_queue_messages('virgin')
self.assertEqual(len(messages), 1)
# Remove the variable headers.
message = messages[0].msg
self.assertTrue('message-id' in message)
del message['message-id']
self.assertTrue('date' in message)
del message['date']
self.eq(messages[0].msg.as_string(), """\
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Subject: Last autoresponse notification for today
From: test-owner@example.com
To: anne@example.com
Precedence: bulk
We have received a message from your address <anne@example.com>
requesting an automated response from the test@example.com mailing
list.
The number we have seen today: 1. In order to avoid problems such as
mail loops between email robots, we will not be sending you any
further responses today. Please try again tomorrow.
If you believe this message is in error, or if you have any questions,
please contact the list owner at test-owner@example.com.""")
|
hcs/mailman
|
src/mailman/chains/tests/test_hold.py
|
Python
|
gpl-3.0
| 3,645
|
from flask.ext.assets import Environment, Bundle
def register_assets(app):
assets = Environment(app)
ALL_ASSETS = {
'core_js': Bundle('js/lib/jquery.js',
'js/lib/jquery.highlight.js',
'js/lib/jquery.mobile.js',
'js/lib/underscore.js',
'js/lib/backbone.js',
'js/lib/inifiniScroll.js',
'js/lib/moment.js',
'js/model/modelsForEvents.js',
'js/utils.js',
'js/view/PageView.js',
'js/view/ListView.js',
'js/add_favorites.js',
'js/routes.js',
filters='rjsmin', output='core_%(version)s.js'),
'home_js': Bundle('js/home.js',
filters='rjsmin', output='home_%(version)s.js'),
'goingon_js': Bundle('js/goingon.js',
filters='rjsmin', output='goingon_%(version)s.js'),
'events_js': Bundle('js/events.js',
filters='rjsmin', output='events_%(version)s.js'),
'favorites_js': Bundle('js/favorites.js',
                               filters='rjsmin', output='favorites_%(version)s.js'),
'history_js': Bundle('js/history.js',
filters='rjsmin', output='history_%(version)s.js'),
'search_js': Bundle('js/search.js',
filters='rjsmin', output='search_%(version)s.js'),
'statistics_js': Bundle('js/statistics.js',
filters='rjsmin', output='statistics_%(version)s.js'),
'maps_js': Bundle('js/lib/gmaps.js',
'js/maps.js',
filters='rjsmin', output='maps_%(version)s.js'),
'style_css': Bundle('style/jquery.mobile.css',
'style/icons.css',
'style/core.css',
filters='cssmin', output='style/style_%(version)s.css')
}
assets.debug = app.config["DEBUG"]
for bundle_id, bundle in ALL_ASSETS.iteritems():
assets.register(bundle_id, bundle)
return assets
|
indico/indico-mobile
|
indicomobile/views/assets.py
|
Python
|
gpl-3.0
| 2,265
|
#!/usr/bin/python3
days_in_months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
days_in_months_leap_year = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
months_names = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
name_to_days_num = {}
for i in range(12):
name_to_days_num[months_names[i]] = days_in_months[i]
def get_num_of_days_in_month(month_name):
''' Given a month name,
return the number of days in this month'''
if month_name in name_to_days_num:
        return name_to_days_num[month_name]
else:
print('No such month')
def get_following_month(month_name):
''' Given a month name,
return the name of the following month'''
if month_name in name_to_days_num:
i = months_names.index(month_name)
return months_names[(i + 1) % 12]
else:
print('No such month')
def is_leap_year(year):
''' Return True if the year is a leap year, False otherwise'''
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
class Calendar:
    ''' A calendar of events; has an entry for every event,
    which is a mapping from event name to Date object'''
def __init__(self):
self.events = {}
def add_event(self, name, date):
        ''' Add a new entry to the calendar'''
self.events[name] = date
def is_event(self, date):
        ''' Check if the given date appears in the calendar'''
return date in self.events.values()
def get_date(self, name):
''' Return the date of the given event name'''
return self.events[name]
def get_all_events_in_month(self, month):
''' Return a dictionary with all the events in the given month
month is the number of the month '''
month_events = {}
for name in self.events.keys():
if self.events[name].month == month:
month_events[name] = self.events[name]
return month_events
class Date:
def __init__(self, day, month, year):
if not isinstance(day, int) or not isinstance(month, int) or not isinstance(year, int):
print('Date must be initialized with numbers')
return
if month < 1 or month > 12:
print('Month must be between 1 and 12')
return
if is_leap_year(year):
            if day < 1 or day > days_in_months_leap_year[month - 1]:
                print('Day must be between 1 and ', days_in_months_leap_year[month - 1])
return
else:
            if day < 1 or day > days_in_months[month - 1]:
print('Day must be between 1 and ', days_in_months[month - 1])
return
self.day = day
self.month = month
self.year = year
def __gt__(self, other):
''' Overloading operator>for dates '''
if self.year > other.year:
return True
elif self.year == other.year:
if self.month > other.month:
return True
elif self.month == other.month:
if self.day > other.day:
return True
return False
def __lt__(self, other):
''' Overloading operator<for dates '''
return other > self
def __eq__(self, other):
''' Overloading operator==for dates '''
return self.year == other.year and self.month == other.month and self.day == other.day
def __ne__(self, other):
''' Overloading operator!=for dates '''
return not (self == other)
def __le__(self, other):
''' Overloading operator<=for dates '''
return self < other or self == other
def __ge__(self, other):
''' Overloading operator>=for dates '''
return self > other or self == other
def __str__(self):
return str(self.day) + '.' + str(self.month) + '.' + str(self.year)
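if __name__ == '__main__':
    # Editor's sketch: a minimal, self-contained demonstration of Calendar and
    # Date together (illustrative events only).
    cal = Calendar()
    cal.add_event('New Year', Date(1, 1, 2024))
    cal.add_event('Birthday', Date(17, 3, 2024))
    print(cal.get_date('Birthday'))               # 17.3.2024
    print(cal.is_event(Date(1, 1, 2024)))         # True
    print(Date(1, 1, 2024) < Date(17, 3, 2024))   # True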
|
nonZero/demos-python
|
src/exercises/basic/date/date9.py
|
Python
|
gpl-3.0
| 3,920
|
"""
Module contains tools for processing files into DataFrames or other objects
"""
from __future__ import print_function
from pandas.compat import range, lrange, StringIO, lzip, zip, string_types, map
from pandas import compat
import re
import csv
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas.core.frame import DataFrame
import datetime
import pandas.core.common as com
from pandas.core.config import get_option
from pandas.io.date_converters import generic_parser
from pandas.io.common import get_filepath_or_buffer
from pandas.tseries import tools
from pandas.util.decorators import Appender
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.parser as _parser
_parser_params = """Also supports optionally iterating or breaking of the file
into chunks.
Parameters
----------
filepath_or_buffer : string or file handle / StringIO. The string could be
a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a
host is expected. For instance, a local file could be
file ://localhost/path/to/table.csv
%s
lineterminator : string (length 1), default None
Character to break file into lines. Only valid with C parser
quotechar : string (length 1)
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default None
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
Default (None) results in QUOTE_MINIMAL behavior.
skipinitialspace : boolean, default False
Skip spaces after delimiter
escapechar : string
dtype : Type name or dict of column -> type
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
compression : {'gzip', 'bz2', None}, default None
For on-the-fly decompression of on-disk data
dialect : string or csv.Dialect instance, default None
If None defaults to Excel dialect. Ignored if sep longer than 1 char
See csv.Dialect documentation for more details
header : int row number(s) to use as the column names, and the start of the
data. Defaults to 0 if no ``names`` passed, otherwise ``None``. Explicitly
pass ``header=0`` to be able to replace existing names. The header can be
a list of integers that specify row locations for a multi-index on the
columns E.g. [0,1,3]. Intervening rows that are not specified will be
skipped. (E.g. 2 in this example are skipped)
skiprows : list-like or integer
Row numbers to skip (0-indexed) or number of rows to skip (int)
at the start of the file
index_col : int or sequence or False, default None
Column to use as the row labels of the DataFrame. If a sequence is given, a
MultiIndex is used. If you have a malformed file with delimiters at the end
of each line, you might consider index_col=False to force pandas to _not_
use the first column as the index (row names)
names : array-like
List of column names to use. If file contains no header row, then you
should explicitly pass header=None
prefix : string or None (default)
Prefix to add to column numbers when no header, e.g 'X' for X0, X1, ...
na_values : list-like or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values
true_values : list
Values to consider as True
false_values : list
Values to consider as False
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
parse_dates : boolean, list of ints or names, list of lists, or dict
If True -> try parsing the index.
If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column.
If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column.
{'foo' : [1, 3]} -> parse columns 1, 3 as date and call result 'foo'
A fast-path exists for iso8601-formatted dates.
keep_date_col : boolean, default False
If True and parse_dates specifies combining multiple columns then
keep the original columns.
date_parser : function
Function to use for converting a sequence of string columns to an
array of datetime instances. The default uses dateutil.parser.parser
to do the conversion.
dayfirst : boolean, default False
DD/MM format dates, international and European format
thousands : str, default None
Thousands separator
comment : str, default None
Indicates remainder of line should not be parsed
Does not support line commenting (will return empty line)
decimal : str, default '.'
Character to recognize as decimal point. E.g. use ',' for European data
nrows : int, default None
Number of rows of file to read. Useful for reading pieces of large files
iterator : boolean, default False
Return TextFileReader object
chunksize : int, default None
Return TextFileReader object for iteration
skipfooter : int, default 0
Number of line at bottom of file to skip
converters : dict. optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
delimiter : string, default None
Alternative argument name for sep. Regular expressions are accepted.
encoding : string, default None
Encoding to use for UTF when reading/writing (ex. 'utf-8')
squeeze : boolean, default False
If the parsed data only contains one column then return a Series
na_filter: boolean, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file
usecols : array-like
Return a subset of the columns.
Results in much faster parsing time and lower memory usage.
mangle_dupe_cols: boolean, default True
Duplicate columns will be specified as 'X.0'...'X.N', rather than 'X'...'X'
tupleize_cols: boolean, default False
Leave a list of tuples on columns as is (default is to convert to
a Multi Index on the columns)
error_bad_lines: boolean, default True
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will dropped from the DataFrame that is
returned. (Only valid with C parser).
warn_bad_lines: boolean, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output. (Only valid with C parser).
infer_datetime_format : boolean, default False
If True and parse_dates is enabled for a column, attempt to infer
the datetime format to speed up the processing
Returns
-------
result : DataFrame or TextParser
"""
_csv_sep = """sep : string, default ','
Delimiter to use. If sep is None, will try to automatically determine
this. Regular expressions are accepted.
"""
_table_sep = """sep : string, default \\t (tab-stop)
Delimiter to use. Regular expressions are accepted."""
_read_csv_doc = """
Read CSV (comma-separated) file into DataFrame
%s
""" % (_parser_params % _csv_sep)
_read_table_doc = """
Read general delimited file into DataFrame
%s
""" % (_parser_params % _table_sep)
_fwf_widths = """\
colspecs : list of pairs (int, int) or 'infer'. optional
A list of pairs (tuples) giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data (default='infer').
widths : list of ints. optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
"""
_read_fwf_doc = """
Read a table of fixed-width formatted lines into DataFrame
%s
Also, 'delimiter' is used to specify the filler character of the
fields if it is not spaces (e.g., '~').
""" % (_parser_params % _fwf_widths)
def _read(filepath_or_buffer, kwds):
"Generic reader of line files."
encoding = kwds.get('encoding', None)
skipfooter = kwds.pop('skipfooter', None)
if skipfooter is not None:
kwds['skip_footer'] = skipfooter
filepath_or_buffer, _ = get_filepath_or_buffer(filepath_or_buffer,
encoding)
if kwds.get('date_parser', None) is not None:
if isinstance(kwds['parse_dates'], bool):
kwds['parse_dates'] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get('iterator', False)
nrows = kwds.pop('nrows', None)
chunksize = kwds.get('chunksize', None)
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if nrows is not None:
return parser.read(nrows)
elif chunksize or iterator:
return parser
return parser.read()
_parser_defaults = {
'delimiter': None,
'doublequote': True,
'escapechar': None,
'quotechar': '"',
'quoting': csv.QUOTE_MINIMAL,
'skipinitialspace': False,
'lineterminator': None,
'header': 'infer',
'index_col': None,
'names': None,
'prefix': None,
'skiprows': None,
'na_values': None,
'true_values': None,
'false_values': None,
'skip_footer': 0,
'converters': None,
'keep_default_na': True,
'thousands': None,
'comment': None,
# 'engine': 'c',
'parse_dates': False,
'keep_date_col': False,
'dayfirst': False,
'date_parser': None,
'usecols': None,
# 'nrows': None,
# 'iterator': False,
'chunksize': None,
'verbose': False,
'encoding': None,
'squeeze': False,
'compression': None,
'mangle_dupe_cols': True,
'tupleize_cols': False,
'infer_datetime_format': False,
}
_c_parser_defaults = {
'delim_whitespace': False,
'as_recarray': False,
'na_filter': True,
'compact_ints': False,
'use_unsigned': False,
'low_memory': True,
'memory_map': False,
'buffer_lines': None,
'error_bad_lines': True,
'warn_bad_lines': True,
'dtype': None,
'decimal': b'.'
}
_fwf_defaults = {
'colspecs': 'infer',
'widths': None,
}
_c_unsupported = set(['skip_footer'])
_python_unsupported = set(_c_parser_defaults.keys())
def _make_parser_function(name, sep=','):
def parser_f(filepath_or_buffer,
sep=sep,
dialect=None,
compression=None,
doublequote=True,
escapechar=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
skipinitialspace=False,
lineterminator=None,
header='infer',
index_col=None,
names=None,
prefix=None,
skiprows=None,
skipfooter=None,
skip_footer=0,
na_values=None,
na_fvalues=None,
true_values=None,
false_values=None,
delimiter=None,
converters=None,
dtype=None,
usecols=None,
engine='c',
delim_whitespace=False,
as_recarray=False,
na_filter=True,
compact_ints=False,
use_unsigned=False,
low_memory=_c_parser_defaults['low_memory'],
buffer_lines=None,
warn_bad_lines=True,
error_bad_lines=True,
keep_default_na=True,
thousands=None,
comment=None,
decimal=b'.',
parse_dates=False,
keep_date_col=False,
dayfirst=False,
date_parser=None,
memory_map=False,
nrows=None,
iterator=False,
chunksize=None,
verbose=False,
encoding=None,
squeeze=False,
mangle_dupe_cols=True,
tupleize_cols=False,
infer_datetime_format=False):
# Alias sep -> delimiter.
if delimiter is None:
delimiter = sep
kwds = dict(delimiter=delimiter,
engine=engine,
dialect=dialect,
compression=compression,
doublequote=doublequote,
escapechar=escapechar,
quotechar=quotechar,
quoting=quoting,
skipinitialspace=skipinitialspace,
lineterminator=lineterminator,
header=header,
index_col=index_col,
names=names,
prefix=prefix,
skiprows=skiprows,
na_values=na_values,
na_fvalues=na_fvalues,
true_values=true_values,
false_values=false_values,
keep_default_na=keep_default_na,
thousands=thousands,
comment=comment,
decimal=decimal,
parse_dates=parse_dates,
keep_date_col=keep_date_col,
dayfirst=dayfirst,
date_parser=date_parser,
nrows=nrows,
iterator=iterator,
chunksize=chunksize,
skipfooter=skipfooter or skip_footer,
converters=converters,
dtype=dtype,
usecols=usecols,
verbose=verbose,
encoding=encoding,
squeeze=squeeze,
memory_map=memory_map,
na_filter=na_filter,
compact_ints=compact_ints,
use_unsigned=use_unsigned,
delim_whitespace=delim_whitespace,
as_recarray=as_recarray,
warn_bad_lines=warn_bad_lines,
error_bad_lines=error_bad_lines,
low_memory=low_memory,
buffer_lines=buffer_lines,
mangle_dupe_cols=mangle_dupe_cols,
tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
return _read(filepath_or_buffer, kwds)
parser_f.__name__ = name
return parser_f
read_csv = _make_parser_function('read_csv', sep=',')
read_csv = Appender(_read_csv_doc)(read_csv)
read_table = _make_parser_function('read_table', sep='\t')
read_table = Appender(_read_table_doc)(read_table)
@Appender(_read_fwf_doc)
def read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds):
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, 'infer') and widths is not None:
raise ValueError("You must specify only one of 'widths' and "
"'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds['colspecs'] = colspecs
kwds['engine'] = 'python-fwf'
return _read(filepath_or_buffer, kwds)
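# Editor's note: the widths -> colspecs conversion above turns contiguous field
# widths into half-open column intervals, e.g.
#   widths=[3, 5, 2]  ->  colspecs=[(0, 3), (3, 8), (8, 10)]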
# common NA values
# no longer excluding inf representations
# '1.#INF','-1.#INF', '1.#INF000000',
_NA_VALUES = set([
'-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A', 'N/A', 'NA', '#NA',
'NULL', 'NaN', '-NaN', 'nan', '-nan', ''
])
class TextFileReader(object):
"""
Passed dialect overrides any of the related parser options
"""
def __init__(self, f, engine='python', **kwds):
self.f = f
if kwds.get('dialect') is not None:
dialect = kwds['dialect']
kwds['delimiter'] = dialect.delimiter
kwds['doublequote'] = dialect.doublequote
kwds['escapechar'] = dialect.escapechar
kwds['skipinitialspace'] = dialect.skipinitialspace
kwds['quotechar'] = dialect.quotechar
kwds['quoting'] = dialect.quoting
if kwds.get('header', 'infer') == 'infer':
kwds['header'] = 0 if kwds.get('names') is None else None
self.orig_options = kwds
# miscellanea
self.engine = engine
self._engine = None
options = self._get_options_with_defaults(engine)
self.chunksize = options.pop('chunksize', None)
self.squeeze = options.pop('squeeze', False)
# might mutate self.engine
self.options, self.engine = self._clean_options(options, engine)
if 'has_index_names' in kwds:
self.options['has_index_names'] = kwds['has_index_names']
self._make_engine(self.engine)
def _get_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in compat.iteritems(_parser_defaults):
options[argname] = kwds.get(argname, default)
for argname, default in compat.iteritems(_c_parser_defaults):
if argname in kwds:
value = kwds[argname]
if engine != 'c' and value != default:
raise ValueError('The %r option is not supported with the'
' %r engine' % (argname, engine))
else:
value = default
options[argname] = value
if engine == 'python-fwf':
for argname, default in compat.iteritems(_fwf_defaults):
options[argname] = kwds.get(argname, default)
return options
def _clean_options(self, options, engine):
result = options.copy()
sep = options['delimiter']
delim_whitespace = options['delim_whitespace']
if sep is None and not delim_whitespace:
if engine == 'c':
print('Using Python parser to sniff delimiter')
engine = 'python'
elif sep is not None and len(sep) > 1:
# wait until regex engine integrated
if engine not in ('python', 'python-fwf'):
engine = 'python'
# C engine not supported yet
if engine == 'c':
if options['skip_footer'] > 0:
engine = 'python'
if engine == 'c':
for arg in _c_unsupported:
del result[arg]
if 'python' in engine:
for arg in _python_unsupported:
del result[arg]
index_col = options['index_col']
names = options['names']
converters = options['converters']
na_values = options['na_values']
skiprows = options['skiprows']
# really delete this one
keep_default_na = result.pop('keep_default_na')
if _is_index_col(index_col):
if not isinstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result['index_col'] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not isinstance(converters, dict):
raise TypeError('Type converters must be a dict or'
' subclass, input was '
'a {0!r}'.format(type(converters).__name__))
else:
converters = {}
# Converting values to NA
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
if com.is_integer(skiprows):
skiprows = lrange(skiprows)
skiprows = set() if skiprows is None else set(skiprows)
# put stuff back
result['names'] = names
result['converters'] = converters
result['na_values'] = na_values
result['na_fvalues'] = na_fvalues
result['skiprows'] = skiprows
return result, engine
def __iter__(self):
try:
if self.chunksize:
while True:
yield self.read(self.chunksize)
else:
yield self.read()
except StopIteration:
pass
def _make_engine(self, engine='c'):
if engine == 'c':
self._engine = CParserWrapper(self.f, **self.options)
else:
if engine == 'python':
klass = PythonParser
elif engine == 'python-fwf':
klass = FixedWidthFieldParser
self._engine = klass(self.f, **self.options)
def _failover_to_python(self):
raise NotImplementedError
def read(self, nrows=None):
if nrows is not None:
if self.options.get('skip_footer'):
raise ValueError('skip_footer not supported for iteration')
ret = self._engine.read(nrows)
if self.options.get('as_recarray'):
return ret
# May alter columns / col_dict
index, columns, col_dict = self._create_index(ret)
df = DataFrame(col_dict, columns=columns, index=index)
if self.squeeze and len(df.columns) == 1:
return df[df.columns[0]]
return df
def _create_index(self, ret):
index, columns, col_dict = ret
return index, columns, col_dict
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
return self.read(nrows=size)
def _is_index_col(col):
return col is not None and col is not False
class ParserBase(object):
def __init__(self, kwds):
self.names = kwds.get('names')
self.orig_names = None
self.prefix = kwds.pop('prefix', None)
self.index_col = kwds.get('index_col', None)
self.index_names = None
self.col_names = None
self.parse_dates = kwds.pop('parse_dates', False)
self.date_parser = kwds.pop('date_parser', None)
self.dayfirst = kwds.pop('dayfirst', False)
self.keep_date_col = kwds.pop('keep_date_col', False)
self.na_values = kwds.get('na_values')
self.na_fvalues = kwds.get('na_fvalues')
self.true_values = kwds.get('true_values')
self.false_values = kwds.get('false_values')
self.tupleize_cols = kwds.get('tupleize_cols', False)
self.infer_datetime_format = kwds.pop('infer_datetime_format', False)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_format=self.infer_datetime_format
)
# validate header options for mi
self.header = kwds.get('header')
if isinstance(self.header, (list, tuple, np.ndarray)):
if kwds.get('as_recarray'):
raise ValueError("cannot specify as_recarray when "
"specifying a multi-index header")
if kwds.get('usecols'):
raise ValueError("cannot specify usecols when "
"specifying a multi-index header")
if kwds.get('names'):
raise ValueError("cannot specify names when "
"specifying a multi-index header")
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = isinstance(self.index_col, (list, tuple,
np.ndarray))
if not (is_sequence and
all(map(com.is_integer, self.index_col)) or
com.is_integer(self.index_col)):
raise ValueError("index_col must only contain row numbers "
"when specifying a multi-index header")
self._name_processed = False
@property
def _has_complex_date_col(self):
return (isinstance(self.parse_dates, dict) or
(isinstance(self.parse_dates, list) and
len(self.parse_dates) > 0 and
isinstance(self.parse_dates[0], list)))
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
name = self.index_names[i]
j = self.index_col[i]
if np.isscalar(self.parse_dates):
return (j == self.parse_dates) or (name == self.parse_dates)
else:
return (j in self.parse_dates) or (name in self.parse_dates)
def _extract_multi_indexer_columns(self, header, index_names, col_names,
passed_names=False):
""" extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers """
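        # Illustrative example (made-up values): with no index columns,
        # header == [['a', 'b'], ['x', 'y']] (two header rows) yields the
        # column tuples [('a', 'x'), ('b', 'y')] via the lzip() call below.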
if len(header) < 2:
return header[0], index_names, col_names, passed_names
# the names are the tuples of the header that are not the index cols
# 0 is the name of the index, assuming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not isinstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header.pop(-1)
index_names, names, index_col = _clean_index_names(index_names,
self.index_col)
# extract the columns
field_count = len(header[0])
def extract(r):
return tuple([r[i] for i in range(field_count) if i not in sic])
columns = lzip(*[extract(r) for r in header])
names = ic + columns
# if we find 'Unnamed' all of a single level, then our header was too
# long
for n in range(len(columns[0])):
if all(['Unnamed' in c[n] for c in columns]):
raise _parser.CParserError(
"Passed header=[%s] are too many rows for this "
"multi_index of columns"
% ','.join([str(x) for x in self.header])
)
# clean the column names (if we have an index_col)
if len(ic):
col_names = [r[0] if len(r[0]) and 'Unnamed' not in r[0] else None
for r in header]
else:
col_names = [None] * len(header)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if (not self.tupleize_cols and len(columns) and
not isinstance(columns, MultiIndex) and
all([isinstance(c, tuple) for c in columns])):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._get_simple_index(alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _,
self.index_col) = _clean_index_names(list(columns),
self.index_col)
self._name_processed = True
index = self._get_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = len(indexnamerow) - len(columns)
index = index.set_names(indexnamerow[:coffset])
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _get_simple_index(self, data, columns):
def ix(col):
if not isinstance(col, compat.string_types):
return col
raise ValueError('Index %s invalid' % col)
index = None
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.append(i)
index.append(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in reversed(sorted(to_remove)):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _get_complex_date_index(self, data, col_names):
def _get_name(icol):
if isinstance(icol, compat.string_types):
return icol
if col_names is None:
raise ValueError(('Must supply column order to use %s as '
'index') % str(icol))
for i, c in enumerate(col_names):
if i == icol:
return c
index = None
to_remove = []
index = []
for idx in self.index_col:
name = _get_name(idx)
to_remove.append(name)
index.append(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in reversed(sorted(to_remove)):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True):
arrays = []
for i, arr in enumerate(index):
if (try_parse_dates and self._should_parse_dates(i)):
arr = self._date_conv(arr)
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
if isinstance(self.na_values, dict):
col_name = self.index_names[i]
if col_name is not None:
col_na_values, col_na_fvalues = _get_na_values(
col_name, self.na_values, self.na_fvalues)
arr, _ = self._convert_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
index = MultiIndex.from_arrays(arrays, names=self.index_names)
return index
def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
converters=None):
result = {}
for c, values in compat.iteritems(dct):
conv_f = None if converters is None else converters.get(c, None)
col_na_values, col_na_fvalues = _get_na_values(c, na_values,
na_fvalues)
coerce_type = True
if conv_f is not None:
values = lib.map_infer(values, conv_f)
coerce_type = False
cvals, na_count = self._convert_types(
values, set(col_na_values) | col_na_fvalues, coerce_type)
result[c] = cvals
if verbose and na_count:
print('Filled %d NA values in column %s' % (na_count, str(c)))
return result
def _convert_types(self, values, na_values, try_num_bool=True):
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = lib.ismember(values, na_values)
na_count = mask.sum()
if na_count > 0:
if com.is_integer_dtype(values):
values = values.astype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
if try_num_bool:
try:
result = lib.maybe_convert_numeric(values, na_values, False)
except Exception:
result = values
if values.dtype == np.object_:
na_count = lib.sanitize_objects(result, na_values, False)
else:
result = values
if values.dtype == np.object_:
na_count = lib.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
result = lib.maybe_convert_bool(values,
true_values=self.true_values,
false_values=self.false_values)
return result, na_count
def _do_date_conversions(self, names, data):
# returns data, columns
if self.parse_dates is not None:
data, names = _process_date_conversion(
data, self._date_conv, self.parse_dates, self.index_col,
self.index_names, names, keep_date_col=self.keep_date_col)
return names, data
class CParserWrapper(ParserBase):
    """
    Wrapper around the C parser engine (_parser.TextReader).
    """
def __init__(self, src, **kwds):
self.kwds = kwds
kwds = kwds.copy()
self.as_recarray = kwds.get('as_recarray', False)
ParserBase.__init__(self, kwds)
if 'utf-16' in (kwds.get('encoding') or ''):
if isinstance(src, compat.string_types):
src = open(src, 'rb')
src = com.UTF8Recoder(src, kwds['encoding'])
kwds['encoding'] = 'utf-8'
# #2442
kwds['allow_leading_cols'] = self.index_col is not False
self._reader = _parser.TextReader(src, **kwds)
# XXX
self.usecols = self._reader.usecols
passed_names = self.names is None
if self._reader.header is None:
self.names = None
else:
if len(self._reader.header) > 1:
# we have a multi index in the columns
self.names, self.index_names, self.col_names, passed_names = (
self._extract_multi_indexer_columns(
self._reader.header, self.index_names, self.col_names,
passed_names
)
)
else:
self.names = list(self._reader.header[0])
if self.names is None:
if self.prefix:
self.names = ['%s%d' % (self.prefix, i)
for i in range(self._reader.table_width)]
else:
self.names = lrange(self._reader.table_width)
        # If the names were inferred (not passed by user) and usecols is
        # defined, then ensure that names refers to the used columns, not the
        # document's columns.
if self.usecols and passed_names:
col_indices = []
for u in self.usecols:
if isinstance(u, string_types):
col_indices.append(self.names.index(u))
else:
col_indices.append(u)
self.names = [n for i, n in enumerate(self.names)
if i in col_indices]
if len(self.names) < len(self.usecols):
raise ValueError("Usecols do not match names.")
self._set_noconvert_columns()
self.orig_names = self.names
if not self._has_complex_date_col:
if (self._reader.leading_cols == 0 and
_is_index_col(self.index_col)):
self._name_processed = True
(index_names, self.names,
self.index_col) = _clean_index_names(self.names,
self.index_col)
if self.index_names is None:
self.index_names = index_names
if self._reader.header is None and not passed_names:
self.index_names = [None] * len(self.index_names)
self._implicit_index = self._reader.leading_cols > 0
def _set_noconvert_columns(self):
names = self.names
def _set(x):
if com.is_integer(x):
self._reader.set_noconvert(x)
else:
self._reader.set_noconvert(names.index(x))
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
def set_error_bad_lines(self, status):
self._reader.set_error_bad_lines(int(status))
def read(self, nrows=None):
if self.as_recarray:
# what to do if there are leading columns?
return self._reader.read(nrows)
try:
data = self._reader.read(nrows)
except StopIteration:
if nrows is None:
return None, self.names, {}
else:
raise
names = self.names
if self._reader.leading_cols:
if self._has_complex_date_col:
raise NotImplementedError('file structure not yet supported')
# implicit index, no index names
arrays = []
for i in range(self._reader.leading_cols):
if self.index_col is None:
values = data.pop(i)
else:
values = data.pop(self.index_col[i])
values = self._maybe_parse_dates(values, i,
try_parse_dates=True)
arrays.append(values)
index = MultiIndex.from_arrays(arrays)
if self.usecols is not None:
names = self._filter_usecols(names)
# rename dict keys
data = sorted(data.items())
data = dict((k, v) for k, (i, v) in zip(names, data))
names, data = self._do_date_conversions(names, data)
else:
# rename dict keys
data = sorted(data.items())
# ugh, mutation
names = list(self.orig_names)
if self.usecols is not None:
names = self._filter_usecols(names)
# columns as list
alldata = [x[1] for x in data]
data = dict((k, v) for k, (i, v) in zip(names, data))
names, data = self._do_date_conversions(names, data)
index, names = self._make_index(data, alldata, names)
# maybe create a mi on the columns
names = self._maybe_make_multi_index_columns(names, self.col_names)
return index, names, data
def _filter_usecols(self, names):
# hackish
if self.usecols is not None and len(names) != len(self.usecols):
names = [name for i, name in enumerate(names)
if i in self.usecols or name in self.usecols]
return names
def _get_index_names(self):
names = list(self._reader.header[0])
idx_names = None
if self._reader.leading_cols == 0 and self.index_col is not None:
(idx_names, names,
self.index_col) = _clean_index_names(names, self.index_col)
return names, idx_names
def _maybe_parse_dates(self, values, index, try_parse_dates=True):
if try_parse_dates and self._should_parse_dates(index):
values = self._date_conv(values)
return values
def TextParser(*args, **kwds):
"""
Converts lists of lists/tuples into DataFrames with proper type inference
and optional (e.g. string to datetime) conversion. Also enables iterating
lazily over chunks of large files
Parameters
----------
data : file-like object or list
delimiter : separator character to use
dialect : str or csv.Dialect instance, default None
Ignored if delimiter is longer than 1 character
names : sequence, default
header : int, default 0
Row to use to parse column labels. Defaults to the first row. Prior
rows will be discarded
index_col : int or list, default None
Column or columns to use as the (possibly hierarchical) index
has_index_names: boolean, default False
True if the cols defined in index_col have an index name and are
not in the header
na_values : iterable, default None
Custom NA values
keep_default_na : bool, default True
thousands : str, default None
Thousands separator
comment : str, default None
Comment out remainder of line
parse_dates : boolean, default False
keep_date_col : boolean, default False
date_parser : function, default None
skiprows : list of integers
Row numbers to skip
skip_footer : int
        Number of lines at the bottom of the file to skip
encoding : string, default None
Encoding to use for UTF when reading/writing (ex. 'utf-8')
squeeze : boolean, default False
returns Series if only one column
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
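
    Hedged usage sketch (values are illustrative only)::

        rows = [['a', 'b'], ['1', '2'], ['3', '4']]
        df = TextParser(rows, header=0).read()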
"""
kwds['engine'] = 'python'
return TextFileReader(*args, **kwds)
def count_empty_vals(vals):
return sum([1 for v in vals if v == '' or v is None])
def _wrap_compressed(f, compression, encoding=None):
"""wraps compressed fileobject in a decompressing fileobject
NOTE: For all files in Python 3.2 and for bzip'd files under all Python
versions, this means reading in the entire file and then re-wrapping it in
StringIO.
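
    For example (illustrative path)::

        f = _wrap_compressed(open('data.csv.gz', 'rb'), 'gzip')

    Only 'gzip' and 'bz2' are recognized.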
"""
compression = compression.lower()
encoding = encoding or get_option('display.encoding')
if compression == 'gzip':
import gzip
f = gzip.GzipFile(fileobj=f)
if compat.PY3_2:
# 3.2's gzip doesn't support read1
f = StringIO(f.read().decode(encoding))
elif compat.PY3:
from io import TextIOWrapper
f = TextIOWrapper(f)
return f
elif compression == 'bz2':
import bz2
# bz2 module can't take file objects, so have to run through decompress
# manually
data = bz2.decompress(f.read())
if compat.PY3:
data = data.decode(encoding)
f = StringIO(data)
return f
else:
raise ValueError('do not recognize compression method %s'
% compression)
class PythonParser(ParserBase):
def __init__(self, f, **kwds):
"""
        Workhorse function for processing a nested list into a DataFrame
Should be replaced by np.genfromtxt eventually?
"""
ParserBase.__init__(self, kwds)
self.data = None
self.buf = []
self.pos = 0
self.encoding = kwds['encoding']
self.compression = kwds['compression']
self.skiprows = kwds['skiprows']
self.skip_footer = kwds['skip_footer']
self.delimiter = kwds['delimiter']
self.quotechar = kwds['quotechar']
self.escapechar = kwds['escapechar']
self.doublequote = kwds['doublequote']
self.skipinitialspace = kwds['skipinitialspace']
self.lineterminator = kwds['lineterminator']
self.quoting = kwds['quoting']
self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True)
self.usecols = kwds['usecols']
self.names_passed = kwds['names'] or None
self.has_index_names = False
if 'has_index_names' in kwds:
self.has_index_names = kwds['has_index_names']
self.verbose = kwds['verbose']
self.converters = kwds['converters']
self.thousands = kwds['thousands']
self.comment = kwds['comment']
self._comment_lines = []
if isinstance(f, compat.string_types):
f = com._get_handle(f, 'r', encoding=self.encoding,
compression=self.compression)
elif self.compression:
f = _wrap_compressed(f, self.compression, self.encoding)
# in Python 3, convert BytesIO or fileobjects passed with an encoding
elif compat.PY3 and isinstance(f, compat.BytesIO):
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=self.encoding)
# Set self.data to something that can read lines.
if hasattr(f, 'readline'):
self._make_reader(f)
else:
self.data = f
        # Get columns in two steps: infer from data, then
        # infer column indices from self.usecols if it is specified.
self._col_indices = None
self.columns, self.num_original_columns = self._infer_columns()
# Now self.columns has the set of columns that we will process.
# The original set is stored in self.original_columns.
if len(self.columns) > 1:
# we are processing a multi index column
self.columns, self.index_names, self.col_names, _ = (
self._extract_multi_indexer_columns(
self.columns, self.index_names, self.col_names
)
)
# Update list of original names to include all indices.
self.num_original_columns = len(self.columns)
else:
self.columns = self.columns[0]
# get popped off for index
self.orig_names = list(self.columns)
# needs to be cleaned/refactored
# multiple date column thing turning into a real spaghetti factory
if not self._has_complex_date_col:
(index_names,
self.orig_names, columns_) = self._get_index_name(self.columns)
self._name_processed = True
if self.index_names is None:
self.index_names = index_names
self._first_chunk = True
if self.parse_dates:
self._no_thousands_columns = self._set_no_thousands_columns()
else:
self._no_thousands_columns = None
def _set_no_thousands_columns(self):
        # Create a set of column ids that are not to be stripped of the
        # thousands separator.
noconvert_columns = set()
def _set(x):
if com.is_integer(x):
noconvert_columns.add(x)
else:
noconvert_columns.add(self.columns.index(x))
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
return noconvert_columns
def _make_reader(self, f):
sep = self.delimiter
if sep is None or len(sep) == 1:
if self.lineterminator:
raise ValueError('Custom line terminators not supported in '
'python parser (yet)')
class MyDialect(csv.Dialect):
delimiter = self.delimiter
quotechar = self.quotechar
escapechar = self.escapechar
doublequote = self.doublequote
skipinitialspace = self.skipinitialspace
quoting = self.quoting
lineterminator = '\n'
dia = MyDialect
sniff_sep = True
if sep is not None:
sniff_sep = False
dia.delimiter = sep
# attempt to sniff the delimiter
if sniff_sep:
line = f.readline()
while self.pos in self.skiprows:
self.pos += 1
line = f.readline()
line = self._check_comments([line])[0]
self.pos += 1
sniffed = csv.Sniffer().sniff(line)
dia.delimiter = sniffed.delimiter
if self.encoding is not None:
self.buf.extend(list(
com.UnicodeReader(StringIO(line),
dialect=dia,
encoding=self.encoding)))
else:
self.buf.extend(list(csv.reader(StringIO(line),
dialect=dia)))
if self.encoding is not None:
reader = com.UnicodeReader(f, dialect=dia,
encoding=self.encoding,
strict=True)
else:
reader = csv.reader(f, dialect=dia,
strict=True)
else:
def _read():
line = next(f)
pat = re.compile(sep)
yield pat.split(line.strip())
for line in f:
yield pat.split(line.strip())
reader = _read()
self.data = reader
def read(self, rows=None):
try:
content = self._get_lines(rows)
except StopIteration:
if self._first_chunk:
content = []
else:
raise
# done with first read, next time raise StopIteration
self._first_chunk = False
columns = list(self.orig_names)
if not len(content): # pragma: no cover
# DataFrame with the right metadata, even though it's length 0
return _get_empty_meta(self.orig_names,
self.index_col,
self.index_names)
# handle new style for names in index
count_empty_content_vals = count_empty_vals(content[0])
indexnamerow = None
if self.has_index_names and count_empty_content_vals == len(columns):
indexnamerow = content[0]
content = content[1:]
alldata = self._rows_to_cols(content)
data = self._exclude_implicit_index(alldata)
columns, data = self._do_date_conversions(self.columns, data)
data = self._convert_data(data)
index, columns = self._make_index(data, alldata, columns, indexnamerow)
return index, columns, data
def _exclude_implicit_index(self, alldata):
if self._implicit_index:
excl_indices = self.index_col
data = {}
offset = 0
for i, col in enumerate(self.orig_names):
while i + offset in excl_indices:
offset += 1
data[col] = alldata[i + offset]
else:
data = dict((k, v) for k, v in zip(self.orig_names, alldata))
return data
# legacy
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
return self.read(nrows=size)
def _convert_data(self, data):
# apply converters
clean_conv = {}
for col, f in compat.iteritems(self.converters):
if isinstance(col, int) and col not in self.orig_names:
col = self.orig_names[col]
clean_conv[col] = f
return self._convert_to_ndarrays(data, self.na_values, self.na_fvalues,
self.verbose, clean_conv)
def _infer_columns(self):
names = self.names
num_original_columns = 0
clear_buffer = True
if self.header is not None:
header = self.header
            # we have MultiIndex columns, so read an extra line
if isinstance(header, (list, tuple, np.ndarray)):
have_mi_columns = True
header = list(header) + [header[-1] + 1]
else:
have_mi_columns = False
header = [header]
columns = []
for level, hr in enumerate(header):
line = self._buffered_line()
while self.pos <= hr:
line = self._next_line()
unnamed_count = 0
this_columns = []
for i, c in enumerate(line):
if c == '':
if have_mi_columns:
this_columns.append('Unnamed: %d_level_%d'
% (i, level))
else:
this_columns.append('Unnamed: %d' % i)
unnamed_count += 1
else:
this_columns.append(c)
if not have_mi_columns and self.mangle_dupe_cols:
counts = {}
for i, col in enumerate(this_columns):
cur_count = counts.get(col, 0)
if cur_count > 0:
this_columns[i] = '%s.%d' % (col, cur_count)
counts[col] = cur_count + 1
elif have_mi_columns:
                    # if we have grabbed an extra line, but it's not in our
                    # format, save it in the buffer and create a blank extra
                    # line for the rest of the parsing code
if hr == header[-1]:
lc = len(this_columns)
ic = (len(self.index_col)
if self.index_col is not None else 0)
if lc != unnamed_count and lc-ic > unnamed_count:
clear_buffer = False
this_columns = [None] * lc
self.buf = [self.buf[-1]]
columns.append(this_columns)
if len(columns) == 1:
num_original_columns = len(this_columns)
if clear_buffer:
self._clear_buffer()
if names is not None:
if ((self.usecols is not None
and len(names) != len(self.usecols))
or (self.usecols is None
and len(names) != len(columns[0]))):
raise ValueError('Number of passed names did not match '
'number of header fields in the file')
if len(columns) > 1:
raise TypeError('Cannot pass names with multi-index '
'columns')
if self.usecols is not None:
# Set _use_cols. We don't store columns because they are
# overwritten.
self._handle_usecols(columns, names)
else:
self._col_indices = None
num_original_columns = len(names)
columns = [names]
else:
columns = self._handle_usecols(columns, columns[0])
else:
# header is None
line = self._buffered_line()
ncols = len(line)
num_original_columns = ncols
if not names:
if self.prefix:
columns = [['%s%d' % (self.prefix,i) for i in range(ncols)]]
else:
columns = [lrange(ncols)]
columns = self._handle_usecols(columns, columns[0])
else:
if self.usecols is None or len(names) == num_original_columns:
columns = self._handle_usecols([names], names)
num_original_columns = len(names)
else:
if self.usecols and len(names) != len(self.usecols):
raise ValueError(
'Number of passed names did not match number of '
'header fields in the file'
)
# Ignore output but set used columns.
self._handle_usecols([names], names)
columns = [names]
num_original_columns = ncols
return columns, num_original_columns
def _handle_usecols(self, columns, usecols_key):
"""
Sets self._col_indices
usecols_key is used if there are string usecols.
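
        For example, usecols=['b', 2] with usecols_key=['a', 'b', 'c', 'd']
        resolves to col_indices=[1, 2] (illustrative values).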
"""
if self.usecols is not None:
if any([isinstance(u, string_types) for u in self.usecols]):
if len(columns) > 1:
raise ValueError("If using multiple headers, usecols must "
"be integers.")
col_indices = []
for u in self.usecols:
if isinstance(u, string_types):
col_indices.append(usecols_key.index(u))
else:
col_indices.append(u)
else:
col_indices = self.usecols
columns = [[n for i, n in enumerate(column) if i in col_indices]
for column in columns]
self._col_indices = col_indices
return columns
def _buffered_line(self):
"""
Return a line from buffer, filling buffer if required.
"""
if len(self.buf) > 0:
return self.buf[0]
else:
return self._next_line()
def _next_line(self):
if isinstance(self.data, list):
while self.pos in self.skiprows:
self.pos += 1
try:
line = self.data[self.pos]
except IndexError:
raise StopIteration
else:
while self.pos in self.skiprows:
next(self.data)
self.pos += 1
line = next(self.data)
line = self._check_comments([line])[0]
self.pos += 1
self.buf.append(line)
return line
def _check_comments(self, lines):
if self.comment is None:
return lines
ret = []
for l in lines:
rl = []
for x in l:
if (not isinstance(x, compat.string_types) or
self.comment not in x):
rl.append(x)
else:
x = x[:x.find(self.comment)]
if len(x) > 0:
rl.append(x)
break
ret.append(rl)
return ret
def _check_thousands(self, lines):
if self.thousands is None:
return lines
nonnum = re.compile('[^-^0-9^%s^.]+' % self.thousands)
ret = []
for l in lines:
rl = []
for i, x in enumerate(l):
if (not isinstance(x, compat.string_types) or
self.thousands not in x or
(self._no_thousands_columns
and i in self._no_thousands_columns)
or nonnum.search(x.strip())):
rl.append(x)
else:
rl.append(x.replace(self.thousands, ''))
ret.append(rl)
return ret
def _clear_buffer(self):
self.buf = []
_implicit_index = False
def _get_index_name(self, columns):
"""
Try several cases to get lines:
0) There are headers on row 0 and row 1 and their
total summed lengths equals the length of the next line.
Treat row 0 as columns and row 1 as indices
1) Look for implicit index: there are more columns
on row 1 than row 0. If this is true, assume that row
1 lists index columns and row 0 lists normal columns.
2) Get index from the columns if it was listed.
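
        Rough illustration (made-up rows): a header row ['A', 'B'] followed by
        an index-name row ['idx'] and data rows of length 3 matches case 0;
        data rows simply longer than the header row match case 1; an
        explicitly listed index_col falls under case 2.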
"""
orig_names = list(columns)
columns = list(columns)
try:
line = self._next_line()
except StopIteration:
line = None
try:
next_line = self._next_line()
except StopIteration:
next_line = None
# implicitly index_col=0 b/c 1 fewer column names
implicit_first_cols = 0
if line is not None:
# leave it 0, #2442
# Case 1
if self.index_col is not False:
implicit_first_cols = len(line) - self.num_original_columns
# Case 0
if next_line is not None:
if len(next_line) == len(line) + self.num_original_columns:
# column and index names on diff rows
self.index_col = lrange(len(line))
self.buf = self.buf[1:]
for c in reversed(line):
columns.insert(0, c)
# Update list of original names to include all indices.
self.num_original_columns = len(next_line)
return line, columns, orig_names
if implicit_first_cols > 0:
# Case 1
self._implicit_index = True
if self.index_col is None:
self.index_col = lrange(implicit_first_cols)
index_name = None
else:
# Case 2
(index_name, columns,
self.index_col) = _clean_index_names(columns, self.index_col)
return index_name, orig_names, columns
def _rows_to_cols(self, content):
zipped_content = list(lib.to_object_array(content).T)
col_len = self.num_original_columns
zip_len = len(zipped_content)
if self._implicit_index:
col_len += len(self.index_col)
if self.skip_footer < 0:
raise ValueError('skip footer cannot be negative')
# Loop through rows to verify lengths are correct.
if col_len != zip_len and self.index_col is not False:
i = 0
for (i, l) in enumerate(content):
if len(l) != col_len:
break
footers = 0
if self.skip_footer:
footers = self.skip_footer
row_num = self.pos - (len(content) - i + footers)
msg = ('Expected %d fields in line %d, saw %d' %
(col_len, row_num + 1, zip_len))
raise ValueError(msg)
if self.usecols:
if self._implicit_index:
zipped_content = [
a for i, a in enumerate(zipped_content)
if (i < len(self.index_col)
or i - len(self.index_col) in self._col_indices)
]
else:
zipped_content = [a for i, a in enumerate(zipped_content)
if i in self._col_indices]
return zipped_content
def _get_lines(self, rows=None):
source = self.data
lines = self.buf
new_rows = None
# already fetched some number
if rows is not None:
# we already have the lines in the buffer
if len(self.buf) >= rows:
new_rows, self.buf = self.buf[:rows], self.buf[rows:]
# need some lines
else:
rows -= len(self.buf)
if new_rows is None:
if isinstance(source, list):
if self.pos > len(source):
raise StopIteration
if rows is None:
new_rows = source[self.pos:]
new_pos = len(source)
else:
new_rows = source[self.pos:self.pos + rows]
new_pos = self.pos + rows
# Check for stop rows. n.b.: self.skiprows is a set.
if self.skiprows:
new_rows = [row for i, row in enumerate(new_rows)
if i + self.pos not in self.skiprows]
lines.extend(new_rows)
self.pos = new_pos
else:
new_rows = []
try:
if rows is not None:
for _ in range(rows):
new_rows.append(next(source))
lines.extend(new_rows)
else:
rows = 0
while True:
try:
new_rows.append(next(source))
rows += 1
except csv.Error as inst:
if 'newline inside string' in str(inst):
row_num = str(self.pos + rows)
msg = ('EOF inside string starting with '
'line ' + row_num)
raise Exception(msg)
raise
except StopIteration:
if self.skiprows:
new_rows = [row for i, row in enumerate(new_rows)
if self.pos + i not in self.skiprows]
lines.extend(new_rows)
if len(lines) == 0:
raise
self.pos += len(new_rows)
self.buf = []
else:
lines = new_rows
if self.skip_footer:
lines = lines[:-self.skip_footer]
lines = self._check_comments(lines)
return self._check_thousands(lines)
def _make_date_converter(date_parser=None, dayfirst=False,
infer_datetime_format=False):
def converter(*date_cols):
if date_parser is None:
strs = _concat_date_cols(date_cols)
try:
return tools.to_datetime(
com._ensure_object(strs),
utc=None,
box=False,
dayfirst=dayfirst,
infer_datetime_format=infer_datetime_format
)
except:
return lib.try_parse_dates(strs, dayfirst=dayfirst)
else:
try:
result = date_parser(*date_cols)
if isinstance(result, datetime.datetime):
raise Exception('scalar parser')
return result
except Exception:
try:
return lib.try_parse_dates(_concat_date_cols(date_cols),
parser=date_parser,
dayfirst=dayfirst)
except Exception:
return generic_parser(date_parser, *date_cols)
return converter
def _process_date_conversion(data_dict, converter, parse_spec,
index_col, index_names, columns,
keep_date_col=False):
def _isindex(colspec):
return ((isinstance(index_col, list) and
colspec in index_col)
or (isinstance(index_names, list) and
colspec in index_names))
new_cols = []
new_data = {}
orig_names = columns
columns = list(columns)
date_cols = set()
if parse_spec is None or isinstance(parse_spec, bool):
return data_dict, columns
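    # parse_spec examples (illustrative): parse_dates=[['date', 'time']]
    # combines the 'date' and 'time' columns into one parsed column named
    # 'date_time', while parse_dates={'timestamp': ['date', 'time']} stores
    # the combined result under the new name 'timestamp'.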
if isinstance(parse_spec, list):
# list of column lists
for colspec in parse_spec:
if np.isscalar(colspec):
if isinstance(colspec, int) and colspec not in data_dict:
colspec = orig_names[colspec]
if _isindex(colspec):
continue
data_dict[colspec] = converter(data_dict[colspec])
else:
new_name, col, old_names = _try_convert_dates(
converter, colspec, data_dict, orig_names)
if new_name in data_dict:
raise ValueError('New date column already in dict %s' %
new_name)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
elif isinstance(parse_spec, dict):
# dict of new name to column list
for new_name, colspec in compat.iteritems(parse_spec):
if new_name in data_dict:
raise ValueError('Date column %s already in dict' %
new_name)
_, col, old_names = _try_convert_dates(converter, colspec,
data_dict, orig_names)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
data_dict.update(new_data)
new_cols.extend(columns)
if not keep_date_col:
for c in list(date_cols):
data_dict.pop(c)
new_cols.remove(c)
return data_dict, new_cols
def _try_convert_dates(parser, colspec, data_dict, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int) and c not in columns:
colnames.append(str(columns[c]))
else:
colnames.append(c)
new_name = '_'.join([str(x) for x in colnames])
to_parse = [data_dict[c] for c in colnames if c in data_dict]
new_col = parser(*to_parse)
return new_name, new_col, colnames
def _clean_na_values(na_values, keep_default_na=True):
if na_values is None:
if keep_default_na:
na_values = _NA_VALUES
else:
na_values = []
na_fvalues = set()
elif isinstance(na_values, dict):
if keep_default_na:
for k, v in compat.iteritems(na_values):
v = set(list(v)) | _NA_VALUES
na_values[k] = v
na_fvalues = dict([
(k, _floatify_na_values(v)) for k, v in na_values.items()
])
else:
if not com.is_list_like(na_values):
na_values = [na_values]
na_values = _stringify_na_values(na_values)
if keep_default_na:
na_values = na_values | _NA_VALUES
na_fvalues = _floatify_na_values(na_values)
return na_values, na_fvalues
def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
return None, columns, index_col
columns = list(columns)
cp_cols = list(columns)
index_names = []
# don't mutate
index_col = list(index_col)
for i, c in enumerate(index_col):
if isinstance(c, compat.string_types):
index_names.append(c)
for j, name in enumerate(cp_cols):
if name == c:
index_col[i] = j
columns.remove(name)
break
else:
name = cp_cols[c]
columns.remove(name)
index_names.append(name)
# hack
if isinstance(index_names[0], compat.string_types)\
and 'Unnamed' in index_names[0]:
index_names[0] = None
return index_names, columns, index_col
def _get_empty_meta(columns, index_col, index_names):
columns = list(columns)
if index_col is not None:
index = MultiIndex.from_arrays([[]] * len(index_col),
names=index_names)
for n in index_col:
columns.pop(n)
else:
index = Index([])
return index, columns, {}
def _floatify_na_values(na_values):
# create float versions of the na_values
result = set()
for v in na_values:
try:
v = float(v)
if not np.isnan(v):
result.add(v)
except:
pass
return result
def _stringify_na_values(na_values):
    """ return stringified and numeric versions of these na_values """
result = []
for x in na_values:
result.append(str(x))
result.append(x)
try:
v = float(x)
            # for integer-like values (e.g. 999) also register the "999.0" form
if v == int(v):
v = int(v)
result.append("%s.0" % v)
result.append(str(v))
result.append(v)
except:
pass
try:
result.append(int(x))
except:
pass
return set(result)
def _get_na_values(col, na_values, na_fvalues):
if isinstance(na_values, dict):
if col in na_values:
values = na_values[col]
fvalues = na_fvalues[col]
return na_values[col], na_fvalues[col]
else:
return _NA_VALUES, set()
else:
return na_values, na_fvalues
def _get_col_names(colspec, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int):
colnames.append(columns[c])
return colnames
def _concat_date_cols(date_cols):
if len(date_cols) == 1:
if compat.PY3:
return np.array([compat.text_type(x) for x in date_cols[0]],
dtype=object)
else:
return np.array([
str(x) if not isinstance(x, compat.string_types) else x
for x in date_cols[0]
], dtype=object)
rs = np.array([' '.join([compat.text_type(y) for y in x])
for x in zip(*date_cols)], dtype=object)
return rs
class FixedWidthReader(object):
"""
A reader of fixed-width lines.
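
    Hedged usage sketch (colspecs are half-open [from, to) intervals;
    values are illustrative)::

        reader = FixedWidthReader(f, colspecs=[(0, 5), (5, 10)],
                                  delimiter=None, comment=None)
        fields = next(reader)   # list of stripped field strings

    Passing colspecs='infer' lets detect_colspecs() guess the field spans
    from the first rows of the file.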
"""
def __init__(self, f, colspecs, delimiter, comment):
self.f = f
self.buffer = None
self.delimiter = '\r\n' + delimiter if delimiter else '\n\r\t '
self.comment = comment
if colspecs == 'infer':
self.colspecs = self.detect_colspecs()
else:
self.colspecs = colspecs
if not isinstance(self.colspecs, (tuple, list)):
raise TypeError("column specifications must be a list or tuple, "
"input was a %r" % type(colspecs).__name__)
for colspec in self.colspecs:
if not (isinstance(colspec, (tuple, list)) and
len(colspec) == 2 and
isinstance(colspec[0], (int, np.integer)) and
isinstance(colspec[1], (int, np.integer))):
raise TypeError('Each column specification must be '
'2 element tuple or list of integers')
def get_rows(self, n):
rows = []
for i, row in enumerate(self.f, 1):
rows.append(row)
if i >= n:
break
self.buffer = iter(rows)
return rows
def detect_colspecs(self, n=100):
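        # Sketch of the approach: mark every character position that is part
        # of a non-delimiter run in the first n rows, then read the 0->1 and
        # 1->0 transitions of that mask as the column boundaries.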
# Regex escape the delimiters
delimiters = ''.join([r'\%s' % x for x in self.delimiter])
pattern = re.compile('([^%s]+)' % delimiters)
rows = self.get_rows(n)
max_len = max(map(len, rows))
mask = np.zeros(max_len + 1, dtype=int)
if self.comment is not None:
rows = [row.partition(self.comment)[0] for row in rows]
for row in rows:
for m in pattern.finditer(row):
mask[m.start():m.end()] = 1
shifted = np.roll(mask, 1)
shifted[0] = 0
edges = np.where((mask ^ shifted) == 1)[0]
return list(zip(edges[::2], edges[1::2]))
def next(self):
if self.buffer is not None:
try:
line = next(self.buffer)
except StopIteration:
self.buffer = None
line = next(self.f)
else:
line = next(self.f)
# Note: 'colspecs' is a sequence of half-open intervals.
return [line[fromm:to].strip(self.delimiter)
for (fromm, to) in self.colspecs]
# Iterator protocol in Python 3 uses __next__()
__next__ = next
class FixedWidthFieldParser(PythonParser):
"""
    Specialization that converts fixed-width fields into DataFrames.
See PythonParser for details.
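
    Normally reached through read_fwf / the 'python-fwf' engine, e.g.
    read_fwf('data.txt', widths=[5, 5, 8]) (file name is illustrative).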
"""
def __init__(self, f, **kwds):
# Support iterators, convert to a list.
self.colspecs = kwds.pop('colspecs')
PythonParser.__init__(self, f, **kwds)
def _make_reader(self, f):
self.data = FixedWidthReader(f, self.colspecs, self.delimiter,
self.comment)
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/pandas/io/parsers.py
|
Python
|
gpl-3.0
| 76,769
|
#Assignment No :6(B)
# Take the words from a file and arrange them in sorted form.
# Create the file, write the sample data into it and close it.
file1 = raw_input("Enter the name of file with address:")
data = "ABC\nPQR\nXYZ\nABD\nAAA\nGEF\nWXY"
f = open(file1,'w')
f.write(data)
f.close()
a = [] # Empty list for holding the words
f = open(file1,'r') # open the file in read mode.
for line in f: # Read the data line by line from the file
    line = line.split() # Separate the words of each line.
    a.extend(line) # Add the separated words to the list.
print "Words present in a file: \n",a # print the list.
f.close() # close the file.
# Use Bubble sort algorithm.
print ("1. For Ascending order \n2. For Descending Order ")
choice = int(input("Enter your choice :"))
if(choice == 1):
for i in range(len(a)-1,0,-1):
for j in range(i):
if(a[j] > a[j+1]):
temp = a[j]
a[j] = a[j+1]
a[j+1] = temp
print "Words present in Sorted form :\n",a
if(choice == 2):
for i in range(len(a)-1,0,-1):
for j in range(i):
if(a[j] < a[j+1]):
temp = a[j]
a[j] = a[j+1]
a[j+1] = temp
print "Words present in Sorted form :\n",a
|
sumitkhandelwal/computerprogrammingusingpython
|
assignmentno6bb2.py
|
Python
|
gpl-3.0
| 1,303
|
#-*- coding:utf-8 -*-
import subprocess
from ErrorGenerator import ErrorGenerator
class RubyErrorGenerator(ErrorGenerator):
command = ["ruby", "-c"]
startFilePath = True
parseRegex = "^:([0-9]+): (.*)"
lineIndex = 1
messageIndex = 2
stdout = None
stderr = subprocess.PIPE
|
utisam/gfly
|
gfly/generators/RubyErrorGenerator.py
|
Python
|
gpl-3.0
| 280
|
# $HeadURL$
__RCSID__ = "$Id$"
import datetime, time
import types
import threading
import random
from DIRAC.Core.Base.DB import DB
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.Core.Utilities import List, ThreadSafe, Time, DEncode
from DIRAC.AccountingSystem.private.TypeLoader import TypeLoader
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
gSynchro = ThreadSafe.Synchronizer()
class AccountingDB( DB ):
def __init__( self, name = 'Accounting/AccountingDB', readOnly = False ):
DB.__init__( self, 'AccountingDB', name )
    self.maxBucketTime = 604800 # 1 week
self.autoCompact = False
self.__readOnly = readOnly
self.__doingCompaction = False
self.__oldBucketMethod = False
self.__doingPendingLockTime = 0
self.__deadLockRetries = 2
self.__queuedRecordsLock = ThreadSafe.Synchronizer()
self.__queuedRecordsToInsert = []
self.dbCatalog = {}
self.dbBucketsLength = {}
self.__keysCache = {}
maxParallelInsertions = self.getCSOption( "ParallelRecordInsertions", 10 )
self.__threadPool = ThreadPool( 1, maxParallelInsertions )
self.__threadPool.daemonize()
self.catalogTableName = _getTableName( "catalog", "Types" )
self._createTables( { self.catalogTableName : { 'Fields' : { 'name' : "VARCHAR(64) UNIQUE NOT NULL",
'keyFields' : "VARCHAR(255) NOT NULL",
'valueFields' : "VARCHAR(255) NOT NULL",
'bucketsLength' : "VARCHAR(255) NOT NULL",
},
'PrimaryKey' : 'name'
}
}
)
self.__loadCatalogFromDB()
gMonitor.registerActivity( "registeradded",
"Register added",
"Accounting",
"entries",
gMonitor.OP_ACUM )
gMonitor.registerActivity( "insertiontime",
"Record insertion time",
"Accounting",
"seconds",
gMonitor.OP_MEAN )
gMonitor.registerActivity( "querytime",
"Records query time",
"Accounting",
"seconds",
gMonitor.OP_MEAN )
self.__compactTime = datetime.time( hour = 2,
minute = random.randint( 0, 59 ),
second = random.randint( 0, 59 ) )
lcd = Time.dateTime()
lcd.replace( hour = self.__compactTime.hour + 1,
minute = 0,
second = 0 )
self.__lastCompactionEpoch = Time.toEpoch( lcd )
self.__registerTypes()
def __loadTablesCreated( self ):
result = self._query( "show tables" )
if not result[ 'OK' ]:
return result
return S_OK( [ f[0] for f in result[ 'Value' ] ] )
def autoCompactDB( self ):
self.autoCompact = True
th = threading.Thread( target = self.__periodicAutoCompactDB )
th.setDaemon( 1 )
th.start()
def __periodicAutoCompactDB( self ):
while self.autoCompact:
nct = Time.dateTime()
if nct.hour >= self.__compactTime.hour:
nct = nct + datetime.timedelta( days = 1 )
nct = nct.replace( hour = self.__compactTime.hour,
minute = self.__compactTime.minute,
second = self.__compactTime.second )
self.log.info( "Next db compaction will be at %s" % nct )
sleepTime = Time.toEpoch( nct ) - Time.toEpoch()
time.sleep( sleepTime )
self.compactBuckets()
def __registerTypes( self ):
"""
Register all types
"""
retVal = gConfig.getSections( "/DIRAC/Setups" )
if not retVal[ 'OK' ]:
return S_ERROR( "Can't get a list of setups: %s" % retVal[ 'Message' ] )
setupsList = retVal[ 'Value' ]
objectsLoaded = TypeLoader().getTypes()
#Load the files
for pythonClassName in sorted( objectsLoaded ):
typeClass = objectsLoaded[ pythonClassName ]
for setup in setupsList:
typeName = "%s_%s" % ( setup, pythonClassName )
typeDef = typeClass().getDefinition()
#dbTypeName = "%s_%s" % ( setup, typeName )
definitionKeyFields, definitionAccountingFields, bucketsLength = typeDef[1:]
#If already defined check the similarities
if typeName in self.dbCatalog:
bucketsLength.sort()
if bucketsLength != self.dbBucketsLength[ typeName ]:
bucketsLength = self.dbBucketsLength[ typeName ]
self.log.warn( "Bucket length has changed for type %s" % typeName )
keyFields = [ f[0] for f in definitionKeyFields ]
if keyFields != self.dbCatalog[ typeName ][ 'keys' ]:
keyFields = self.dbCatalog[ typeName ][ 'keys' ]
self.log.error( "Definition fields have changed", "Type %s" % typeName )
valueFields = [ f[0] for f in definitionAccountingFields ]
if valueFields != self.dbCatalog[ typeName ][ 'values' ]:
valueFields = self.dbCatalog[ typeName ][ 'values' ]
self.log.error( "Accountable fields have changed", "Type %s" % typeName )
#Try to re register to check all the tables are there
retVal = self.registerType( typeName, definitionKeyFields,
definitionAccountingFields, bucketsLength )
if not retVal[ 'OK' ]:
self.log.error( "Can't register type", "%s: %s" % ( typeName, retVal[ 'Message' ] ) )
#If it has been properly registered, update info
elif retVal[ 'Value' ]:
#Set the timespan
self.dbCatalog[ typeName ][ 'dataTimespan' ] = typeClass().getDataTimespan()
self.dbCatalog[ typeName ][ 'definition' ] = { 'keys' : definitionKeyFields,
'values' : definitionAccountingFields }
return S_OK()
def __loadCatalogFromDB( self ):
retVal = self._query( "SELECT `name`, `keyFields`, `valueFields`, `bucketsLength` FROM `%s`" % self.catalogTableName )
if not retVal[ 'OK' ]:
raise Exception( retVal[ 'Message' ] )
for typesEntry in retVal[ 'Value' ]:
typeName = typesEntry[0]
keyFields = List.fromChar( typesEntry[1], "," )
valueFields = List.fromChar( typesEntry[2], "," )
bucketsLength = DEncode.decode( typesEntry[3] )[0]
self.__addToCatalog( typeName, keyFields, valueFields, bucketsLength )
def getWaitingRecordsLifeTime( self ):
"""
    Get the time records can live in the IN tables without a retry
"""
return self.getCSOption( "RecordMaxWaitingTime", 86400 )
def markAllPendingRecordsAsNotTaken( self ):
"""
Mark all records to be processed as not taken
NOTE: ONLY EXECUTE THIS AT THE BEGINNING OF THE DATASTORE SERVICE!
"""
self.log.always( "Marking all records to be processed as not taken" )
for typeName in self.dbCatalog:
sqlTableName = _getTableName( "in", typeName )
result = self._update( "UPDATE `%s` SET taken=0" % sqlTableName )
if not result[ 'OK' ]:
return result
return S_OK()
def loadPendingRecords( self ):
"""
Load all records pending to insertion and generate threaded jobs
"""
gSynchro.lock()
try:
now = time.time()
if now - self.__doingPendingLockTime <= 3600:
return S_OK()
self.__doingPendingLockTime = now
finally:
gSynchro.unlock()
self.log.info( "[PENDING] Loading pending records for insertion" )
pending = 0
now = Time.toEpoch()
recordsPerSlot = self.getCSOption( "RecordsPerSlot", 100 )
for typeName in self.dbCatalog:
self.log.info( "[PENDING] Checking %s" % typeName )
pendingInQueue = self.__threadPool.pendingJobs()
emptySlots = max( 0, 3000 - pendingInQueue )
self.log.info( "[PENDING] %s in the queue, %d empty slots" % ( pendingInQueue, emptySlots ) )
if emptySlots < 1:
continue
emptySlots = min( 100, emptySlots )
sqlTableName = _getTableName( "in", typeName )
sqlFields = [ 'id' ] + self.dbCatalog[ typeName ][ 'typeFields' ]
sqlCond = "WHERE taken = 0 or TIMESTAMPDIFF( SECOND, takenSince, UTC_TIMESTAMP() ) > %s" % self.getWaitingRecordsLifeTime()
result = self._query( "SELECT %s FROM `%s` %s ORDER BY id ASC LIMIT %d" % ( ", ".join( [ "`%s`" % f for f in sqlFields ] ),
sqlTableName,
sqlCond,
emptySlots * recordsPerSlot ) )
if not result[ 'OK' ]:
self.log.error( "[PENDING] Error when trying to get pending records", "for %s : %s" % ( typeName, result[ 'Message' ] ) )
return result
self.log.info( "[PENDING] Got %s pending records for type %s" % ( len( result[ 'Value' ] ), typeName ) )
dbData = result[ 'Value' ]
idList = [ str( r[0] ) for r in dbData ]
#If nothing to do, continue
if not idList:
continue
result = self._update( "UPDATE `%s` SET taken=1, takenSince=UTC_TIMESTAMP() WHERE id in (%s)" % ( sqlTableName,
", ".join( idList ) ) )
if not result[ 'OK' ]:
self.log.error( "[PENDING] Error when trying set state to waiting records", "for %s : %s" % ( typeName, result[ 'Message' ] ) )
self.__doingPendingLockTime = 0
return result
#Group them in groups of 10
recordsToProcess = []
for record in dbData:
pending += 1
iD = record[ 0 ]
startTime = record[ -2 ]
endTime = record[ -1 ]
valuesList = list( record[ 1:-2 ] )
recordsToProcess.append( ( iD, typeName, startTime, endTime, valuesList, now ) )
if len( recordsToProcess ) % recordsPerSlot == 0:
self.__threadPool.generateJobAndQueueIt( self.__insertFromINTable ,
args = ( recordsToProcess, ) )
recordsToProcess = []
if recordsToProcess:
self.__threadPool.generateJobAndQueueIt( self.__insertFromINTable ,
args = ( recordsToProcess, ) )
self.log.info( "[PENDING] Got %s records requests for all types" % pending )
self.__doingPendingLockTime = 0
return S_OK()
def __addToCatalog( self, typeName, keyFields, valueFields, bucketsLength ):
"""
Add type to catalog
"""
self.log.verbose( "Adding to catalog type %s" % typeName, "with length %s" % str( bucketsLength ) )
self.dbCatalog[ typeName ] = { 'keys' : keyFields , 'values' : valueFields,
'typeFields' : [], 'bucketFields' : [], 'dataTimespan' : 0 }
self.dbCatalog[ typeName ][ 'typeFields' ].extend( keyFields )
self.dbCatalog[ typeName ][ 'typeFields' ].extend( valueFields )
self.dbCatalog[ typeName ][ 'bucketFields' ] = list( self.dbCatalog[ typeName ][ 'typeFields' ] )
self.dbCatalog[ typeName ][ 'typeFields' ].extend( [ 'startTime', 'endTime' ] )
self.dbCatalog[ typeName ][ 'bucketFields' ].extend( [ 'entriesInBucket', 'startTime', 'bucketLength' ] )
self.dbBucketsLength[ typeName ] = bucketsLength
#ADRI: TEST COMPACT BUCKETS
#self.dbBucketsLength[ typeName ] = [ ( 31104000, 3600 ) ]
def changeBucketsLength( self, typeName, bucketsLength ):
gSynchro.lock()
try:
if not typeName in self.dbCatalog:
return S_ERROR( "%s is not a valid type name" % typeName )
bucketsLength.sort()
bucketsEncoding = DEncode.encode( bucketsLength )
retVal = self._update( "UPDATE `%s` set bucketsLength = '%s' where name = '%s'" % ( self.catalogTableName,
bucketsEncoding,
typeName ) )
if not retVal[ 'OK' ]:
return retVal
self.dbBucketsLength[ typeName ] = bucketsLength
finally:
gSynchro.unlock()
return self.regenerateBuckets( typeName )
@gSynchro
def registerType( self, name, definitionKeyFields, definitionAccountingFields, bucketsLength ):
"""
Register a new type
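
    bucketsLength is expected to be a list of ( timespan, bucketLength )
    tuples, e.g. [ ( 86400, 900 ), ( 604800, 3600 ) ] (illustrative values),
    which is what the sanity checks below enforce.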
"""
gMonitor.registerActivity( "registerwaiting:%s" % name,
"Records waiting for insertion for %s" % " ".join( name.split( "_" ) ),
"Accounting",
"records",
gMonitor.OP_MEAN )
gMonitor.registerActivity( "registeradded:%s" % name,
"Register added for %s" % " ".join( name.split( "_" ) ),
"Accounting",
"entries",
gMonitor.OP_ACUM )
result = self.__loadTablesCreated()
if not result[ 'OK' ]:
return result
tablesInThere = result[ 'Value' ]
keyFieldsList = []
valueFieldsList = []
for key in definitionKeyFields:
keyFieldsList.append( key[0] )
for value in definitionAccountingFields:
valueFieldsList.append( value[0] )
for field in definitionKeyFields:
if field in valueFieldsList:
return S_ERROR( "Key field %s is also in the list of value fields" % field )
for field in definitionAccountingFields:
if field in keyFieldsList:
return S_ERROR( "Value field %s is also in the list of key fields" % field )
for bucket in bucketsLength:
if type( bucket ) != types.TupleType:
return S_ERROR( "Length of buckets should be a list of tuples" )
if len( bucket ) != 2:
return S_ERROR( "Length of buckets should have 2d tuples" )
updateDBCatalog = True
if name in self.dbCatalog:
updateDBCatalog = False
tables = {}
for key in definitionKeyFields:
keyTableName = _getTableName( "key", name, key[0] )
if keyTableName not in tablesInThere:
self.log.info( "Table for key %s has to be created" % key[0] )
tables[ keyTableName ] = { 'Fields' : { 'id' : 'INTEGER NOT NULL AUTO_INCREMENT',
'value' : '%s NOT NULL' % key[1]
},
'UniqueIndexes' : { 'valueindex' : [ 'value' ] },
'PrimaryKey' : 'id'
}
#Registering type
fieldsDict = {}
bucketFieldsDict = {}
inbufferDict = { 'id' : 'INTEGER NOT NULL AUTO_INCREMENT' }
bucketIndexes = { 'startTimeIndex' : [ 'startTime' ], 'bucketLengthIndex' : [ 'bucketLength' ] }
uniqueIndexFields = ['startTime']
for field in definitionKeyFields:
bucketIndexes[ "%sIndex" % field[0] ] = [ field[0] ]
uniqueIndexFields.append( field[ 0 ] )
fieldsDict[ field[0] ] = "INTEGER NOT NULL"
bucketFieldsDict[ field[0] ] = "INTEGER NOT NULL"
inbufferDict[ field[0] ] = field[1] + " NOT NULL"
for field in definitionAccountingFields:
fieldsDict[ field[0] ] = field[1] + " NOT NULL"
bucketFieldsDict[ field[0] ] = "DECIMAL(30,10) NOT NULL"
inbufferDict[ field[0] ] = field[1] + " NOT NULL"
fieldsDict[ 'startTime' ] = "INT UNSIGNED NOT NULL"
fieldsDict[ 'endTime' ] = "INT UNSIGNED NOT NULL"
bucketFieldsDict[ 'entriesInBucket' ] = "DECIMAL(30,10) NOT NULL"
bucketFieldsDict[ 'startTime' ] = "INT UNSIGNED NOT NULL"
inbufferDict[ 'startTime' ] = "INT UNSIGNED NOT NULL"
inbufferDict[ 'endTime' ] = "INT UNSIGNED NOT NULL"
inbufferDict[ 'taken' ] = "TINYINT(1) DEFAULT 1 NOT NULL"
inbufferDict[ 'takenSince' ] = "DATETIME NOT NULL"
bucketFieldsDict[ 'bucketLength' ] = "MEDIUMINT UNSIGNED NOT NULL"
uniqueIndexFields.append( 'bucketLength' )
bucketTableName = _getTableName( "bucket", name )
if bucketTableName not in tablesInThere:
tables[ bucketTableName ] = { 'Fields' : bucketFieldsDict,
'UniqueIndexes' : { 'UniqueConstraint' : uniqueIndexFields }
}
typeTableName = _getTableName( "type", name )
if typeTableName not in tablesInThere:
tables[ typeTableName ] = { 'Fields' : fieldsDict }
inTableName = _getTableName( "in", name )
if inTableName not in tablesInThere:
tables[ inTableName ] = { 'Fields' : inbufferDict,
'PrimaryKey' : 'id'
}
if self.__readOnly:
if tables:
self.log.notice( "ReadOnly mode: Skipping create of tables for %s. Removing from memory catalog" % name )
self.log.verbose( "Skipping creation of tables %s" % ", ".join( [ tn for tn in tables ] ) )
try:
self.dbCatalog.pop( name )
except KeyError:
pass
else:
self.log.notice( "ReadOnly mode: %s is OK" % name )
return S_OK( not updateDBCatalog )
if tables:
retVal = self._createTables( tables )
if not retVal[ 'OK' ]:
self.log.error( "Can't create type", "%s: %s" % ( name, retVal[ 'Message' ] ) )
return S_ERROR( "Can't create type %s: %s" % ( name, retVal[ 'Message' ] ) )
if updateDBCatalog:
bucketsLength.sort()
bucketsEncoding = DEncode.encode( bucketsLength )
self.insertFields( self.catalogTableName,
[ 'name', 'keyFields', 'valueFields', 'bucketsLength' ],
[ name, ",".join( keyFieldsList ), ",".join( valueFieldsList ), bucketsEncoding ] )
self.__addToCatalog( name, keyFieldsList, valueFieldsList, bucketsLength )
self.log.info( "Registered type %s" % name )
return S_OK( True )
def getRegisteredTypes( self ):
"""
Get list of registered types
"""
retVal = self._query( "SELECT `name`, `keyFields`, `valueFields`, `bucketsLength` FROM `%s`" % self.catalogTableName )
if not retVal[ 'OK' ]:
return retVal
typesList = []
for typeInfo in retVal[ 'Value' ]:
typesList.append( [ typeInfo[0],
List.fromChar( typeInfo[1] ),
List.fromChar( typeInfo[2] ),
DEncode.decode( typeInfo[3] )
]
)
return S_OK( typesList )
def getKeyValues( self, typeName, condDict, connObj = False ):
"""
Get all values for a given key field in a type
"""
keyValuesDict = {}
keyTables = []
sqlCond = []
mainTable = "`%s`" % _getTableName( "bucket", typeName )
typeKeysList = self.dbCatalog[ typeName ][ 'keys' ]
for keyName in condDict:
if keyName in typeKeysList:
keyTable = "`%s`" % _getTableName( "key", typeName, keyName )
if not keyTable in keyTables:
keyTables.append( keyTable )
sqlCond.append( "%s.id = %s.`%s`" % ( keyTable, mainTable, keyName ) )
for value in condDict[ keyName ]:
sqlCond.append( "%s.value = %s" % ( keyTable, self._escapeString( value )[ 'Value' ] ) )
for keyName in typeKeysList:
keyTable = "`%s`" % _getTableName( "key", typeName, keyName )
allKeyTables = keyTables
if not keyTable in allKeyTables:
allKeyTables = list( keyTables )
allKeyTables.append( keyTable )
cmd = "SELECT DISTINCT %s.value FROM %s" % ( keyTable, ", ".join( allKeyTables ) )
if sqlCond:
sqlValueLink = "%s.id = %s.`%s`" % ( keyTable, mainTable, keyName )
cmd += ", %s WHERE %s AND %s" % ( mainTable, sqlValueLink, " AND ".join( sqlCond ) )
retVal = self._query( cmd, conn = connObj )
if not retVal[ 'OK' ]:
return retVal
keyValuesDict[ keyName ] = [ r[0] for r in retVal[ 'Value' ] ]
return S_OK( keyValuesDict )
@gSynchro
def deleteType( self, typeName ):
"""
Deletes a type
"""
if self.__readOnly:
return S_ERROR( "ReadOnly mode enabled. No modification allowed" )
if typeName not in self.dbCatalog:
return S_ERROR( "Type %s does not exist" % typeName )
self.log.info( "Deleting type", typeName )
tablesToDelete = []
for keyField in self.dbCatalog[ typeName ][ 'keys' ]:
tablesToDelete.append( "`%s`" % _getTableName( "key", typeName, keyField ) )
tablesToDelete.insert( 0, "`%s`" % _getTableName( "type", typeName ) )
tablesToDelete.insert( 0, "`%s`" % _getTableName( "bucket", typeName ) )
tablesToDelete.insert( 0, "`%s`" % _getTableName( "in", typeName ) )
retVal = self._query( "DROP TABLE %s" % ", ".join( tablesToDelete ) )
if not retVal[ 'OK' ]:
return retVal
retVal = self._update( "DELETE FROM `%s` WHERE name='%s'" % ( _getTableName( "catalog", "Types" ), typeName ) )
del( self.dbCatalog[ typeName ] )
return S_OK()
def __getIdForKeyValue( self, typeName, keyName, keyValue, conn = False ):
"""
Finds id number for value in a key table
"""
retVal = self._escapeString( keyValue )
if not retVal[ 'OK' ]:
return retVal
keyValue = retVal[ 'Value' ]
retVal = self._query( "SELECT `id` FROM `%s` WHERE `value`=%s" % ( _getTableName( "key", typeName, keyName ),
keyValue ), conn = conn )
if not retVal[ 'OK' ]:
return retVal
if len( retVal[ 'Value' ] ) > 0:
return S_OK( retVal[ 'Value' ][0][0] )
return S_ERROR( "Key id %s for value %s does not exist although it shoud" % ( keyName, keyValue ) )
def __addKeyValue( self, typeName, keyName, keyValue ):
"""
    Adds a key value to a key table if it does not already exist
"""
#Cast to string just in case
if type( keyValue ) != types.StringType:
keyValue = str( keyValue )
#No more than 64 chars for keys
if len( keyValue ) > 64:
keyValue = keyValue[:64]
#Look into the cache
if typeName not in self.__keysCache:
self.__keysCache[ typeName ] = {}
typeCache = self.__keysCache[ typeName ]
if keyName not in typeCache:
typeCache[ keyName ] = {}
keyCache = typeCache[ keyName ]
if keyValue in keyCache:
return S_OK( keyCache[ keyValue ] )
#Retrieve key
keyTable = _getTableName( "key", typeName, keyName )
retVal = self.__getIdForKeyValue( typeName, keyName, keyValue )
if retVal[ 'OK' ]:
keyCache[ keyValue ] = retVal[ 'Value' ]
return retVal
#Key is not in there
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return retVal
connection = retVal[ 'Value' ]
self.log.info( "Value %s for key %s didn't exist, inserting" % ( keyValue, keyName ) )
retVal = self.insertFields( keyTable, [ 'id', 'value' ], [ 0, keyValue ], connection )
if not retVal[ 'OK' ] and retVal[ 'Message' ].find( "Duplicate key" ) == -1:
return retVal
result = self.__getIdForKeyValue( typeName, keyName, keyValue, connection )
if not result[ 'OK' ]:
return result
keyCache[ keyValue ] = result[ 'Value' ]
return result
def calculateBucketLengthForTime( self, typeName, now, when ):
"""
Get the expected bucket time for a moment in time
"""
for granuT in self.dbBucketsLength[ typeName ]:
nowBucketed = now - now % granuT[1]
dif = max( 0, nowBucketed - when )
if dif <= granuT[0]:
return granuT[1]
return self.maxBucketTime
def calculateBuckets( self, typeName, startTime, endTime, nowEpoch = False ):
"""
Magic function for calculating buckets between two times and
the proportional part for each bucket
"""
if not nowEpoch:
nowEpoch = int( Time.toEpoch( Time.dateTime() ) )
bucketTimeLength = self.calculateBucketLengthForTime( typeName, nowEpoch, startTime )
currentBucketStart = startTime - startTime % bucketTimeLength
if startTime == endTime:
return [ ( currentBucketStart,
1,
bucketTimeLength ) ]
buckets = []
totalLength = endTime - startTime
while currentBucketStart < endTime:
start = max( currentBucketStart, startTime )
end = min( currentBucketStart + bucketTimeLength, endTime )
proportion = float( end - start ) / totalLength
buckets.append( ( currentBucketStart,
proportion,
bucketTimeLength ) )
currentBucketStart += bucketTimeLength
bucketTimeLength = self.calculateBucketLengthForTime( typeName, nowEpoch, currentBucketStart )
return buckets
def __insertInQueueTable( self, typeName, startTime, endTime, valuesList ):
sqlFields = [ 'id', 'taken', 'takenSince' ] + self.dbCatalog[ typeName ][ 'typeFields' ]
sqlValues = [ '0', '0', 'UTC_TIMESTAMP()' ] + valuesList + [ startTime, endTime ]
if len( sqlFields ) != len( sqlValues ):
numRcv = len( valuesList ) + 2
numExp = len( self.dbCatalog[ typeName ][ 'typeFields' ] )
return S_ERROR( "Fields mismatch for record %s. %s fields and %s expected" % ( typeName,
numRcv,
numExp ) )
retVal = self.insertFields( _getTableName( "in", typeName ),
sqlFields,
sqlValues )
if not retVal[ 'OK' ]:
return retVal
return S_OK( retVal[ 'lastRowId' ] )
def insertRecordBundleThroughQueue( self, recordsToQueue ) :
if self.__readOnly:
return S_ERROR( "ReadOnly mode enabled. No modification allowed" )
recordsToProcess = []
now = Time.toEpoch()
for record in recordsToQueue:
typeName, startTime, endTime, valuesList = record
result = self.__insertInQueueTable( typeName, startTime, endTime, valuesList )
if not result[ 'OK' ]:
return result
iD = result[ 'Value' ]
recordsToProcess.append( ( iD, typeName, startTime, endTime, valuesList, now ) )
return S_OK()
def insertRecordThroughQueue( self, typeName, startTime, endTime, valuesList ):
"""
    Insert a record into the IN (queue) table so that it can be really inserted afterwards
"""
if self.__readOnly:
return S_ERROR( "ReadOnly mode enabled. No modification allowed" )
self.log.info( "Adding record to queue", "for type %s\n [%s -> %s]" % ( typeName, Time.fromEpoch( startTime ), Time.fromEpoch( endTime ) ) )
if not typeName in self.dbCatalog:
return S_ERROR( "Type %s has not been defined in the db" % typeName )
result = self.__insertInQueueTable( typeName, startTime, endTime, valuesList )
    if not result[ 'OK' ]:
return result
return S_OK()
def __insertFromINTable( self, recordTuples ):
"""
Do the real insert and delete from the in buffer table
"""
self.log.verbose( "Received bundle to process", "of %s elements" % len( recordTuples ) )
for record in recordTuples:
iD, typeName, startTime, endTime, valuesList, insertionEpoch = record
result = self.insertRecordDirectly( typeName, startTime, endTime, valuesList )
if not result[ 'OK' ]:
self._update( "UPDATE `%s` SET taken=0 WHERE id=%s" % ( _getTableName( "in", typeName ), iD ) )
self.log.error( "Can't insert row", result[ 'Message' ] )
continue
result = self._update( "DELETE FROM `%s` WHERE id=%s" % ( _getTableName( "in", typeName ), iD ) )
if not result[ 'OK' ]:
self.log.error( "Can't delete row from the IN table", result[ 'Message' ] )
gMonitor.addMark( "insertiontime", Time.toEpoch() - insertionEpoch )
def insertRecordDirectly( self, typeName, startTime, endTime, valuesList ):
"""
Add an entry to the type contents
"""
if self.__readOnly:
return S_ERROR( "ReadOnly mode enabled. No modification allowed" )
gMonitor.addMark( "registeradded", 1 )
gMonitor.addMark( "registeradded:%s" % typeName, 1 )
self.log.info( "Adding record", "for type %s\n [%s -> %s]" % ( typeName, Time.fromEpoch( startTime ), Time.fromEpoch( endTime ) ) )
if not typeName in self.dbCatalog:
return S_ERROR( "Type %s has not been defined in the db" % typeName )
#Discover key indexes
for keyPos in range( len( self.dbCatalog[ typeName ][ 'keys' ] ) ):
keyName = self.dbCatalog[ typeName ][ 'keys' ][ keyPos ]
keyValue = valuesList[ keyPos ]
retVal = self.__addKeyValue( typeName, keyName, keyValue )
if not retVal[ 'OK' ]:
return retVal
self.log.verbose( "Value %s for key %s has id %s" % ( keyValue, keyName, retVal[ 'Value' ] ) )
valuesList[ keyPos ] = retVal[ 'Value' ]
insertList = list( valuesList )
insertList.append( startTime )
insertList.append( endTime )
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return retVal
connObj = retVal[ 'Value' ]
try:
retVal = self.insertFields( _getTableName( "type", typeName ),
self.dbCatalog[ typeName ][ 'typeFields' ],
insertList,
conn = connObj )
if not retVal[ 'OK' ]:
return retVal
#HACK: One more record to split in the buckets to be able to count total entries
valuesList.append( 1 )
retVal = self.__startTransaction( connObj )
if not retVal[ 'OK' ]:
return retVal
retVal = self.__splitInBuckets( typeName, startTime, endTime, valuesList, connObj = connObj )
if not retVal[ 'OK' ]:
self.__rollbackTransaction( connObj )
return retVal
return self.__commitTransaction( connObj )
finally:
connObj.close()
def deleteRecord( self, typeName, startTime, endTime, valuesList ):
"""
    Delete an entry from the type contents
"""
if self.__readOnly:
return S_ERROR( "ReadOnly mode enabled. No modification allowed" )
self.log.info( "Deleting record record", "for type %s\n [%s -> %s]" % ( typeName, Time.fromEpoch( startTime ), Time.fromEpoch( endTime ) ) )
if not typeName in self.dbCatalog:
return S_ERROR( "Type %s has not been defined in the db" % typeName )
sqlValues = []
sqlValues.extend( valuesList )
#Discover key indexes
for keyPos in range( len( self.dbCatalog[ typeName ][ 'keys' ] ) ):
keyName = self.dbCatalog[ typeName ][ 'keys' ][ keyPos ]
keyValue = sqlValues[ keyPos ]
retVal = self.__addKeyValue( typeName, keyName, keyValue )
if not retVal[ 'OK' ]:
return retVal
self.log.verbose( "Value %s for key %s has id %s" % ( keyValue, keyName, retVal[ 'Value' ] ) )
sqlValues[ keyPos ] = retVal[ 'Value' ]
sqlCond = []
mainTable = _getTableName( "type", typeName )
sqlValues.extend( [ startTime, endTime ] )
numKeyFields = len( self.dbCatalog[ typeName ][ 'keys' ] )
numValueFields = len( self.dbCatalog[ typeName ][ 'values' ] )
for i in range( len( sqlValues ) ):
needToRound = False
if i >= numKeyFields and i - numKeyFields < numValueFields:
vIndex = i - numKeyFields
if self.dbCatalog[ typeName ][ 'definition' ][ 'values' ][vIndex][1].find( "FLOAT" ) > -1:
needToRound = True
if needToRound:
compVal = [ "`%s`.`%s`" % ( mainTable, self.dbCatalog[ typeName ][ 'typeFields' ][i] ),
"%f" % sqlValues[i] ]
compVal = [ "CEIL( %s * 1000 )" % v for v in compVal ]
compVal = "ABS( %s ) <= 1 " % " - ".join( compVal )
else:
sqlCond.append( "`%s`.`%s`=%s" % ( mainTable,
self.dbCatalog[ typeName ][ 'typeFields' ][i],
sqlValues[i] ) )
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return retVal
connObj = retVal[ 'Value' ]
retVal = self.__startTransaction( connObj )
if not retVal[ 'OK' ]:
return retVal
retVal = self._update( "DELETE FROM `%s` WHERE %s" % ( mainTable, " AND ".join( sqlCond ) ),
conn = connObj )
if not retVal[ 'OK' ]:
return retVal
numInsertions = retVal[ 'Value' ]
#Deleted from type, now the buckets
#HACK: One more record to split in the buckets to be able to count total entries
if numInsertions == 0:
return S_OK( 0 )
sqlValues.append( 1 )
retVal = self.__deleteFromBuckets( typeName, startTime, endTime, sqlValues, numInsertions, connObj = connObj )
if not retVal[ 'OK' ]:
self.__rollbackTransaction( connObj )
return retVal
retVal = self.__commitTransaction( connObj )
if not retVal[ 'OK' ]:
self.__rollbackTransaction( connObj )
return retVal
return S_OK( numInsertions )
def __splitInBuckets( self, typeName, startTime, endTime, valuesList, connObj = False ):
"""
Bucketize a record
"""
#Calculate amount of buckets
buckets = self.calculateBuckets( typeName, startTime, endTime )
#Separate key values from normal values
numKeys = len( self.dbCatalog[ typeName ][ 'keys' ] )
keyValues = valuesList[ :numKeys ]
valuesList = valuesList[ numKeys: ]
self.log.verbose( "Splitting entry", " in %s buckets" % len( buckets ) )
return self.__writeBuckets( typeName, buckets, keyValues, valuesList, connObj = connObj )
def __deleteFromBuckets( self, typeName, startTime, endTime, valuesList, numInsertions, connObj = False ):
"""
DeBucketize a record
"""
#Calculate amount of buckets
buckets = self.calculateBuckets( typeName, startTime, endTime, self.__lastCompactionEpoch )
#Separate key values from normal values
numKeys = len( self.dbCatalog[ typeName ][ 'keys' ] )
keyValues = valuesList[ :numKeys ]
valuesList = valuesList[ numKeys: ]
self.log.verbose( "Deleting bucketed entry", "from %s buckets" % len( buckets ) )
for bucketInfo in buckets:
bucketStartTime = bucketInfo[0]
bucketProportion = bucketInfo[1]
bucketLength = bucketInfo[2]
for _i in range( max( 1, self.__deadLockRetries ) ):
retVal = self.__extractFromBucket( typeName,
bucketStartTime,
bucketLength,
keyValues,
valuesList, bucketProportion * numInsertions, connObj = connObj )
if not retVal[ 'OK' ]:
#If failed because of dead lock try restarting
          if retVal[ 'Message' ].find( "try restarting transaction" ) > -1:
continue
return retVal
#If OK, break loop
if retVal[ 'OK' ]:
break
return S_OK()
def getBucketsDef( self, typeName ):
return self.dbBucketsLength[ typeName ]
def __generateSQLConditionForKeys( self, typeName, keyValues ):
"""
Generate sql condition for buckets, values are indexes to real values
"""
realCondList = []
for keyPos in range( len( self.dbCatalog[ typeName ][ 'keys' ] ) ):
keyField = self.dbCatalog[ typeName ][ 'keys' ][ keyPos ]
keyValue = keyValues[ keyPos ]
retVal = self._escapeString( keyValue )
if not retVal[ 'OK' ]:
return retVal
keyValue = retVal[ 'Value' ]
realCondList.append( "`%s`.`%s` = %s" % ( _getTableName( "bucket", typeName ), keyField, keyValue ) )
return " AND ".join( realCondList )
def __getBucketFromDB( self, typeName, startTime, bucketLength, keyValues, connObj = False ):
"""
Get a bucket from the DB
"""
tableName = _getTableName( "bucket", typeName )
sqlFields = []
for valueField in self.dbCatalog[ typeName ][ 'values' ]:
sqlFields.append( "`%s`.`%s`" % ( tableName, valueField ) )
sqlFields.append( "`%s`.`entriesInBucket`" % ( tableName ) )
cmd = "SELECT %s FROM `%s`" % ( ", ".join( sqlFields ), _getTableName( "bucket", typeName ) )
cmd += " WHERE `%s`.`startTime`='%s' AND `%s`.`bucketLength`='%s' AND " % (
tableName,
startTime,
tableName,
bucketLength )
cmd += self.__generateSQLConditionForKeys( typeName, keyValues )
return self._query( cmd, conn = connObj )
def __extractFromBucket( self, typeName, startTime, bucketLength, keyValues, bucketValues, proportion, connObj = False ):
"""
Update a bucket when coming from the raw insert
"""
tableName = _getTableName( "bucket", typeName )
cmd = "UPDATE `%s` SET " % tableName
sqlValList = []
for pos in range( len( self.dbCatalog[ typeName ][ 'values' ] ) ):
valueField = self.dbCatalog[ typeName ][ 'values' ][ pos ]
value = bucketValues[ pos ]
fullFieldName = "`%s`.`%s`" % ( tableName, valueField )
sqlValList.append( "%s=GREATEST(0,%s-(%s*%s))" % ( fullFieldName, fullFieldName, value, proportion ) )
sqlValList.append( "`%s`.`entriesInBucket`=GREATEST(0,`%s`.`entriesInBucket`-(%s*%s))" % ( tableName,
tableName,
bucketValues[-1],
proportion ) )
cmd += ", ".join( sqlValList )
cmd += " WHERE `%s`.`startTime`='%s' AND `%s`.`bucketLength`='%s' AND " % (
tableName,
startTime,
tableName,
bucketLength )
cmd += self.__generateSQLConditionForKeys( typeName, keyValues )
return self._update( cmd, conn = connObj )
def __writeBuckets( self, typeName, buckets, keyValues, valuesList, connObj = False ):
""" Insert or update a bucket
"""
# tableName = _getTableName( "bucket", typeName )
#INSERT PART OF THE QUERY
sqlFields = [ '`startTime`', '`bucketLength`', '`entriesInBucket`' ]
for keyPos in range( len( self.dbCatalog[ typeName ][ 'keys' ] ) ):
sqlFields.append( "`%s`" % self.dbCatalog[ typeName ][ 'keys' ][ keyPos ] )
sqlUpData = [ "`entriesInBucket`=`entriesInBucket`+VALUES(`entriesInBucket`)" ]
for valPos in range( len( self.dbCatalog[ typeName ][ 'values' ] ) ):
valueField = "`%s`" % self.dbCatalog[ typeName ][ 'values' ][ valPos ]
sqlFields.append( valueField )
sqlUpData.append( "%s=%s+VALUES(%s)" % ( valueField, valueField, valueField ) )
valuesGroups = []
for bucketInfo in buckets:
bStartTime = bucketInfo[0]
bProportion = bucketInfo[1]
bLength = bucketInfo[2]
sqlValues = [ bStartTime, bLength, "(%s*%s)" % ( valuesList[-1], bProportion )]
for keyPos in range( len( self.dbCatalog[ typeName ][ 'keys' ] ) ):
sqlValues.append( keyValues[ keyPos ] )
for valPos in range( len( self.dbCatalog[ typeName ][ 'values' ] ) ):
# value = valuesList[ valPos ]
sqlValues.append( "(%s*%s)" % ( valuesList[ valPos ], bProportion ) )
valuesGroups.append( "( %s )" % ",".join( str( val ) for val in sqlValues ) )
cmd = "INSERT INTO `%s` ( %s ) " % ( _getTableName( "bucket", typeName ), ", ".join( sqlFields ) )
cmd += "VALUES %s " % ", ".join( valuesGroups)
cmd += "ON DUPLICATE KEY UPDATE %s" % ", ".join( sqlUpData )
for _i in range( max( 1, self.__deadLockRetries ) ):
result = self._update( cmd, conn = connObj )
if not result[ 'OK' ]:
#If failed because of dead lock try restarting
        if result[ 'Message' ].find( "try restarting transaction" ) > -1:
continue
return result
      #If OK, return the result
if result[ 'OK' ]:
return result
return S_ERROR( "Cannot update bucket: %s" % result[ 'Message' ] )
def __checkFieldsExistsInType( self, typeName, fields, tableType ):
"""
    Check whether a list of fields exists for a given typeName
"""
missing = []
tableFields = self.dbCatalog[ typeName ][ '%sFields' % tableType ]
for key in fields:
if key not in tableFields:
missing.append( key )
return missing
def __checkIncomingFieldsForQuery( self, typeName, selectFields, condDict, groupFields, orderFields, tableType ):
missing = self.__checkFieldsExistsInType( typeName, selectFields[1], tableType )
if missing:
return S_ERROR( "Value keys %s are not defined" % ", ".join( missing ) )
missing = self.__checkFieldsExistsInType( typeName, condDict, tableType )
if missing:
return S_ERROR( "Condition keys %s are not defined" % ", ".join( missing ) )
if groupFields:
missing = self.__checkFieldsExistsInType( typeName, groupFields[1], tableType )
if missing:
return S_ERROR( "Group fields %s are not defined" % ", ".join( missing ) )
if orderFields:
missing = self.__checkFieldsExistsInType( typeName, orderFields[1], tableType )
if missing:
return S_ERROR( "Order fields %s are not defined" % ", ".join( missing ) )
return S_OK()
def retrieveRawRecords( self, typeName, startTime, endTime, condDict, orderFields, connObj = False ):
"""
Get RAW data from the DB
"""
if typeName not in self.dbCatalog:
return S_ERROR( "Type %s not defined" % typeName )
selectFields = [ [ "%s", "%s" ], [ "startTime", "endTime" ] ]
for tK in ( 'keys', 'values' ):
for key in self.dbCatalog[ typeName ][ tK ]:
selectFields[ 0 ].append( "%s" )
selectFields[ 1 ].append( key )
selectFields[ 0 ] = ", ".join( selectFields[ 0 ] )
return self.__queryType( typeName, startTime, endTime, selectFields,
condDict, False, orderFields, "type" )
def retrieveBucketedData( self, typeName, startTime, endTime, selectFields, condDict, groupFields, orderFields, connObj = False ):
"""
Get data from the DB
Parameters:
       - typeName -> name of the accounting type
       - startTime & endTime -> epoch timestamps (int) delimiting the query range
- selectFields -> tuple containing a string and a list of fields:
( "SUM(%s), %s/%s", ( "field1name", "field2name", "field3name" ) )
- condDict -> conditions for the query
key -> name of the field
value -> list of possible values
- groupFields -> list of fields to group by
( "%s, %s, %s", ( "field1name", "field2name", "field3name" ) )
- orderFields -> list of fields to order by
( "%s, %s, %s", ( "field1name", "field2name", "field3name" ) )
"""
if typeName not in self.dbCatalog:
return S_ERROR( "Type %s is not defined" % typeName )
startQueryEpoch = time.time()
if len( selectFields ) < 2:
return S_ERROR( "selectFields has to be a list containing a string and a list of fields" )
retVal = self.__checkIncomingFieldsForQuery( typeName, selectFields, condDict, groupFields, orderFields, "bucket" )
if not retVal[ 'OK' ]:
return retVal
nowEpoch = Time.toEpoch( Time.dateTime () )
bucketTimeLength = self.calculateBucketLengthForTime( typeName, nowEpoch , startTime )
startTime = startTime - startTime % bucketTimeLength
result = self.__queryType( typeName,
startTime,
endTime,
selectFields,
condDict,
groupFields,
orderFields,
"bucket",
connObj = connObj )
gMonitor.addMark( "querytime", Time.toEpoch() - startQueryEpoch )
return result
def __queryType( self, typeName, startTime, endTime, selectFields, condDict, groupFields, orderFields, tableType, connObj = False ):
"""
Execute a query over a main table
"""
tableName = _getTableName( tableType, typeName )
cmd = "SELECT"
sqlLinkList = []
#Check if groupFields and orderFields are in ( "%s", ( field1, ) ) form
if groupFields:
try:
groupFields[0] % tuple( groupFields[1] )
      except Exception as e:
return S_ERROR( "Cannot format properly group string: %s" % str( e ) )
if orderFields:
try:
orderFields[0] % tuple( orderFields[1] )
      except Exception as e:
return S_ERROR( "Cannot format properly order string: %s" % str( e ) )
#Calculate fields to retrieve
realFieldList = []
for rawFieldName in selectFields[1]:
keyTable = _getTableName( "key", typeName, rawFieldName )
if rawFieldName in self.dbCatalog[ typeName ][ 'keys' ]:
realFieldList.append( "`%s`.`value`" % keyTable )
List.appendUnique( sqlLinkList, "`%s`.`%s` = `%s`.`id`" % ( tableName,
rawFieldName,
keyTable ) )
else:
realFieldList.append( "`%s`.`%s`" % ( tableName, rawFieldName ) )
try:
cmd += " %s" % selectFields[0] % tuple( realFieldList )
    except Exception as e:
return S_ERROR( "Error generating select fields string: %s" % str( e ) )
#Calculate tables needed
sqlFromList = [ "`%s`" % tableName ]
for key in self.dbCatalog[ typeName ][ 'keys' ]:
if key in condDict or key in selectFields[1] \
or ( groupFields and key in groupFields[1] ) \
or ( orderFields and key in orderFields[1] ):
sqlFromList.append( "`%s`" % _getTableName( "key", typeName, key ) )
cmd += " FROM %s" % ", ".join( sqlFromList )
#Calculate time conditions
sqlTimeCond = []
if startTime:
if tableType == 'bucket':
#HACK because MySQL and UNIX do not start epoch at the same time
startTime = startTime + 3600
startTime = self.calculateBuckets( typeName, startTime, startTime )[0][0]
sqlTimeCond.append( "`%s`.`startTime` >= %s" % ( tableName, startTime ) )
if endTime:
if tableType == "bucket":
endTimeSQLVar = "startTime"
endTime = endTime + 3600
endTime = self.calculateBuckets( typeName, endTime, endTime )[0][0]
else:
endTimeSQLVar = "endTime"
sqlTimeCond.append( "`%s`.`%s` <= %s" % ( tableName, endTimeSQLVar, endTime ) )
cmd += " WHERE %s" % " AND ".join( sqlTimeCond )
#Calculate conditions
sqlCondList = []
for keyName in condDict:
sqlORList = []
if keyName in self.dbCatalog[ typeName ][ 'keys' ]:
List.appendUnique( sqlLinkList, "`%s`.`%s` = `%s`.`id`" % ( tableName,
keyName,
_getTableName( "key", typeName, keyName )
) )
if type( condDict[ keyName ] ) not in ( types.ListType, types.TupleType ):
condDict[ keyName ] = [ condDict[ keyName ] ]
for keyValue in condDict[ keyName ]:
retVal = self._escapeString( keyValue )
if not retVal[ 'OK' ]:
return retVal
keyValue = retVal[ 'Value' ]
if keyName in self.dbCatalog[ typeName ][ 'keys' ]:
sqlORList.append( "`%s`.`value` = %s" % ( _getTableName( "key", typeName, keyName ), keyValue ) )
else:
sqlORList.append( "`%s`.`%s` = %s" % ( tableName, keyName, keyValue ) )
sqlCondList.append( "( %s )" % " OR ".join( sqlORList ) )
if sqlCondList:
cmd += " AND %s" % " AND ".join( sqlCondList )
#Calculate grouping and sorting
for preGenFields in ( groupFields, orderFields ):
if preGenFields:
for i in range( len( preGenFields[1] ) ):
field = preGenFields[1][i]
if field in self.dbCatalog[ typeName ][ 'keys' ]:
List.appendUnique( sqlLinkList, "`%s`.`%s` = `%s`.`id`" % ( tableName,
field,
_getTableName( "key", typeName, field )
) )
if preGenFields[0] != "%s":
# The default grouping was changed
preGenFields[1][i] = "`%s`.Value" % _getTableName( "key", typeName, field )
else:
# The default grouping is maintained
preGenFields[1][i] = "`%s`.`%s`" % ( tableName, field )
if sqlLinkList:
cmd += " AND %s" % " AND ".join( sqlLinkList )
if groupFields:
cmd += " GROUP BY %s" % ( groupFields[0] % tuple( groupFields[1] ) )
if orderFields:
cmd += " ORDER BY %s" % ( orderFields[0] % tuple( orderFields[1] ) )
self.log.verbose( cmd )
return self._query( cmd, conn = connObj )
def compactBuckets( self, typeFilter = False ):
"""
Compact buckets for all defined types
"""
if self.__readOnly:
return S_ERROR( "ReadOnly mode enabled. No modification allowed" )
gSynchro.lock()
try:
if self.__doingCompaction:
return S_OK()
self.__doingCompaction = True
finally:
gSynchro.unlock()
slow = True
for typeName in self.dbCatalog:
if typeFilter and typeName.find( typeFilter ) == -1:
self.log.info( "[COMPACT] Skipping %s" % typeName )
continue
if self.dbCatalog[ typeName ][ 'dataTimespan' ] > 0:
self.log.info( "[COMPACT] Deleting records older that timespan for type %s" % typeName )
self.__deleteRecordsOlderThanDataTimespan( typeName )
self.log.info( "[COMPACT] Compacting %s" % typeName )
if slow:
self.__slowCompactBucketsForType( typeName )
else:
self.__compactBucketsForType( typeName )
self.log.info( "[COMPACT] Compaction finished" )
self.__lastCompactionEpoch = int( Time.toEpoch() )
gSynchro.lock()
try:
if self.__doingCompaction:
self.__doingCompaction = False
finally:
gSynchro.unlock()
return S_OK()
def __selectForCompactBuckets( self, typeName, timeLimit, bucketLength, nextBucketLength, connObj = False ):
"""
Nasty SQL query to get ideal buckets using grouping by date calculations and adding value contents
"""
tableName = _getTableName( "bucket", typeName )
selectSQL = "SELECT "
sqlSelectList = []
for field in self.dbCatalog[ typeName ][ 'keys' ]:
sqlSelectList.append( "`%s`.`%s`" % ( tableName, field ) )
for field in self.dbCatalog[ typeName ][ 'values' ]:
sqlSelectList.append( "SUM( `%s`.`%s` )" % ( tableName, field ) )
sqlSelectList.append( "SUM( `%s`.`entriesInBucket` )" % ( tableName ) )
sqlSelectList.append( "MIN( `%s`.`startTime` )" % tableName )
sqlSelectList.append( "MAX( `%s`.`startTime` )" % tableName )
selectSQL += ", ".join( sqlSelectList )
selectSQL += " FROM `%s`" % tableName
selectSQL += " WHERE `%s`.`startTime` < '%s' AND" % ( tableName, timeLimit )
selectSQL += " `%s`.`bucketLength` = %s" % ( tableName, bucketLength )
#MAGIC bucketing
sqlGroupList = [ _bucketizeDataField( "`%s`.`startTime`" % tableName, nextBucketLength ) ]
for field in self.dbCatalog[ typeName ][ 'keys' ]:
sqlGroupList.append( "`%s`.`%s`" % ( tableName, field ) )
selectSQL += " GROUP BY %s" % ", ".join( sqlGroupList )
return self._query( selectSQL, conn = connObj )
def __deleteForCompactBuckets( self, typeName, timeLimit, bucketLength, connObj = False ):
"""
Delete compacted buckets
"""
tableName = _getTableName( "bucket", typeName )
deleteSQL = "DELETE FROM `%s` WHERE " % tableName
deleteSQL += "`%s`.`startTime` < '%s' AND " % ( tableName, timeLimit )
deleteSQL += "`%s`.`bucketLength` = %s" % ( tableName, bucketLength )
return self._update( deleteSQL, conn = connObj )
def __compactBucketsForType( self, typeName ):
"""
Compact all buckets for a given type
"""
nowEpoch = Time.toEpoch()
#retVal = self.__startTransaction( connObj )
#if not retVal[ 'OK' ]:
# return retVal
for bPos in range( len( self.dbBucketsLength[ typeName ] ) - 1 ):
self.log.info( "[COMPACT] Query %d of %d" % ( bPos + 1, len( self.dbBucketsLength[ typeName ] ) - 1 ) )
secondsLimit = self.dbBucketsLength[ typeName ][ bPos ][0]
bucketLength = self.dbBucketsLength[ typeName ][ bPos ][1]
timeLimit = ( nowEpoch - nowEpoch % bucketLength ) - secondsLimit
nextBucketLength = self.dbBucketsLength[ typeName ][ bPos + 1 ][1]
self.log.info( "[COMPACT] Compacting data newer that %s with bucket size %s" % ( Time.fromEpoch( timeLimit ), bucketLength ) )
#Retrieve the data
retVal = self.__selectForCompactBuckets( typeName, timeLimit, bucketLength, nextBucketLength )
if not retVal[ 'OK' ]:
#self.__rollbackTransaction( connObj )
return retVal
bucketsData = retVal[ 'Value' ]
self.log.info( "[COMPACT] Got %d records to compact" % len( bucketsData ) )
if len( bucketsData ) == 0:
continue
retVal = self.__deleteForCompactBuckets( typeName, timeLimit, bucketLength )
if not retVal[ 'OK' ]:
#self.__rollbackTransaction( connObj )
return retVal
self.log.info( "[COMPACT] Compacting %s records %s seconds size for %s" % ( len( bucketsData ), bucketLength, typeName ) )
#Add data
for record in bucketsData:
startTime = record[-2]
endTime = record[-1]
valuesList = record[:-2]
retVal = self.__splitInBuckets( typeName, startTime, endTime, valuesList )
if not retVal[ 'OK' ]:
#self.__rollbackTransaction( connObj )
self.log.error( "[COMPACT] Error while compacting data for record", "%s: %s" % ( typeName, retVal[ 'Value' ] ) )
self.log.info( "[COMPACT] Finished compaction %d of %d" % ( bPos, len( self.dbBucketsLength[ typeName ] ) - 1 ) )
#return self.__commitTransaction( connObj )
return S_OK()
def __slowCompactBucketsForType( self, typeName ):
"""
Compact all buckets for a given type
"""
nowEpoch = Time.toEpoch()
for bPos in range( len( self.dbBucketsLength[ typeName ] ) - 1 ):
self.log.info( "[COMPACT] Query %d of %d" % ( bPos, len( self.dbBucketsLength[ typeName ] ) - 1 ) )
secondsLimit = self.dbBucketsLength[ typeName ][ bPos ][0]
bucketLength = self.dbBucketsLength[ typeName ][ bPos ][1]
timeLimit = ( nowEpoch - nowEpoch % bucketLength ) - secondsLimit
nextBucketLength = self.dbBucketsLength[ typeName ][ bPos + 1 ][1]
self.log.info( "[COMPACT] Compacting data newer that %s with bucket size %s for %s" % ( Time.fromEpoch( timeLimit ), bucketLength, typeName ) )
querySize = 10000
previousRecordsSelected = querySize
totalCompacted = 0
while previousRecordsSelected == querySize:
#Retrieve the data
self.log.info( "[COMPACT] Retrieving buckets to compact newer that %s with size %s" % ( Time.fromEpoch( timeLimit ),
bucketLength ) )
roundStartTime = time.time()
result = self.__selectIndividualForCompactBuckets( typeName, timeLimit, bucketLength,
nextBucketLength, querySize )
if not result[ 'OK' ]:
#self.__rollbackTransaction( connObj )
return result
bucketsData = result[ 'Value' ]
previousRecordsSelected = len( bucketsData )
selectEndTime = time.time()
self.log.info( "[COMPACT] Got %d buckets (%d done) (took %.2f secs)" % ( previousRecordsSelected,
totalCompacted,
selectEndTime - roundStartTime ) )
if len( bucketsData ) == 0:
break
result = self.__deleteIndividualForCompactBuckets( typeName, bucketsData )
if not result[ 'OK' ]:
#self.__rollbackTransaction( connObj )
return result
bucketsData = result[ 'Value' ]
deleteEndTime = time.time()
self.log.info( "[COMPACT] Deleted %s out-of-bounds buckets (took %.2f secs)" % ( len( bucketsData ),
deleteEndTime - selectEndTime ) )
#Add data
for record in bucketsData:
startTime = record[-2]
endTime = record[-2] + record[-1]
valuesList = record[:-2]
retVal = self.__splitInBuckets( typeName, startTime, endTime, valuesList )
if not retVal[ 'OK' ]:
self.log.error( "[COMPACT] Error while compacting data for buckets", "%s: %s" % ( typeName, retVal[ 'Value' ] ) )
totalCompacted += len( bucketsData )
insertElapsedTime = time.time() - deleteEndTime
self.log.info( "[COMPACT] Records compacted (took %.2f secs, %.2f secs/bucket)" % ( insertElapsedTime,
insertElapsedTime / len( bucketsData ) ) )
self.log.info( "[COMPACT] Finised compaction %d of %d" % ( bPos, len( self.dbBucketsLength[ typeName ] ) - 1 ) )
#return self.__commitTransaction( connObj )
return S_OK()
def __selectIndividualForCompactBuckets( self, typeName, timeLimit, bucketLength, nextBucketLength, querySize, connObj = False ):
"""
Nasty SQL query to get ideal buckets using grouping by date calculations and adding value contents
"""
tableName = _getTableName( "bucket", typeName )
selectSQL = "SELECT "
sqlSelectList = []
for field in self.dbCatalog[ typeName ][ 'keys' ]:
sqlSelectList.append( "`%s`.`%s`" % ( tableName, field ) )
for field in self.dbCatalog[ typeName ][ 'values' ]:
sqlSelectList.append( "`%s`.`%s`" % ( tableName, field ) )
sqlSelectList.append( "`%s`.`entriesInBucket`" % ( tableName ) )
sqlSelectList.append( "`%s`.`startTime`" % tableName )
sqlSelectList.append( "`%s`.bucketLength" % ( tableName ) )
selectSQL += ", ".join( sqlSelectList )
selectSQL += " FROM `%s`" % tableName
selectSQL += " WHERE `%s`.`startTime` < '%s' AND" % ( tableName, timeLimit )
selectSQL += " `%s`.`bucketLength` = %s" % ( tableName, bucketLength )
#MAGIC bucketing
selectSQL += " LIMIT %d" % querySize
return self._query( selectSQL, conn = connObj )
def __deleteIndividualForCompactBuckets( self, typeName, bucketsData, connObj = False ):
"""
Delete compacted buckets
"""
tableName = _getTableName( "bucket", typeName )
keyFields = self.dbCatalog[ typeName ][ 'keys' ]
deleteQueryLimit = 50
deletedBuckets = []
for bLimit in range( 0, len( bucketsData ) , deleteQueryLimit ):
delCondsSQL = []
for record in bucketsData[ bLimit : bLimit + deleteQueryLimit ]:
condSQL = []
for iPos in range( len( keyFields ) ):
field = keyFields[ iPos ]
condSQL.append( "`%s`.`%s` = %s" % ( tableName, field, record[ iPos ] ) )
condSQL.append( "`%s`.`startTime` = %d" % ( tableName, record[-2] ) )
condSQL.append( "`%s`.`bucketLength` = %d" % ( tableName, record[-1] ) )
delCondsSQL.append( "(%s)" % " AND ".join( condSQL ) )
delSQL = "DELETE FROM `%s` WHERE %s" % ( tableName, " OR ".join( delCondsSQL ) )
result = self._update( delSQL, conn = connObj )
if not result[ 'OK' ]:
self.log.error( "Cannot delete individual records for compaction", result[ 'Message' ] )
else:
deletedBuckets.extend( bucketsData[ bLimit : bLimit + deleteQueryLimit ] )
return S_OK( deletedBuckets )
def __deleteRecordsOlderThanDataTimespan( self, typeName ):
"""
    If types define dataTimespan, then records older than dataTimespan seconds will be deleted
automatically
"""
dataTimespan = self.dbCatalog[ typeName ][ 'dataTimespan' ]
if dataTimespan < 86400 * 30:
return
for table, field in ( ( _getTableName( "type", typeName ), 'endTime' ),
( _getTableName( "bucket", typeName ), 'startTime + %s' % self.dbBucketsLength[ typeName ][-1][1] ) ):
self.log.info( "[COMPACT] Deleting old records for table %s" % table )
deleteLimit = 100000
deleted = deleteLimit
while deleted >= deleteLimit:
sqlCmd = "DELETE FROM `%s` WHERE %s < UNIX_TIMESTAMP()-%d LIMIT %d" % ( table, field, dataTimespan, deleteLimit )
result = self._update( sqlCmd )
if not result[ 'OK' ]:
self.log.error( "[COMPACT] Cannot delete old records", "Table: %s Timespan: %s Error: %s" % ( table,
dataTimespan,
result[ 'Message' ] ) )
break
self.log.info( "[COMPACT] Deleted %d records for %s table" % ( result[ 'Value' ], table ) )
deleted = result[ 'Value' ]
time.sleep( 1 )
def regenerateBuckets( self, typeName ):
if self.__readOnly:
return S_ERROR( "ReadOnly mode enabled. No modification allowed" )
#Delete old entries if any
if self.dbCatalog[ typeName ][ 'dataTimespan' ] > 0:
self.log.info( "[REBUCKET] Deleting records older that timespan for type %s" % typeName )
self.__deleteRecordsOlderThanDataTimespan( typeName )
self.log.info( "[REBUCKET] Done deleting old records" )
rawTableName = _getTableName( "type", typeName )
#retVal = self.__startTransaction( connObj )
#if not retVal[ 'OK' ]:
# return retVal
self.log.info( "[REBUCKET] Deleting buckets for %s" % typeName )
retVal = self._update( "DELETE FROM `%s`" % _getTableName( "bucket", typeName ) )
if not retVal[ 'OK' ]:
return retVal
#Generate the common part of the query
#SELECT fields
startTimeTableField = "`%s`.startTime" % rawTableName
endTimeTableField = "`%s`.endTime" % rawTableName
#Select strings and sum select strings
sqlSUMSelectList = []
sqlSelectList = []
for field in self.dbCatalog[ typeName ][ 'keys' ]:
sqlSUMSelectList.append( "`%s`.`%s`" % ( rawTableName, field ) )
sqlSelectList.append( "`%s`.`%s`" % ( rawTableName, field ) )
for field in self.dbCatalog[ typeName ][ 'values' ]:
sqlSUMSelectList.append( "SUM( `%s`.`%s` )" % ( rawTableName, field ) )
sqlSelectList.append( "`%s`.`%s`" % ( rawTableName, field ) )
sumSelectString = ", ".join( sqlSUMSelectList )
selectString = ", ".join( sqlSelectList )
#Grouping fields
sqlGroupList = []
for field in self.dbCatalog[ typeName ][ 'keys' ]:
sqlGroupList.append( "`%s`.`%s`" % ( rawTableName, field ) )
groupingString = ", ".join( sqlGroupList )
#List to contain all queries
sqlQueries = []
dateInclusiveConditions = []
countedField = "`%s`.`%s`" % ( rawTableName, self.dbCatalog[ typeName ][ 'keys' ][0] )
lastTime = Time.toEpoch()
#Iterate for all ranges
for iRange in range( len( self.dbBucketsLength[ typeName ] ) ):
bucketTimeSpan = self.dbBucketsLength[ typeName ][iRange][0]
bucketLength = self.dbBucketsLength[ typeName ][iRange][1]
startRangeTime = lastTime - bucketTimeSpan
endRangeTime = lastTime
lastTime -= bucketTimeSpan
bucketizedStart = _bucketizeDataField( startTimeTableField, bucketLength )
bucketizedEnd = _bucketizeDataField( endTimeTableField, bucketLength )
timeSelectString = "MIN(%s), MAX(%s)" % ( startTimeTableField,
endTimeTableField )
#Is the last bucket?
if iRange == len( self.dbBucketsLength[ typeName ] ) - 1:
whereString = "%s <= %d" % ( endTimeTableField,
endRangeTime )
else:
whereString = "%s > %d AND %s <= %d" % ( startTimeTableField,
startRangeTime,
endTimeTableField,
endRangeTime )
sameBucketCondition = "(%s) = (%s)" % ( bucketizedStart, bucketizedEnd )
#Records that fit in a bucket
sqlQuery = "SELECT %s, %s, COUNT(%s) FROM `%s` WHERE %s AND %s GROUP BY %s, %s" % ( timeSelectString,
sumSelectString,
countedField,
rawTableName,
whereString,
sameBucketCondition,
groupingString,
bucketizedStart )
sqlQueries.append( sqlQuery )
#Records that fit in more than one bucket
sqlQuery = "SELECT %s, %s, %s, 1 FROM `%s` WHERE %s AND NOT %s" % ( startTimeTableField,
endTimeTableField,
selectString,
rawTableName,
whereString,
sameBucketCondition
)
sqlQueries.append( sqlQuery )
dateInclusiveConditions.append( "( %s )" % whereString )
#Query for records that are in between two ranges
sqlQuery = "SELECT %s, %s, %s, 1 FROM `%s` WHERE NOT %s" % ( startTimeTableField,
endTimeTableField,
selectString,
rawTableName,
" AND NOT ".join( dateInclusiveConditions ) )
sqlQueries.append( sqlQuery )
self.log.info( "[REBUCKET] Retrieving data for rebuilding buckets for type %s..." % ( typeName ) )
queryNum = 0
for sqlQuery in sqlQueries:
self.log.info( "[REBUCKET] Executing query #%s..." % queryNum )
queryNum += 1
retVal = self._query( sqlQuery )
if not retVal[ 'OK' ]:
self.log.error( "[REBUCKET] Can't retrieve data for rebucketing", retVal[ 'Message' ] )
#self.__rollbackTransaction( connObj )
return retVal
rawData = retVal[ 'Value' ]
self.log.info( "[REBUCKET] Retrieved %s records" % len( rawData ) )
rebucketedRecords = 0
startQuery = time.time()
startBlock = time.time()
numRecords = len( rawData )
for entry in rawData:
startT = entry[0]
endT = entry[1]
values = entry[2:]
retVal = self.__splitInBuckets( typeName, startT, endT, values )
if not retVal[ 'OK' ]:
#self.__rollbackTransaction( connObj )
return retVal
rebucketedRecords += 1
if rebucketedRecords % 1000 == 0:
queryAvg = rebucketedRecords / float( time.time() - startQuery )
blockAvg = 1000 / float( time.time() - startBlock )
startBlock = time.time()
perDone = 100 * rebucketedRecords / float ( numRecords )
expectedEnd = str( datetime.timedelta( seconds = int( ( numRecords - rebucketedRecords ) / blockAvg ) ) )
self.log.info( "[REBUCKET] Rebucketed %.2f%% %s (%.2f r/s block %.2f r/s query | ETA %s )..." % ( perDone, typeName,
blockAvg, queryAvg,
expectedEnd ) )
#return self.__commitTransaction( connObj )
return S_OK()
def __startTransaction( self, connObj ):
return self._query( "START TRANSACTION", conn = connObj )
def __commitTransaction( self, connObj ):
return self._query( "COMMIT", conn = connObj )
def __rollbackTransaction( self, connObj ):
return self._query( "ROLLBACK", conn = connObj )
def _bucketizeDataField( dataField, bucketLength ):
return "%s - ( %s %% %s )" % ( dataField, dataField, bucketLength )
def _getTableName( tableType, typeName, keyName = None ):
"""
Generate table name
"""
if not keyName:
return "ac_%s_%s" % ( tableType, typeName )
elif tableType == "key" :
return "ac_%s_%s_%s" % ( tableType, typeName, keyName )
else:
raise Exception( "Call to _getTableName with tableType as key but with no keyName" )
|
vmendez/DIRAC
|
AccountingSystem/DB/AccountingDB.py
|
Python
|
gpl-3.0
| 70,044
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-29 09:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0002_pokemon_qr_code'),
]
operations = [
migrations.AddField(
model_name='pokemon',
name='qr_code_image',
field=models.ImageField(blank=True, null=True, upload_to='qr_code_image'),
),
]
|
petersterling1/poke-qr-viewer
|
main/migrations/0003_pokemon_qr_code_image.py
|
Python
|
gpl-3.0
| 487
|
x = 1.56
print( int( x ) )
print( x )
x = 2.1
y = ' 3 '
z = int( x )
print( z )
print( int( y ) )
print( 10 * int( "100000000" ) )
|
paulmcquad/Python
|
4-Simple Functions/int_func.py
|
Python
|
gpl-3.0
| 133
|
from django.contrib import admin
# Register your models here.
from .models import simulation,thing,User
class FilterUserAdmin(admin.ModelAdmin):
    # This class is used to filter objects by user (account).
def save_model(self, request, obj, form, change):
obj.account = request.user
obj.save()
def get_queryset(self, request):
# For Django < 1.6, override queryset instead of get_queryset
qs = super(FilterUserAdmin, self).get_queryset(request)
return qs.filter(account=request.user)
def has_change_permission(self, request, obj=None):
if not obj:
# the changelist itself
return True
return obj.account == request.user
class simulationAdmin(FilterUserAdmin):
pass # (replace this with anything else you need)
class sectionAdmin(FilterUserAdmin):
pass # (replace this with anything else you need)
admin.site.register(simulation,simulationAdmin)
admin.site.register(thing)
|
zibawa/zibawa
|
simulator/admin.py
|
Python
|
gpl-3.0
| 1,016
|
# -*- coding: utf-8 -*-
"""Test case for backend."""
from pytest import raises
from ..context import Context, ContextProvider, current_context
def test_empty_context():
"""Check that context have functions and globals."""
context = Context()
assert context.functions == {}
assert context.globals == {}
def test_empty_context_provider():
"""Current context should be None by default."""
with current_context() as context:
assert context is None
def test_simple_context_provider():
"""Check that context provider works with current_context proxy."""
context = Context()
with ContextProvider(context):
with current_context() as inner_context:
assert inner_context is context
def test_exception():
"""Check that context provider throws exceptions."""
context = Context()
with raises(RuntimeError):
with ContextProvider(context):
with current_context():
raise RuntimeError('simple error')
def test_recursive():
"""Check that context provider can be recursive."""
context1 = Context()
context2 = Context()
with ContextProvider(context1):
with current_context() as inner_context1:
assert context1 is inner_context1
with ContextProvider(context2):
with current_context() as inner_context2:
assert context2 is inner_context2
with current_context() as inner_context3:
assert context1 is inner_context3
|
vslutov/LLB3D
|
llb3d/tests/test_context.py
|
Python
|
gpl-3.0
| 1,509
|
# Copyright (C) 2011, 2012 Abhijit Mahabal
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this
# program. If not, see <http://www.gnu.org/licenses/>
import logging
from farg.apps.seqsee.anchored import SAnchored
from farg.apps.seqsee.exceptions import ConflictingGroupException
from farg.apps.seqsee.subspaces.are_we_done import SubspaceAreWeDone
from farg.apps.seqsee.subspaces.deal_with_conflicting_groups import SubspaceDealWithConflictingGroups
from farg.apps.seqsee.subspaces.is_this_interlaced import SubspaceIsThisInterlaced
from farg.core.codelet import CodeletFamily
from farg.core.exceptions import AnswerFoundException
from farg.core.history import History
class CF_FocusOn(CodeletFamily):
"""Causes the required focusable to be added to the stream."""
@classmethod
def Run(cls, controller, focusable, *, me):
controller.stream.FocusOn(focusable, parents=[me])
class CF_GroupFromRelation(CodeletFamily):
"""Causes the required relations' ends to create a group."""
@classmethod
def Run(cls, controller, relation, *, me):
# If there is a group spanning the proposed group, perish the thought.
left, right = relation.first.start_pos, relation.second.end_pos
from farg.apps.seqsee.util import GreaterThanEq, LessThanEq
if tuple(controller.workspace.GetGroupsWithSpan(LessThanEq(left), GreaterThanEq(right))):
History.Note("CF_GroupFromRelation: a spanning group exists")
return
anchored = SAnchored.Create((relation.first, relation.second),
underlying_mapping_set=relation.mapping_set)
try:
controller.workspace.InsertGroup(anchored, parent=[me])
except ConflictingGroupException as e:
SubspaceDealWithConflictingGroups(
controller,
workspace_arguments=dict(new_group=anchored,
incumbents=e.conflicting_groups),
parents=[me, relation],
msg="Conflict while inserting %s" % anchored.BriefLabel()).Run()
class CF_DescribeAs(CodeletFamily):
"""Attempt to describe item as belonging to category."""
@classmethod
def Run(cls, controller, item, category, *, me):
if not item.IsKnownAsInstanceOf(category):
item.DescribeAs(category)
class CF_AreWeDone(CodeletFamily):
"""Check using a subspace if we are done. If yes, quit."""
@classmethod
def Run(cls, controller, *, me):
answer = SubspaceAreWeDone(controller, parents=[me]).Run()
if answer:
controller.ui.DisplayMessage("In its current nascent stage, Seqsee decides that it "
"has found the solution as soon as it has added 10 new "
"terms. This is something that needs fixing. Quitting.")
raise AnswerFoundException("AnswerFound", codelet_count=controller.steps_taken)
class CF_IsThisInterlaced(CodeletFamily):
"""Check using a subspace if we may be looking at an interlaced sequence."""
@classmethod
def Run(cls, controller, distance, *, me):
logging.debug("RUNNING CF_AreweDone with distance=%s", str(distance))
SubspaceIsThisInterlaced(controller,
nsteps=20,
workspace_arguments=dict(distance=distance),
parents=[me]).Run()
class CF_RemoveSpuriousRelations(CodeletFamily):
"""Removes relations between all pairs (A, B) where both belong to supuergroups but their
supergroups don't overlap.
"""
@classmethod
def Run(cls, controller, *, me):
workspace = controller.workspace
supergroups_map = workspace.CalculateSupergroupMap()
History.Note("CF_RemoveSpuriousRelations: called")
for element in workspace.elements:
supergps = supergroups_map[element]
if not supergps:
continue
relations_to_remove = []
for relation in element.relations:
if relation.first == element:
other_end = relation.second
else:
other_end = relation.first
other_supergps = supergroups_map[other_end]
if not other_supergps:
continue
if supergps.intersection(other_supergps):
continue
other_end.relations.discard(relation)
relations_to_remove.append(relation)
History.Note("CF_RemoveSpuriousRelations: removed", times=len(relations_to_remove))
for relation in relations_to_remove:
element.relations.discard(relation)
|
amahabal/PySeqsee
|
farg/apps/seqsee/codelet_families/all.py
|
Python
|
gpl-3.0
| 4,940
|
__problem_title__ = "Hypocycloid and Lattice points"
__problem_url___ = "https://projecteuler.net/problem=450"
__problem_description__ = "A hypocycloid is the curve drawn by a point on a small circle rolling " \
"inside a larger circle. The parametric equations of a hypocycloid " \
"centered at the origin, and starting at the right most point is given " \
"by: $x(t) = (R - r) \cos(t) + r \cos(\frac {R - r} r t)$ $y(t) = (R - " \
"r) \sin(t) - r \sin(\frac {R - r} r t)$ Where is the radius of the " \
"large circle and the radius of the small circle. Let $C(R, r)$ be the " \
"set of distinct points with integer coordinates on the hypocycloid " \
"with radius and and for which there is a corresponding value of such " \
"that $\sin(t)$ and $\cos(t)$ are rational numbers. Let $S(R, r) = " \
"\sum_{(x,y) \in C(R, r)} |x| + |y|$ be the sum of the absolute values " \
"of the and coordinates of the points in $C(R, r)$. Let $T(N) = " \
"\sum_{R = 3}^N \sum_{r=1}^{\lfloor \frac {R - 1} 2 \rfloor} S(R, r)$ " \
"be the sum of $S(R, r)$ for and positive integers, $R\leq N$ and $2r " \
"You are given: (3, 1) = {(3, 0), (-1, 2), (-1,0), (-1,-2)} (2500, " \
"1000) = (3, 1) = (|3| + |0|) + (|-1| + |2|) + (|-1| + |0|) + (|-1| + " \
"|-2|) = 10 (3) = 10; (10) = 524 ; (100) = 580442; (10 ) = 583108600. " \
"Find (10 )."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
jrichte43/ProjectEuler
|
Problem-0450/solutions.py
|
Python
|
gpl-3.0
| 2,107
|
"""autogenerated by genpy from rosserial_arduino/TestRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TestRequest(genpy.Message):
_md5sum = "39e92f1778057359c64c7b8a7d7b19de"
_type = "rosserial_arduino/TestRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """string input
"""
__slots__ = ['input']
_slot_types = ['string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
input
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(TestRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.input is None:
self.input = ''
else:
self.input = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.input
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.input = str[start:end].decode('utf-8')
else:
self.input = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.input
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.input = str[start:end].decode('utf-8')
else:
self.input = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
"""autogenerated by genpy from rosserial_arduino/TestResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TestResponse(genpy.Message):
_md5sum = "0825d95fdfa2c8f4bbb4e9c74bccd3fd"
_type = "rosserial_arduino/TestResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """string output
"""
__slots__ = ['output']
_slot_types = ['string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
output
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(TestResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.output is None:
self.output = ''
else:
self.output = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.output
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.output = str[start:end].decode('utf-8')
else:
self.output = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.output
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.output = str[start:end].decode('utf-8')
else:
self.output = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
class Test(object):
_type = 'rosserial_arduino/Test'
_md5sum = 'c63e85f503b805d84df783e71c6bb0d2'
_request_class = TestRequest
_response_class = TestResponse
|
jgrizou/robot_omniwheel
|
catkin_ws/devel/lib/python2.7/dist-packages/rosserial_arduino/srv/_Test.py
|
Python
|
gpl-3.0
| 7,141
|
"""
WSGI config for twittmap project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "twittmap.settings")
application = get_wsgi_application()
|
anshulsharmanyu/twitter_plot
|
Twitter Map Cloud Assignment/twittmap/wsgi.py
|
Python
|
gpl-3.0
| 394
|
import pycuda.autoinit
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
import warnings
import numpy as np
import os
from dbcv.__handler__ import Handler
class CudaHandler(Handler):
_MIN_N_THREADS = 32
_MAX_N_THREADS = 1024
_N_OUTPUT = 3
def __init__(self, dataset):
super(CudaHandler, self).__init__(dataset)
sep = '\\' if os.name == 'nt' else '/'
cur_path = os.path.relpath(__file__)
split = cur_path.split(sep)[:-1]
kernel = '\n'.join(open(sep.join(split + ['kernel.cu']), 'r').readlines())
mod = SourceModule(kernel)
self._func_coredist = mod.get_function("all_pts_coredist")
self._func_mreach = mod.get_function("global_mreach_matrix")
self._func_dm = mod.get_function("distance_matrix")
self._threads_per_block = ((self._n_objects / CudaHandler._MIN_N_THREADS) + 1) * CudaHandler._MIN_N_THREADS
if self._threads_per_block > CudaHandler._MAX_N_THREADS:
warnings.warn('Warning: using more threads per GPU than allowed! Rolling back to ' + str(self._MAX_N_THREADS) + '.')
self._threads_per_block = CudaHandler._MAX_N_THREADS
n_blocks = (self._n_objects / self._threads_per_block) + 1
self._grid_size = (
int(np.sqrt(n_blocks)),
int(np.sqrt(n_blocks))
)
self._mem_coredist = cuda.mem_alloc(self._data_coredist.nbytes)
self._mem_mreach = cuda.mem_alloc(self._data_mreach.nbytes)
self._mem_dm = cuda.mem_alloc(self._data_dm.nbytes)
self._mem_labels = cuda.mem_alloc(self._n_objects * np.float32(1).itemsize)
self._mem_dataset = cuda.mem_alloc(self._dataset.nbytes)
cuda.memcpy_htod(self._mem_dataset, self._dataset)
self.__get_distance_matrix__(copy=True)
def __get_distance_matrix__(self, copy=False):
"""
Compute the euclidean distance matrix WITHOUT the square root.
:param copy: optional - whether to copy the matrix from GPU to host memory. If True, will also return the matrix.
"""
self._func_dm(
self._mem_dm,
np.int32(self._n_objects),
np.int32(self._n_attributes),
self._mem_dataset,
block=(self._threads_per_block, 1, 1), # block size
grid=self._grid_size
)
if copy:
cuda.memcpy_dtoh(self._data_dm, self._mem_dm) # get info from gpu memory
return self._data_dm
def __get_coredist__(self):
"""
Compute all points coredist.
"""
cuda.memcpy_htod(self._mem_labels, self._data_labels) # send info to gpu memory
self._func_coredist(
self._mem_coredist,
np.int32(self._n_objects),
np.int32(self._n_attributes),
self._mem_dataset,
self._mem_labels,
block=(self._threads_per_block, 1, 1), # block size
grid=self._grid_size
)
def __mrd__(self):
self._func_mreach(
self._mem_dm,
self._mem_mreach,
self._mem_coredist,
np.int32(self._n_objects),
block=(self._threads_per_block, 1, 1), # block size
grid=self._grid_size
)
cuda.memcpy_dtoh(self._data_mreach, self._mem_mreach) # get info from gpu memory
return self._data_mreach
def __copy_labels__(self, labels):
"""
Copy labels to self._data_labels and GPU memory.
:type labels: numpy.ndarray
:param labels: cluster assignment for each object.
"""
super(CudaHandler, self).__copy_labels__(labels)
cuda.memcpy_htod(self._mem_labels, self._data_labels) # send info to gpu memory
|
henryzord/clustering
|
src/measures/dbcv/cuda/__init__.py
|
Python
|
gpl-3.0
| 3,770
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from diraccfg import CFG
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import List
def loadJDLAsCFG(jdl):
"""
Load a JDL as CFG
"""
def cleanValue(value):
value = value.strip()
if value[0] == '"':
entries = []
iPos = 1
current = ""
state = "in"
while iPos < len(value):
if value[iPos] == '"':
if state == "in":
entries.append(current)
current = ""
state = "out"
elif state == "out":
current = current.strip()
if current not in (",", ):
return S_ERROR("value seems a list but is not separated in commas")
current = ""
state = "in"
else:
current += value[iPos]
iPos += 1
if state == "in":
return S_ERROR('value is opened with " but is not closed')
return S_OK(", ".join(entries))
else:
return S_OK(value.replace('"', ''))
def assignValue(key, value, cfg):
key = key.strip()
if len(key) == 0:
return S_ERROR("Invalid key name")
value = value.strip()
if not value:
return S_ERROR("No value for key %s" % key)
if value[0] == "{":
if value[-1] != "}":
return S_ERROR("Value '%s' seems a list but does not end in '}'" % (value))
valList = List.fromChar(value[1:-1])
for i in range(len(valList)):
result = cleanValue(valList[i])
if not result['OK']:
return S_ERROR("Var %s : %s" % (key, result['Message']))
valList[i] = result['Value']
if valList[i] is None:
return S_ERROR("List value '%s' seems invalid for item %s" % (value, i))
value = ", ".join(valList)
else:
result = cleanValue(value)
if not result['OK']:
return S_ERROR("Var %s : %s" % (key, result['Message']))
nV = result['Value']
if nV is None:
return S_ERROR("Value '%s seems invalid" % (value))
value = nV
cfg.setOption(key, value)
return S_OK()
if jdl[0] == "[":
iPos = 1
else:
iPos = 0
key = ""
value = ""
action = "key"
insideLiteral = False
cfg = CFG()
while iPos < len(jdl):
char = jdl[iPos]
if char == ";" and not insideLiteral:
if key.strip():
result = assignValue(key, value, cfg)
if not result['OK']:
return result
key = ""
value = ""
action = "key"
elif char == "[" and not insideLiteral:
key = key.strip()
if not key:
return S_ERROR("Invalid key in JDL")
if value.strip():
return S_ERROR("Key %s seems to have a value and open a sub JDL at the same time" % key)
result = loadJDLAsCFG(jdl[iPos:])
if not result['OK']:
return result
subCfg, subPos = result['Value']
cfg.createNewSection(key, contents=subCfg)
key = ""
value = ""
action = "key"
insideLiteral = False
iPos += subPos
elif char == "=" and not insideLiteral:
if action == "key":
action = "value"
insideLiteral = False
else:
value += char
elif char == "]" and not insideLiteral:
key = key.strip()
if len(key) > 0:
result = assignValue(key, value, cfg)
if not result['OK']:
return result
return S_OK((cfg, iPos))
else:
if action == "key":
key += char
else:
value += char
if char == '"':
insideLiteral = not insideLiteral
iPos += 1
return S_OK((cfg, iPos))
def dumpCFGAsJDL(cfg, level=1, tab=" "):
indent = tab * level
contents = ["%s[" % (tab * (level - 1))]
sections = cfg.listSections()
for key in cfg:
if key in sections:
contents.append("%s%s =" % (indent, key))
contents.append("%s;" % dumpCFGAsJDL(cfg[key], level + 1, tab))
else:
val = List.fromChar(cfg[key])
# Some attributes are never lists
if len(val) < 2 or key in ['Arguments', 'Executable', 'StdOutput', 'StdError']:
value = cfg[key]
try:
try_value = float(value)
contents.append('%s%s = %s;' % (tab * level, key, value))
except Exception:
contents.append('%s%s = "%s";' % (tab * level, key, value))
else:
contents.append("%s%s =" % (indent, key))
contents.append("%s{" % indent)
for iPos in range(len(val)):
try:
value = float(val[iPos])
except Exception:
val[iPos] = '"%s"' % val[iPos]
contents.append(",\n".join(['%s%s' % (tab * (level + 1), value) for value in val]))
contents.append("%s};" % indent)
contents.append("%s]" % (tab * (level - 1)))
return "\n".join(contents)
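# Hedged usage sketch, not part of the original module: parse a small JDL
# snippet with loadJDLAsCFG and print it back through dumpCFGAsJDL. The JDL
# text below is only an illustration.
if __name__ == '__main__':
  sampleJDL = '[ Executable = "/bin/echo"; Arguments = "hello world"; CPUTime = 3600; ]'
  result = loadJDLAsCFG(sampleJDL)
  if result['OK']:
    cfg, _consumed = result['Value']
    print(dumpCFGAsJDL(cfg))
  else:
    print(result['Message'])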
|
yujikato/DIRAC
|
src/DIRAC/Core/Utilities/JDL.py
|
Python
|
gpl-3.0
| 4,840
|
from .status import startSession
from .general import *
|
ttm/percolation
|
percolation/utils/__init__.py
|
Python
|
gpl-3.0
| 56
|
from corr2 import app
app.run()
|
eepp/corr2
|
bin/corr.py
|
Python
|
gpl-3.0
| 33
|
# Copyright (C) 2011 Mark Burnett
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from actin_dynamics import database
from unit_tests.database.base_test_cases import DBTestCase
class TestParameter(DBTestCase):
def test_inheritance_for_cross_talk(self):
s = database.SessionParameter(name='hi', value=0.3)
s.session_id = 0
self.db_session.add(s)
self.db_session.commit()
self.assertEqual(1, self.db_session.query(database.Parameter
).count())
self.assertEqual(1, self.db_session.query(database.SessionParameter
).count())
self.assertEqual(0, self.db_session.query(database.ExperimentParameter
).count())
self.assertEqual(0, self.db_session.query(database.RunParameter
).count())
self.assertEqual(0, self.db_session.query(database.ObjectiveParameter
).count())
o = database.ObjectiveParameter(name='bye', value=7.6)
o.objective_id = 0
self.db_session.add(o)
self.db_session.commit()
self.assertEqual(2, self.db_session.query(database.Parameter
).count())
self.assertEqual(1, self.db_session.query(database.SessionParameter
).count())
self.assertEqual(0, self.db_session.query(database.ExperimentParameter
).count())
self.assertEqual(0, self.db_session.query(database.RunParameter
).count())
self.assertEqual(1, self.db_session.query(database.ObjectiveParameter
).count())
def test_repeated_name_assignment(self):
sp = database.SessionParameter(name='hi', value=0.3)
sp.session_id = 0
self.db_session.add(sp)
self.db_session.commit()
rp = database.RunParameter(name='hi', value=2.6)
rp.run_id = 0
self.db_session.add(rp)
self.db_session.commit()
if '__main__' == __name__:
unittest.main()
|
mark-burnett/filament-dynamics
|
unit_tests/database/test_parameters.py
|
Python
|
gpl-3.0
| 2,579
|
print((hex(int(input())))[2:])
|
LTKills/languages
|
python/33.py
|
Python
|
gpl-3.0
| 32
|
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from nose.plugins.skip import SkipTest
from rootpy.utils.silence import silence_sout
try:
with silence_sout():
import ROOT
from ROOT import (RooFit, RooRealVar, RooGaussian, RooArgusBG,
RooAddPdf, RooArgList, RooArgSet, RooAbsData)
from rootpy.stats import mute_roostats; mute_roostats()
from rootpy import asrootpy
except ImportError:
raise SkipTest("ROOT is not compiled with RooFit and RooStats enabled")
from rootpy.io import TemporaryFile
from nose.tools import assert_true
def test_plottable():
# construct pdf and toy data following example at
# http://root.cern.ch/drupal/content/roofit
# Observable
mes = RooRealVar("mes", "m_{ES} (GeV)", 5.20, 5.30)
# Parameters
sigmean = RooRealVar("sigmean", "B^{#pm} mass", 5.28, 5.20, 5.30)
sigwidth = RooRealVar("sigwidth", "B^{#pm} width", 0.0027, 0.001, 1.)
# Build Gaussian PDF
signal = RooGaussian("signal", "signal PDF", mes, sigmean, sigwidth)
# Build Argus background PDF
argpar = RooRealVar("argpar", "argus shape parameter", -20.0, -100., -1.)
background = RooArgusBG("background", "Argus PDF",
mes, RooFit.RooConst(5.291), argpar)
# Construct signal+background PDF
nsig = RooRealVar("nsig", "#signal events", 200, 0., 10000)
nbkg = RooRealVar("nbkg", "#background events", 800, 0., 10000)
model = RooAddPdf("model", "g+a",
RooArgList(signal, background),
RooArgList(nsig, nbkg))
# Generate a toyMC sample from composite PDF
data = model.generate(RooArgSet(mes), 2000)
# Perform extended ML fit of composite PDF to toy data
fitresult = model.fitTo(data, RooFit.Save(), RooFit.PrintLevel(-1))
# Plot toy data and composite PDF overlaid
mesframe = asrootpy(mes.frame())
data.plotOn(mesframe)
model.plotOn(mesframe)
for obj in mesframe.objects:
assert_true(obj)
for curve in mesframe.curves:
assert_true(curve)
for hist in mesframe.data_hists:
assert_true(hist)
assert_true(mesframe.plotvar)
with TemporaryFile():
mesframe.Write()
|
henryiii/rootpy
|
rootpy/stats/tests/test_plottable.py
|
Python
|
gpl-3.0
| 2,282
|
RIGHT = 'right'
LEFT = 'left'
WINDOW_WIDTH = 1024
WINDOW_HEIGHT = 768
|
DChaushev/the-last-stand
|
enumerations.py
|
Python
|
gpl-3.0
| 70
|
import pytest
from pysisyphus.helpers import geom_loader
from pysisyphus.io.pdb import geom_to_pdb_str
@pytest.mark.parametrize(
"pdb_fn, fragment_num", [
("lib:pdbs/1gcn.pdb", 29),
("lib:pdbs/1bl8.pdb", 388+4),
]
)
def test_fragment_num(pdb_fn, fragment_num):
geom = geom_loader(pdb_fn)
# geom.jmol()
assert len(geom.fragments) == fragment_num
def test_get_fragments():
full_geom = geom_loader("lib:pdbs/1bl8.pdb")
geom = full_geom.get_fragments("75_THR")
# geom.jmol()
assert len(geom.fragments) == 4
def test_pdb_write(this_dir):
geom = geom_loader("lib:h2o.xyz")
pdb_str = geom_to_pdb_str(geom)
# with open("h2o_ref.pdb", "w") as handle:
# handle.write(pdb_str)
# Reference pdb
with open(this_dir / "h2o_ref.pdb") as handle:
ref = handle.read()
assert pdb_str == ref
def test_geom_to_pdb(this_dir):
geom = geom_loader(this_dir / "five_chloroforms_xtbopt.xyz")
pdb_str = geom_to_pdb_str(geom, detect_fragments=True)
with open("five_chloroforms_ref.pdb", "w") as handle:
handle.write(pdb_str)
# Reference pdb
with open(this_dir / "five_chloroforms_ref.pdb") as handle:
ref = handle.read()
assert pdb_str == ref
|
eljost/pysisyphus
|
tests/test_pdb/test_pdb.py
|
Python
|
gpl-3.0
| 1,262
|
#
# This file is part of Checkbox.
#
# Copyright 2008 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
from checkbox.contrib.persist import Persist
from checkbox.properties import Path
from checkbox.plugin import Plugin
class PersistInfo(Plugin):
# Filename where to persist information
filename = Path(default="%(checkbox_data)s/plugins.bpickle")
def register(self, manager):
super(PersistInfo, self).register(manager)
self.persist = None
for (rt, rh) in [
("begin", self.begin),
("prompt-begin", self.begin),
("prompt-job", self.save)]:
self._manager.reactor.call_on(rt, rh, -100)
# Save persist data last
self._manager.reactor.call_on("stop", self.save, 1000)
def begin(self, interface=None):
if self.persist is None:
self.persist = Persist(self.filename)
self._manager.reactor.fire("begin-persist", self.persist)
def save(self, *args):
# Flush data to disk
if self.persist:
self.persist.save()
factory = PersistInfo
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/checkbox/plugins/persist_info.py
|
Python
|
gpl-3.0
| 1,695
|
# -*- coding: utf-8 -*-
"""
.. module: switching
This library provides the classes and methods needed for switching.
"""
import os
__version__ = '2.12.2'
_ROOT = os.path.abspath(os.path.dirname(__file__))
def get_data(path):
filename = isinstance(path, (list, tuple)) and path[0] or path
return os.path.join(_ROOT, 'data', filename)
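# Hedged usage sketch, not part of the original module: resolve the absolute
# path of a file shipped under switching/data. 'F1.xsd' is only an example
# name; any file present in that directory works the same way.
if __name__ == '__main__':
    print(get_data('F1.xsd'))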
|
gisce/switching
|
switching/__init__.py
|
Python
|
gpl-3.0
| 357
|
import logging
import os
import select
import sys
from six import PY2
from threading import Thread
if PY2:
from Queue import Queue
else:
from queue import Queue
class IOManager:
thread = None
control_pipe = None
control_queue = None
def __init__(self):
self.control_queue = Queue()
read_fd, write_fd = os.pipe()
self.control_pipe = os.fdopen(write_fd, 'w')
thread_pipe = os.fdopen(read_fd, 'r')
logging.debug("IOManager starting up")
self.thread = Thread(target=self._thread_loop, name="IOManager", args=[thread_pipe])
self.thread.start()
def stop(self):
logging.debug("IOManager shutting down")
def stop_handler(poller, callbacks):
return False
self.control_queue.put(stop_handler)
self._wake_io_thread()
self.control_queue.join()
self.thread.join()
logging.debug("IOManager finished shutting down")
def is_running(self):
return self.thread is not None and self.thread.is_alive()
def add_file(self, file, callback):
logging.debug("IOManager adding file: %s", str(file))
def add_handler(poller, callbacks):
poller.register(
file, select.POLLIN | select.POLLPRI | select.POLLERR | select.POLLHUP | select.POLLNVAL)
callbacks[file.fileno()] = callback
logging.debug("IOManager thread added file successfully")
return True
self.control_queue.put(add_handler)
self._wake_io_thread()
self.control_queue.join()
def remove_file(self, file):
logging.debug("IOManager removing file: %s", str(file))
def remove_handler(poller, callbacks):
poller.unregister(file)
del callbacks[file.fileno()]
logging.debug("IOManager thread removed file successfully")
return True
self.control_queue.put(remove_handler)
self._wake_io_thread()
self.control_queue.join()
def _wake_io_thread(self):
self.control_pipe.write(" ")
self.control_pipe.flush()
def _thread_loop(self, control_pipe):
try:
keep_running = True
callbacks = {}
poller = select.poll()
poller.register(control_pipe)
while keep_running:
active_fds = poller.poll()
for fd, flag in active_fds:
if fd == control_pipe.fileno():
logging.debug("IOManagerThread processing control events")
keep_running = self._handle_events(control_pipe, poller, callbacks)
logging.debug("IOManagerThread finished processing control events")
else:
try:
logging.debug("IOManagerThread calling callback %s with flags %s", str(fd), str(flag))
callbacks[fd](flag)
except:
logging.warn("IOManager caught exception from a callback: %s", str(sys.exc_info()))
except:
logging.error("IOManager terminating due to %s", str(sys.exc_info()))
logging.debug("IOManager terminating")
def _handle_events(self, control_pipe, poller, callbacks):
control_pipe.read(1)
keep_running = True
while not self.control_queue.empty():
control_item = self.control_queue.get_nowait()
keep_running = keep_running and control_item(poller, callbacks)
self.control_queue.task_done()
logging.debug("IOManager finished control item")
return keep_running
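# Hedged usage sketch, not part of the original module: start one IOManager,
# register one end of a pipe with a trivial callback, then shut everything
# down cleanly. A real callback would read from the file it was given.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    manager = IOManager()
    read_fd, write_fd = os.pipe()
    read_file = os.fdopen(read_fd, 'r')
    manager.add_file(read_file, lambda flag: logging.info("pipe event flags: %s", flag))
    manager.remove_file(read_file)
    manager.stop()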
|
intelligent-agent/redeem
|
redeem/IOManager.py
|
Python
|
gpl-3.0
| 3,274
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import oioioi.base.fields
import oioioi.participants.fields
class Migration(migrations.Migration):
dependencies = [
('contests', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Participant',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', oioioi.base.fields.EnumField(default=b'ACTIVE', max_length=64, choices=[(b'ACTIVE', 'Active'), (b'BANNED', 'Banned')])),
('anonymous', models.BooleanField(default=False)),
('contest', models.ForeignKey(to='contests.Contest')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TestRegistration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('participant', oioioi.participants.fields.OneToOneBothHandsCascadingParticipantField(related_name='participants_testregistration', to='participants.Participant')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='participant',
unique_together=set([('contest', 'user')]),
),
]
|
papedaniel/oioioi
|
oioioi/participants/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 1,775
|
##############################################################################
#
# Copyright (C) Zenoss, Inc. 2013, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
'''
Windows Running Processes
Models running processes by querying Win32_Process and
Win32_PerfFormattedData_PerfProc_Process via WMI.
'''
import re
from itertools import ifilter, imap
from Products.ZenModel import OSProcess
from Products.ZenModel.Device import Device
from Products.ZenUtils.Utils import prepId
from ZenPacks.zenoss.Microsoft.Windows.modeler.WinRMPlugin import WinRMPlugin
from ZenPacks.zenoss.Microsoft.Windows.utils import (
get_processNameAndArgs,
get_processText,
save
)
try:
# Introduced in Zenoss 4.2 2013-10-15 RPS.
from Products.ZenModel.OSProcessMatcher import buildObjectMapData
except ImportError:
def buildObjectMapData(processClassMatchData, lines):
raise Exception("buildObjectMapData does not exist on this Zenoss")
return []
if hasattr(Device, 'osProcessClassMatchData'):
# Introduced in Zenoss 4.2 2013-10-15 RPS.
PROXY_MATCH_PROPERTY = 'osProcessClassMatchData'
else:
# Older property.
PROXY_MATCH_PROPERTY = 'getOSProcessMatchers'
class Processes(WinRMPlugin):
compname = 'os'
relname = 'processes'
modname = 'ZenPacks.zenoss.Microsoft.Windows.OSProcess'
deviceProperties = WinRMPlugin.deviceProperties + (
PROXY_MATCH_PROPERTY,
)
queries = {
'Win32_Process': "SELECT Name, ExecutablePath, CommandLine FROM Win32_Process",
'Win32_PerfFormattedData_PerfProc_Process': "SELECT * FROM Win32_PerfFormattedData_PerfProc_Process",
}
@save
def process(self, device, results, log):
log.info(
"Modeler %s processing data for device %s",
self.name(), device.id)
rm = self.relMap()
# Get process ObjectMap instances.
if hasattr(device, 'osProcessClassMatchData'):
oms = self.new_process(device, results, log)
else:
oms = self.old_process(device, results, log)
# Determine if WorkingSetPrivate is supported.
try:
perfproc = results.get('Win32_PerfFormattedData_PerfProc_Process', (None,))[0]
supports_WorkingSetPrivate = hasattr(perfproc, 'WorkingSetPrivate')
except IndexError:
supports_WorkingSetPrivate = False
for om in oms:
om.supports_WorkingSetPrivate = supports_WorkingSetPrivate
rm.append(om)
return rm
def new_process(self, device, results, log):
'''
Model processes according to new style.
Handles style introduced by Zenoss 4.2 2013-10-15 RPS.
'''
processes = ifilter(bool, imap(get_processText, results.values()[0]))
oms = imap(
self.objectMap,
buildObjectMapData(device.osProcessClassMatchData, processes))
for om in oms:
yield om
def old_process(self, device, results, log):
'''
Model processes according to old style.
Handles Zenoss 4.1 and Zenoss 4.2 prior to the 2013-10-15 RPS.
'''
self.compile_regexes(device, log)
seen = set()
for item in results.values()[0]:
procName, parameters = get_processNameAndArgs(item)
processText = get_processText(item)
for matcher in device.getOSProcessMatchers:
if hasattr(OSProcess.OSProcess, 'matchRegex'):
match = OSProcess.OSProcess.matchRegex(
matcher['regex'],
matcher['excludeRegex'],
processText)
else:
match = matcher['regex'].search(processText)
if not match:
continue
if hasattr(OSProcess.OSProcess, 'generateId'):
process_id = OSProcess.OSProcess.generateId(
matcher['regex'],
matcher['getPrimaryUrlPath'],
processText)
else:
process_id = prepId(OSProcess.getProcessIdentifier(
procName,
None if matcher['ignoreParameters'] else parameters))
if process_id in seen:
continue
seen.add(process_id)
data = {
'id': process_id,
'procName': procName,
'parameters': parameters,
'setOSProcessClass': matcher['getPrimaryDmdId'],
}
if hasattr(OSProcess.OSProcess, 'processText'):
data['processText'] = processText
yield self.objectMap(data)
def compile_regexes(self, device, log):
for matcher in device.getOSProcessMatchers:
try:
matcher['regex'] = re.compile(matcher['regex'])
except Exception:
log.warning(
"Invalid process regex '%s' -- ignoring",
matcher['regex'])
if 'excludeRegex' in matcher:
try:
matcher['excludeRegex'] = re.compile(
matcher['excludeRegex'])
except Exception:
log.warning(
"Invalid process exclude regex '%s' -- ignoring",
matcher['excludeRegex'])
|
krull/docker-zenoss4
|
init_fs/usr/local/zenoss/ZenPacks/ZenPacks.zenoss.Microsoft.Windows-2.6.9.egg/ZenPacks/zenoss/Microsoft/Windows/modeler/plugins/zenoss/winrm/Processes.py
|
Python
|
gpl-3.0
| 5,712
|
"""Module that gather several informations about host"""
import logging
import re
from robottelo.cli.base import CLIReturnCodeError
from robottelo.helpers import lru_cache
from robottelo import ssh
LOGGER = logging.getLogger(__name__)
@lru_cache(maxsize=1)
def get_host_os_version():
"""Fetches host's OS version through SSH
:return: str with version
"""
cmd = ssh.command('cat /etc/redhat-release')
if cmd.stdout:
version_description = cmd.stdout[0]
version_re = (
r'Red Hat Enterprise Linux Server release (?P<version>\d(\.\d)*)'
)
result = re.search(version_re, version_description)
if result:
host_os_version = 'RHEL{}'.format(result.group('version'))
LOGGER.debug('Host version: {}'.format(host_os_version))
return host_os_version
LOGGER.warning('Host version not available: {!r}'.format(cmd))
return 'Not Available'
_SAT_6_2_VERSION_COMMAND = u'rpm -q satellite'
_SAT_6_1_VERSION_COMMAND = (
u'grep "VERSION" /usr/share/foreman/lib/satellite/version.rb'
)
@lru_cache(maxsize=1)
def get_host_sat_version():
"""Fetches host's Satellite version through SSH
:return: Satellite version
    :rtype: str
"""
commands = (
_extract_sat_version(c) for c in
(_SAT_6_2_VERSION_COMMAND, _SAT_6_1_VERSION_COMMAND)
)
for version, ssh_result in commands:
if version != 'Not Available':
LOGGER.debug('Host Satellite version: {}'.format(version))
return version
LOGGER.warning(
'Host Satellite version not available: {!r}'.format(ssh_result)
)
return version
def _extract_sat_version(ssh_cmd):
"""Extracts Satellite version if possible or 'Not Available' otherwise
:param ssh_cmd: str ssh command
    :return: tuple of (Satellite version or 'Not Available', SSH command result)
    :rtype: tuple
"""
ssh_result = ssh.command(ssh_cmd)
if ssh_result.stdout:
version_description = ssh_result.stdout[0]
version_re = (
r'[^\d]*(?P<version>\d(\.\d){1})'
)
result = re.search(version_re, version_description)
if result:
host_sat_version = result.group('version')
return host_sat_version, ssh_result
return 'Not Available', ssh_result
def get_repo_files(repo_path, extension='rpm', hostname=None):
"""Returns a list of repo files (for example rpms) in specific repository
directory.
:param str repo_path: unix path to the repo, e.g. '/var/lib/pulp/fooRepo/'
:param str extension: extension of searched files. Defaults to 'rpm'
:param str optional hostname: hostname or IP address of the remote host. If
        ``None`` the hostname will be taken from ``main.server.hostname`` config.
:return: list representing rpm package names
:rtype: list
"""
if not repo_path.endswith('/'):
repo_path += '/'
result = ssh.command(
"find {} -name '*.{}' | awk -F/ '{{print $NF}}'"
.format(repo_path, extension),
hostname=hostname,
)
if result.return_code != 0:
raise CLIReturnCodeError(
result.return_code,
result.stderr,
'No .{} found'.format(extension)
)
# strip empty lines and sort alphabetically (as order may be wrong because
# of different paths)
return sorted([repo_file for repo_file in result.stdout if repo_file])
def get_repomd_revision(repo_path, hostname=None):
"""Fetches a revision of repository.
:param str repo_path: unix path to the repo, e.g. '/var/lib/pulp/fooRepo'
:param str optional hostname: hostname or IP address of the remote host. If
        ``None`` the hostname will be taken from ``main.server.hostname`` config.
:return: string containing repository revision
:rtype: str
"""
repomd_path = 'repodata/repomd.xml'
result = ssh.command(
"grep -oP '(?<=<revision>).*?(?=</revision>)' {}/{}"
.format(repo_path, repomd_path),
hostname=hostname,
)
# strip empty lines
stdout = [line for line in result.stdout if line]
if result.return_code != 0 or len(stdout) != 1:
raise CLIReturnCodeError(
result.return_code,
result.stderr,
'Unable to fetch revision for {}. Please double check your '
'hostname, path and contents of repomd.xml'.format(repo_path)
)
return stdout[0]
class SatVersionDependentValues(object):
"""Class which return values depending on Satellite host version"""
def __init__(self, *dcts, **kwargs):
"""
Hold values for different Satellite versions.
Each value of ``dcts`` must be a dictionary with form {version:dict}
        :param dcts: dicts mapping a Satellite version string to a dict of values
"""
self._common = kwargs.get('common', {})
self._versioned_values = {}
for dct in dcts:
self._versioned_values.update(dct)
def __getitem__(self, item):
"""
Return value dependent on Satellite version
:param item: str
:return: respective Satellite version values
"""
sat_version = get_host_sat_version()
try:
return self._versioned_values[sat_version][item]
except KeyError:
return self._common[item]
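# Hedged usage sketch, not part of the original module: the version numbers
# and keys below are made up, and item lookups need SSH access to a configured
# Satellite host because __getitem__ calls get_host_sat_version() first.
if __name__ == '__main__':
    values = SatVersionDependentValues(
        {'6.1': {'rpm': 'ruby193-rubygem-katello'}},
        {'6.2': {'rpm': 'tfm-rubygem-katello'}},
        common={'service': 'foreman'},
    )
    print(values['rpm'])      # entry matching the detected Satellite version
    print(values['service'])  # falls back to the common values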
|
sghai/robottelo
|
robottelo/host_info.py
|
Python
|
gpl-3.0
| 5,289
|
class Solution(object):
def lengthOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
length = len(nums)
if length == 0:
return 0
elif length == 1:
return 1
else:
            # opt_array[i] holds the length of the longest increasing
            # subsequence ending at nums[i]; every position starts at 1.
            opt_array = [1] * length
            for i in xrange(length):
                for j in xrange(i):
                    if opt_array[i] < opt_array[j] + 1 and nums[i] > nums[j]:
                        opt_array[i] = opt_array[j] + 1
            return max(opt_array)
mysolution = Solution()
print mysolution.lengthOfLIS([10,9,2,5,3,7,101,18])
|
urashima9616/Leetcode_Python
|
leet300.py
|
Python
|
gpl-3.0
| 566
|
from collections import namedtuple
from lxml import etree
from lxml.builder import E
from eruditarticle.utils import remove_xml_namespaces
Author = namedtuple("Author", "firstname lastname othername")
class BaseDomChanger:
def __init__(self, xml):
self.root = remove_xml_namespaces(etree.fromstring(xml))
def tostring(self):
return etree.tostring(self.root)
class EruditArticleDomChanger(BaseDomChanger):
def set_authors(self, authors):
# authors is a list of element with lastname/firstname/othername attributes.
grauteur = self.root.find(".//grauteur")
assert grauteur is not None
def auteur(author):
nameelems = []
if author.firstname:
nameelems.append(E.prenom(author.firstname))
if author.lastname:
nameelems.append(E.nomfamille(author.lastname))
if author.othername:
nameelems.append(E.autreprenom(author.othername))
return E.auteur(E.nompers(*nameelems))
grauteur[:] = [auteur(a) for a in authors]
def set_author(self, firstname="", lastname="", othername=""):
self.set_authors([Author(firstname, lastname, othername)])
def set_section_titles(self, titles):
grtitre = self.root.find(".//grtitre")
REPLACE_ELEMS = {
"surtitre",
"surtitreparal",
"surtitre2",
"surtitreparal2",
"surtitre3",
"surtitreparal3",
}
for name in REPLACE_ELEMS:
elem = grtitre.find(name)
if elem is not None:
grtitre.remove(elem)
for title in titles:
name = "surtitreparal" if title.paral else "surtitre"
if title.level > 1:
name += str(title.level)
elem = etree.Element(name)
elem.text = title.title
grtitre.append(elem)
def set_notegens(self, notegens):
article = self.root.getroot()
for notegen in notegens:
subelem = E.notegen(
E.alinea(notegen["content"]),
porteenoteg=notegen["scope"],
typenoteg=notegen["type"],
)
article.append(subelem)
def set_title(self, title):
titre = self.root.find(".//grtitre/titre")
titre.text = title
def set_abstracts(self, abstracts):
liminaire = self.root.find("./liminaire")
for abstract in abstracts:
subelem = E.resume(
E.alinea(
E.marquage(
abstract["content"],
typemarq="italique",
),
),
lang=abstract["lang"],
typeresume="resume",
)
liminaire.append(subelem)
def set_type(self, type):
self.root.getroot().attrib["typeart"] = type
def set_roc(self):
elem = self.root.find("//corps/texte")
if elem is not None:
elem.attrib["typetexte"] = "roc"
else:
self.root.find("//corps").insert(0, E.texte(typetexte="roc"))
def add_keywords(self, lang, keywords):
elem = E.grmotcle(*[E.motcle(k) for k in keywords], lang=lang)
liminaire = self.root.find("./liminaire")
liminaire.append(elem)
def tostring(self):
return etree.tostring(self.root)
class EruditPublicationDomChanger(BaseDomChanger):
def add_article(self, article, publication_allowed=True, pdf_url=None, html_url=None):
if self.root.find('article[@idproprio="{}"]'.format(article.localidentifier)) is not None:
return # already there
ordseq = len(self.root.findall("article")) + 1
elem = E.article(idproprio=article.localidentifier, lang="fr", ordseq=str(ordseq))
titre = E.liminaire(E.grtitre(E.titre(str(article.html_title))))
elem.append(titre)
if not publication_allowed:
subelem = E.accessible("non")
elem.append(subelem)
if pdf_url:
subelem = E.urlpdf(pdf_url, taille="0")
elem.append(subelem)
if html_url:
subelem = E.urlhtml(html_url)
elem.append(subelem)
self.root.getroot().append(elem)
class EruditJournalDomChanger(BaseDomChanger):
def add_issue(self, issue):
if not issue.pid:
return
if self.root.find('.//numero[pid="{}"]'.format(issue.pid)) is not None:
return # already there
num_issues = len(self.root.findall(".//numero"))
parentelem = self.root.find(".//decennie")
assert parentelem is not None
elem = E.numero(
pid=issue.pid, annee=str(issue.year), nonumero=str(num_issues + 1), volume="42"
)
# Publications are in reverse order
parentelem.insert(0, elem)
def add_notes(self, notes):
elem = E.notes()
for note in notes:
subelem = E.note(
note["content"],
langue=note["langue"],
pid=note["pid"],
)
elem.append(subelem)
self.root.find(".//revue").append(elem)
|
erudit/eruditorg
|
eruditorg/erudit/test/domchange.py
|
Python
|
gpl-3.0
| 5,219
|
#! /usr/bin/env python
# ==========================================================================
# Display test reports
#
# Copyright (C) 2016 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import glob
import gammalib
# ====================== #
# Report test case error #
# ====================== #
def report_one_error(name, xml):
"""
Report test case error
Parameters
----------
name : str
Test case name
xml : gammalib.GXmlElement
Error XML element
"""
# Extract information
message = xml.attribute('message')
type = xml.attribute('type')
# Print error information
print(' Error in %s:\n "%s" (%s)' % (name, message, type))
# Return
return
# ======================== #
# Report test case failure #
# ======================== #
def report_one_failure(name, xml):
"""
Report test case failure
Parameters
----------
name : str
Test case name
xml : gammalib.GXmlElement
Failure XML element
"""
# Extract information
message = xml.attribute('message')
type = xml.attribute('type')
# Print failure information
print(' Failure in "%s":\n "%s" (%s)' % (name, message, type))
# Return
return
# ======================= #
# Report test case result #
# ======================= #
def report_one_test_case(xml):
"""
Report test case result
Parameters
----------
xml : gammalib.GXmlElement
Test case XML element
"""
# Extract test case information
name = xml.attribute('name')
errors = int(xml.elements('error'))
failures = int(xml.elements('failure'))
# If there are errors then display them
if errors > 0:
num = int(xml.elements('error'))
for i in range(num):
error = xml.element('error[%d]' % i)
report_one_error(name, error)
# If there are failures then display them
if failures > 0:
num = int(xml.elements('failure'))
for i in range(num):
failure = xml.element('failure[%d]' % i)
report_one_failure(name, failure)
# Return
return
# ========================= #
# Report test suite results #
# ========================= #
def report_one_test_suite(xml):
"""
Report test suite result
Parameters
----------
xml : gammalib.GXmlElement
Test suite XML element
"""
# Extract test suite information
name = xml.attribute('name')
errors = int(xml.attribute('errors'))
failures = int(xml.attribute('failures'))
time = float(xml.attribute('time'))
# Optionally show time
#print('%.4f s spent in module "%s"' % (time, name))
# If there are errors or failures then print them
if errors > 0 or failures > 0:
# Print header
if errors == 0:
print('%d failures occured in module "%s":' %
(failures, name))
elif failures == 0:
print('%d errors occured in module "%s":' %
(errors, name))
else:
print('%d errors and %d failures occured in module "%s":' %
(errors, failures, name))
# Determine number of test cases
num = int(xml.elements('testcase'))
# Loop over all test cases
for i in range(num):
# Get test case
case = xml.element('testcase[%d]' % i)
# Report test case result
report_one_test_case(case)
# Return
return
# ====================== #
# Report one test result #
# ====================== #
def report_one_test_result(file):
"""
Report one test result
Parameters
----------
file : str
Test result file
"""
# Open XML file
xml = gammalib.GXml(file)
# Determine number of testsuites
num = int(xml.element('testsuites').elements('testsuite'))
# Loop over all test suites
for i in range(num):
# Get test suite
suite = xml.element('testsuites > testsuite[%d]' % i)
# Report test suite result
report_one_test_suite(suite)
# Return
return
# =================== #
# Report test results #
# =================== #
def report_test_results(path, pattern="*.xml"):
"""
Report test results
Parameters
----------
path : str
Directory containing test results
pattern : str, optional
Test report pattern
"""
# Get list of test reports
reports = glob.glob(path+'/'+pattern)
# Loop over all reports
for report in reports:
report_one_test_result(report)
# Return
return
# ======================== #
# Main routine entry point #
# ======================== #
if __name__ == '__main__':
# Print header
print('')
print('ctools test reports')
print('===================')
# Report test results
report_test_results('test/reports')
|
ctools/ctools
|
dev/testreport.py
|
Python
|
gpl-3.0
| 5,623
|
# Setup
import time
import webbrowser
import md5
import sys
import hashlib
import random
from urllib2 import urlopen
from yahoo_finance import Share
# Makes the webbrowser command
def open_link(link):
webbrowser.open(link)
speak("Opening web browser")
pause(2)
# Makes the pause command
def pause(seconds):
time.sleep(seconds)
# Makes the speak command
def speak(text):
print(text)
# Makes the warning command
def warning(warning_text):
print(warning_text)
# Makes the sha1 encrypter command
def encrypt_sha1(textsha1):
hash_objectsha1 = hashlib.sha1(textsha1)
hex_digsha1 = hash_objectsha1.hexdigest()
speak("Proccesing...")
speak(hex_digsha1)
# Makes the sha224 encrypter command
def encrypt_sha224(textsha224):
hash_objectsha224 = hashlib.sha224(textsha224)
hex_digsha224 = hash_objectsha224.hexdigest()
speak("Proccesing...")
speak(hex_digsha224)
# Makes the sha256 encrypter command
def encrypt_sha256(textsha256):
hash_objectsha256 = hashlib.sha256(textsha256)
hex_digsha256 = hash_objectsha256.hexdigest()
speak("Proccesing...")
speak(hex_digsha256)
# Initializes the time and date
time.ctime()
current_time = time.strftime('%H:%M')
current_date = time.strftime('%d %B %Y')
speak("Hello! i am PAI, your owm personal assistant")
# Main program
var = 1
while var == 1:
input = raw_input(">>>")
if input in {"help", "help me", }:
file = open('bin/commands.txt', 'r')
file_contents = file.read()
print (file_contents)
file.close()
elif input in {"exit", "kill", "escape"}:
speak("Killing PPA...")
sys.exit()
elif input in {"time", "the current time", "current time"}:
speak("The current time is " + current_time)
elif input in {"search", "google"}:
speak("What do you want to Google?")
google = raw_input(">>>")
google.replace(" ", "+")
open_link('https://www.google.nl/#q=' + google.replace(" ", "+"))
elif input in {"hello ppa", "hello", "hello!", "Hello PPA"}:
print("Hello human!")
elif input in {"date", "the current date", "current date"}:
print("The current date is " + current_date)
elif input in {"what is your favorite song?", "favorite song"}:
speak("I really like human music!")
pause(1)
speak("Let me show it to you!")
open_link('https://www.youtube.com/watch?v=q4k1IK_o59M')
elif input in {"im hungry", "i want to eat", "restaurant"}:
speak("What do you want to eat?")
eat = raw_input(">>>")
open_link('https://www.google.nl/maps/search/' + eat.replace(" ", "+"))
elif input in {"what is my ip?", "my ip", "ip", "what's my ip?", "whats my ip?", "whats my ip", "what is my ip"}:
ip = urlopen('http://ip.42.pl/raw').read()
speak(ip)
elif input in {"md5"}:
speak("What do you want to hash to MD5?")
md5_string = raw_input('>>>')
speak("proccesing...")
speak( md5.md5(md5_string).hexdigest() )
elif input in {"sha1"}:
speak("What do you want to encrypt?")
sha1_input = raw_input('>>>')
encrypt_sha1(sha1_input)
elif input in {"sha1"}:
speak("What do you want to encrypt?")
sha1_input = raw_input('>>>')
encrypt_sha1(sha1_input)
elif input in {"sha224"}:
speak("What do you want to encrypt?")
sha224_input = raw_input('>>>')
encrypt_sha224(sha224_input)
elif input in {"sha256"}:
speak("What do you want to encrypt?")
sha256_input = raw_input('>>>')
encrypt_sha256(sha256_input)
elif input in {"what is per?"}:
speak("A huge dumbass...")
pause(2)
speak("Take my word for it")
elif input in {"random", "random number"}:
        print random.randrange(1, 100)
    elif input in {"stocks", "stock", "stock prices"}:
        # This branch was left empty in the original script; the ticker prompt
        # and the yahoo_finance lookup below are an assumed sketch based on the
        # unused Share import at the top of the file.
        speak("Which stock symbol do you want to look up?")
        stock_symbol = raw_input(">>>")
        speak(stock_symbol + " is trading at " + str(Share(stock_symbol).get_price()))
    else:
warning("That is not a valid command")
|
Redstoners/PPA
|
ppa.py
|
Python
|
gpl-3.0
| 3,986
|
#!/usr/bin/env python
"""Get a random offensive ASCII art"""
from urlparse import urljoin
import re
import random
import urllib
from madcow.util import Module, strip_html
class Main(Module):
pattern = re.compile(r'^\s*artfart(?:\s+(.+?))?\s*$', re.I)
require_addressing = True
help = u'artfart - displays some offensive ascii art'
baseurl = u'http://www.asciiartfarts.com/'
random_url = urljoin(baseurl, u'random.cgi')
artfart = re.compile(r'<h1>#<a href="\S+.html">\d+</a>: (.*?)</h1>.*?(<pre>.*?</pre>)', re.DOTALL)
error = u"I had a problem with that, sorry."
def response(self, nick, args, kwargs):
kwargs['req'].blockquoted = True
query = args[0]
if query is None or query == u'':
url = self.random_url
else:
query = u' '.join(query.split())
query = query.replace(u' ', u'_')
query = urllib.quote(query) + u'.html'
url = urljoin(self.baseurl, query)
doc = self.geturl(url)
results = self.artfart.findall(doc)
result = random.choice(results)
title, art = result
art = strip_html(art)
return u'>>> %s <<<\n%s' % (title, art)
|
cjones/madcow
|
madcow/modules/artfart.py
|
Python
|
gpl-3.0
| 1,205
|
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Recruitment',
'version': '1.0',
'category': 'Human Resources/Recruitment',
'sequence': 90,
'summary': 'Track your recruitment pipeline',
'description': "",
'website': 'https://www.odoo.com/page/recruitment',
'depends': [
'hr',
'calendar',
'fetchmail',
'utm',
'attachment_indexation',
'web_tour',
'digest',
],
'data': [
'security/hr_recruitment_security.xml',
'security/ir.model.access.csv',
'data/hr_recruitment_data.xml',
'data/digest_data.xml',
'views/hr_recruitment_views.xml',
'views/res_config_settings_views.xml',
'views/hr_recruitment_templates.xml',
'views/hr_department_views.xml',
'views/hr_job_views.xml',
'views/mail_activity_views.xml',
'views/digest_views.xml',
],
'demo': [
'data/hr_recruitment_demo.xml',
],
'installable': True,
'auto_install': False,
'application': True,
}
|
t3dev/odoo
|
addons/hr_recruitment/__manifest__.py
|
Python
|
gpl-3.0
| 1,120
|
# -*- coding: utf-8 -*-
import json
import requests
import time
import sys
if len(sys.argv) == 1:
    print 'Usage: pass a Google API key as the first argument'
    quit()
apiKey=sys.argv[1]
badCriminalData=[]
criminal = json.loads(open('rustCriminal2014-2016-nao-tratado.json').read())
def excludeWrongData(criminal):
noCity = isinstance(criminal['ESTABELECIMENTO'],int)
if noCity is True:
badCriminalData.append(criminal)
return noCity is False
def getExtraData(criminal):
cityName = str(criminal['ESTABELECIMENTO'].encode('utf-8')).split(',').pop().split('/')[0]
criminal['cityName']=cityName.decode('utf-8')
return criminal
def aggregrateGEO(criminal):
r= requests.get('https://maps.googleapis.com/maps/api/geocode/json',params={'address':criminal['cityName'],'key':apiKey,'components':'country:brasil'})
try:
criminal['geometry'] = r.json()['results'][0]['geometry']
except:
badCriminalData.append(criminal)
print criminal['cityName']
return criminal
healedCriminalData=map(getExtraData,filter(excludeWrongData,criminal))
healedCriminalData=map(aggregrateGEO,healedCriminalData)
with open('healedCriminalData.json', 'w') as fp:
json.dump(healedCriminalData, fp)
with open('badCriminalData.json', 'w') as fp:
json.dump(badCriminalData, fp)
|
devmessias/brasillivre
|
data/processoDoSTF(2014-2016)ExclusaoDeNomes/aggregateGEOData.py
|
Python
|
gpl-3.0
| 1,249
|
'''
The main index page
'''
import os
import sys
# For importing other modules!
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(path)
import pieces.page as page
'''
View the main index page
'''
def Render(db):
p = page.Page(db)
# Setup the page
p.title = "PuckLab"
p.content = "hello there"
p.nav.title = "PuckLab"
return p.render()
|
shawger/pucklab
|
webapp/src/views/index.py
|
Python
|
gpl-3.0
| 434
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2015 Karl Palsson <karlp@tweak.net.au>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
regs = {
0: 'Product_ID',
1: 'Revision_ID',
2: 'Motion',
3: 'Delta_X',
4: 'Delta_Y',
5: 'SQUAL',
6: 'Shutter_Upper',
7: 'Shutter_Lower',
8: 'Maximum_Pixel',
9: 'Pixel_Sum',
0xa: 'Minimum_Pixel',
0xb: 'Pixel_Grab',
0xd: 'Mouse_Control',
0x3a: 'Chip_Reset',
0x3f: 'Inv_Rev_ID',
0x63: 'Motion_Burst',
}
class Decoder(srd.Decoder):
api_version = 3
id = 'adns5020'
name = 'ADNS-5020'
longname = 'Avago ADNS-5020 optical mouse sensor'
desc = 'Bidirectional command and data over an SPI-like protocol.'
license = 'gplv2+'
inputs = ['spi']
outputs = ['adns5020']
annotations = (
('read', 'Register read commands'),
('write', 'Register write commands'),
('warning', 'Warnings'),
)
annotation_rows = (
('read', 'Read', (0,)),
('write', 'Write', (1,)),
('warnings', 'Warnings', (2,)),
)
def __init__(self):
self.reset()
def reset(self):
self.ss_cmd, self.es_cmd = 0, 0
self.mosi_bytes = []
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def putx(self, data):
self.put(self.ss_cmd, self.es_cmd, self.out_ann, data)
def put_warn(self, pos, msg):
self.put(pos[0], pos[1], self.out_ann, [2, [msg]])
def decode(self, ss, es, data):
ptype = data[0]
if ptype == 'CS-CHANGE':
# If we transition high mid-stream, toss out our data and restart.
cs_old, cs_new = data[1:]
if cs_old is not None and cs_old == 0 and cs_new == 1:
if len(self.mosi_bytes) not in [0, 2]:
self.put_warn([self.ss_cmd, es], 'Misplaced CS#!')
self.mosi_bytes = []
return
# Don't care about anything else.
if ptype != 'DATA':
return
mosi, miso = data[1:]
self.ss, self.es = ss, es
if len(self.mosi_bytes) == 0:
self.ss_cmd = ss
self.mosi_bytes.append(mosi)
# Writes/reads are mostly two transfers (burst mode is different).
if len(self.mosi_bytes) != 2:
return
self.es_cmd = es
cmd, arg = self.mosi_bytes
write = cmd & 0x80
reg = cmd & 0x7f
reg_desc = regs.get(reg, 'Reserved %#x' % reg)
if reg > 0x63:
reg_desc = 'Unknown'
if write:
self.putx([1, ['%s: %#x' % (reg_desc, arg)]])
else:
self.putx([0, ['%s: %d' % (reg_desc, arg)]])
self.mosi_bytes = []
|
hufsm/tu_gen2_libsigrokdecode
|
decoders/adns5020/pd.py
|
Python
|
gpl-3.0
| 3,386
|
import os
import re
import sys
import yaml
def die(s):
print(s, file=sys.stderr)
sys.exit(1)
def equals(name, s1, s2):
if s1 != s2:
die("in %s expected %r, got %r" % (name, s2, s1))
def matches(name, s, r):
if not re.search(r, s):
die("in %s expected to match %s, got %r" % (name, r, s))
def check(name, d, *a):
ka = set()
for k, op, *args in a:
if op == maybe:
d[k] = d.get(k,"")
if k not in d:
die("in %s expected to have a key %r" % (name, k))
op(name+"."+k, d[k], *args)
ka.add(k)
kd = set(d)
if ka < kd:
die("in %s: extra keys: %r" % (name, kd-ka))
def exists(name, d):
pass
def maybe(name, d):
pass
verNotesRx = re.compile(r"^\w\S*\s+-$")
def verRevNotesRx(s):
return re.compile(r"^\w\S*\s+\(\d+\)\s+[1-9][0-9]*\w+\s+" + s + "$")
if os.environ['SNAPPY_USE_STAGING_STORE'] == '1':
snap_ids={
"test-snapd-tools": "02AHdOomTzby7gTaiLX3M3SGMmXDfLJp",
"test-snapd-devmode": "FcHyKyMiQh71liP8P82SsyMXtZI5mvVj",
"test-snapd-python-webserver": "uHjTANBWSXSiYzNOUXZNDnOSH3POSqWS",
}
else:
snap_ids={
"test-snapd-tools": "eFe8BTR5L5V9F7yHeMAPxkEr2NdUXMtw",
"test-snapd-devmode": "821MII7GAzoRnPvTEb8R51Z1s9e0XmK5",
"test-snapd-python-webserver": "Wcs8QL2iRQMjsPYQ4qz4V1uOlElZ1ZOb",
}
res = list(yaml.load_all(sys.stdin))
equals("number of entries", len(res), 7)
check("basic", res[0],
("name", equals, "basic"),
("summary", equals, "Basic snap"),
("path", matches, r"^basic_[0-9.]+_all\.snap$"),
("version", matches, verNotesRx),
)
check("basic-desktop", res[1],
("name", equals, "basic-desktop"),
("path", matches, "snaps/basic-desktop/$"), # note the trailing slash
("summary", equals, ""),
("version", matches, verNotesRx),
)
check("test-snapd-tools", res[2],
("name", equals, "test-snapd-tools"),
("publisher", matches, r"(Canonical✓|canonical)"),
("contact", equals, "snaps@canonical.com"),
("summary", equals, "Tools for testing the snapd application"),
("description", equals, "A tool to test snapd\n"),
("commands", exists),
("tracking", equals, "stable"),
("installed", matches, verRevNotesRx("-")),
("refresh-date", exists),
("channels", check,
("stable", matches, verRevNotesRx("-")),
("candidate", equals, "↑"),
("beta", equals, "↑"),
("edge", matches, verRevNotesRx("-")),
),
("snap-id", equals, snap_ids["test-snapd-tools"]),
("license", matches, r"(unknown|unset)"), # TODO: update once snap.yaml contains the right license
)
check("test-snapd-devmode", res[3],
("name", equals, "test-snapd-devmode"),
("publisher", matches, r"(Canonical✓|canonical)"),
("contact", equals, "snaps@canonical.com"),
("summary", equals, "Basic snap with devmode confinement"),
("description", equals, "A basic buildable snap that asks for devmode confinement\n"),
("tracking", equals, "beta"),
("installed", matches, verRevNotesRx("devmode")),
("refresh-date", exists),
("channels", check,
("stable", equals, "–"),
("candidate", equals, "–"),
("beta", matches, verRevNotesRx("devmode")),
("edge", matches, verRevNotesRx("devmode")),
),
("snap-id", equals, snap_ids["test-snapd-devmode"]),
("license", matches, r"(unknown|unset)"), # TODO: update once snap.yaml contains the right license
)
check("core", res[4],
("name", equals, "core"),
("type", equals, "core"), # attenti al cane
("publisher", exists),
("summary", exists),
("description", exists),
# tracking not there for local snaps
("tracking", maybe),
("installed", exists),
("refresh-date", exists),
("channels", exists),
# contacts is set on classic but not on Ubuntu Core where we
# sideload "core"
("contact", maybe),
("snap-id", maybe),
("license", matches, r"(unknown|unset)"), # TODO: update once snap.yaml contains the right license
)
check("error", res[5],
("warning", equals, 'no snap found for "/etc/passwd"'),
)
# not installed snaps have "contact" information
check("test-snapd-python-webserver", res[6],
("name", equals, "test-snapd-python-webserver"),
("publisher", matches, r"(Canonical✓|canonical)"),
("contact", equals, "snaps@canonical.com"),
("summary", exists),
("description", exists),
("channels", exists),
("snap-id", equals, snap_ids["test-snapd-python-webserver"]),
("license", equals, "Other Open Source"),
)
|
kenvandine/snapd
|
tests/main/snap-info/check.py
|
Python
|
gpl-3.0
| 4,552
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
|
TitoAlehandro/calc
|
book/tests.py
|
Python
|
gpl-3.0
| 397
|
from __future__ import (absolute_import, division, print_function)
import numpy as np
def PROJECT(k, n, xi): # xi modified in place
a = np.sum(xi[:n, k])
a = a / n
xi[:n, k] -= a
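# Hedged usage sketch, not part of the original module: centre column 0 of a
# small array in place; its first n rows end up with zero mean.
if __name__ == '__main__':
    xi = np.arange(6.0).reshape(3, 2)
    PROJECT(0, 3, xi)
    print(xi[:, 0])  # -> [-2.  0.  2.]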
|
ScreamingUdder/mantid
|
scripts/Muon/MaxentTools/project.py
|
Python
|
gpl-3.0
| 195
|
import pymongo as Mongo
DB_NAME = 'localhost'
DB_PORT = 27017
TEST_JSON = {'url':'http://hello.com','content':'Lemon Tree'}
class DB():
def __init__(self,db,port):
self.client = Mongo.MongoClient(db,port)
self.db = self.client.test
self.collect = self.db.test_collect
def insert(self,c):
self.collect.insert_one(c)
def find(self,k):
return self.collect.find(k)
def delete(self,k):
return self.collect.delete_many(k)
def close(self):
self.client.close()
if __name__ == '__main__':
# Client = Mongo.MongoClient(DB,PORT)
# db = Client.test
# collect = db.test_collect
# collect.insert(TEST_JSON)
# for x in collect.find({'content':'Lemon Tree'}):
# print x
# Client.close()
print 'mongodb test start:'
db = DB(DB_NAME,DB_PORT)
db.insert(TEST_JSON)
result = db.find({'content':'Lemon Tree'})
for x in result:
print x
db.delete({'content':'Lemon Tree'})
db.close()
print 'mongodb test complete!'
|
CuteLemon/Learn
|
NewsAPI_Scraper/db_operation.py
|
Python
|
gpl-3.0
| 1,050
|
# coding=utf-8
# Author: adaur <adaur.underground@gmail.com>
# Contributor: PHD <phd59fr@gmail.com>, pluzun <pluzun59@gmail.com>
#
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import validators
from requests.compat import urljoin
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickchill.helper.common import convert_size, try_int
from sickchill.providers.torrent.TorrentProvider import TorrentProvider
class YggTorrentProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
# Provider Init
TorrentProvider.__init__(self, 'YggTorrent')
# Credentials
self.username = None
self.password = None
# Torrent Stats
self.minseed = None
self.minleech = None
# URLs
self.custom_url = None
self.url = 'https://www6.yggtorrent.gg/'
self.urls = {
'login': urljoin(self.url, 'user/login'),
'search': urljoin(self.url, 'engine/search')
}
# Proper Strings
self.proper_strings = ['PROPER']
# Cache
self.cache = tvcache.TVCache(self, min_time=30)
def update_urls(self, new_url, custom=False):
if custom and not new_url:
return True
if not validators.url(new_url):
if custom:
logger.log("Invalid custom url: {0}".format(self.custom_url), logger.WARNING)
else:
logger.log('Url changing has failed!', logger.DEBUG)
return False
self.url = new_url
self.urls = {
'login': urljoin(self.url, 'user/login'),
'search': urljoin(self.url, 'engine/search')
}
return True
def login(self):
login_params = {
'id': self.username,
'pass': self.password,
}
self.update_urls(self.custom_url, True)
response = self.get_url(self.urls['login'], post_data=login_params, returns='response')
if self.url not in response.url:
new_url = response.url.split('user/login')[0]
logger.log('Changing base url from {} to {}'.format(self.url, new_url), logger.DEBUG)
if not self.update_urls(new_url):
return False
response = self.get_url(self.urls['login'], post_data=login_params, returns='response')
# The login is now an AJAX call (401 : Bad credentials, 200 : Logged in, other : server failure)
if not response or response.status_code != 200:
logger.log('Unable to connect to provider', logger.WARNING)
return False
else:
# It seems we are logged, let's verify that !
response = self.get_url(self.url, returns='response')
if response.status_code != 200:
logger.log('Unable to connect to provider', logger.WARNING)
return False
if 'logout' not in response.text:
logger.log('Invalid username or password. Check your settings', logger.WARNING)
return False
return True
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches
results = []
for mode in search_strings:
items = []
logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log('Search string: {0}'.format
(search_string.decode('utf-8')), logger.DEBUG)
# search string needs to be normalized, single quotes are apparently not allowed on the site
# ç should also be replaced, people tend to use c instead
replace_chars = {
"'": '',
"ç": 'c'
}
for k, v in replace_chars.iteritems():
search_string = search_string.replace(k, v)
logger.log('Sanitized string: {0}'.format
(search_string.decode('utf-8')), logger.DEBUG)
try:
search_params = {
'category': '2145',
                        'sub_category': 'all',
'name': re.sub(r'[()]', '', search_string),
'do': 'search'
}
data = self.get_url(self.urls['search'], params=search_params, returns='text')
if not data:
continue
if 'logout' not in data:
logger.log('Refreshing cookies', logger.DEBUG)
self.login()
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find(class_='table')
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one Release is found
if len(torrent_rows) < 2:
logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
continue
# Skip column headers
for result in torrent_rows[1:]:
cells = result('td')
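                            # Cell layout used below (0-indexed): 1 = title, 2 = link carrying the
                            # torrent id, 5 = size, 7 = seeders, 8 = leechers.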
if len(cells) < 9:
continue
title = cells[1].find('a').get_text(strip=True)
                            torrent_id = cells[2].find('a')['target']
                            download_url = urljoin(self.url, 'engine/download_torrent?id=' + torrent_id)
if not (title and download_url):
continue
seeders = try_int(cells[7].get_text(strip=True))
leechers = try_int(cells[8].get_text(strip=True))
torrent_size = cells[5].get_text()
size = convert_size(torrent_size) or -1
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log('Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'.format
(title, seeders, leechers), logger.DEBUG)
continue
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
if mode != 'RSS':
logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
(title, seeders, leechers), logger.DEBUG)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError):
logger.log('Failed parsing provider {}.'.format(self.name), logger.ERROR)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = YggTorrentProvider()
|
dfalt974/SickRage
|
sickbeard/providers/yggtorrent.py
|
Python
|
gpl-3.0
| 8,174
|
#!/usr/bin/env python
#This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
from setuptools import setup, find_packages
import re
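# Module metadata lives in __tryton__.py as a Python literal; evaluating it here follows the old
# Tryton packaging convention (note the Python 2 file() builtin).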
info = eval(file('__tryton__.py').read())
requires = ['BeautifulSoup < 3.1']
for dep in info.get('depends', []):
if not re.match(r'(ir|res|workflow|webdav)(\W|$)', dep):
requires.append('trytond_' + dep)
major_version, minor_version, _ = info.get('version', '0.0.1').split('.', 2)
requires.append('trytond >= %s.%s' % (major_version, minor_version))
requires.append('trytond < %s.%s' % (major_version, int(minor_version) + 1))
setup(name='trytond_google_translate',
version=info.get('version', '0.0.1'),
description=info.get('description', ''),
author=info.get('author', ''),
author_email=info.get('email', ''),
url=info.get('website', ''),
download_url="http://downloads.tryton.org/" + \
info.get('version', '0.0.1').rsplit('.', 1)[0] + '/',
package_dir={'trytond.modules.google_translate': '.'},
packages=[
'trytond.modules.google_translate',
],
package_data={
'trytond.modules.google_translate': info.get('xml', []) \
+ info.get('translation', []),
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Plugins',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'Intended Audience :: Legal Industry',
'Intended Audience :: Manufacturing',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Natural Language :: French',
'Natural Language :: German',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Office/Business',
],
license='GPL-3',
install_requires=requires,
zip_safe=False,
entry_points="""
[trytond.modules]
google_translate = trytond.modules.google_translate
""",
)
|
tryton/google_translate
|
setup.py
|
Python
|
gpl-3.0
| 2,085
|
class Hand:
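    """A minimal container for the cards currently in a hand."""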
def __init__(self):
self.the_hand = []
    def display_hand(self):
        # Print each card in the hand, one per line.
        for card in self.the_hand:
            print(card)
|
DalikarFT/CFVOP
|
Hand.py
|
Python
|
gpl-3.0
| 176
|
"""simple script that santizes the `output` directory.
works even if no santitation required.
'sanitation' is simply removing potentially sensitive fields from the results."""
import sys, os, json
from . import core
from os.path import join
def do():
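    """Walk every JSON results file under the output directory and rewrite it with sensitive fields removed."""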
output_dir = core.output_dir()
for dirname in os.listdir(output_dir):
for filename in os.listdir(join(output_dir, dirname)):
path = join(output_dir, dirname, filename)
if path.endswith('.json'):
                sys.stdout.write('sanitizing %s' % path)
core.write_results(
core.sanitize_ga_response(json.load(open(path, 'r'))),
path)
sys.stdout.write(" ...done\n")
sys.stdout.flush()
if __name__ == '__main__':
do()
|
elifesciences/elife-metrics
|
src/article_metrics/ga_metrics/sanitize.py
|
Python
|
gpl-3.0
| 801
|