| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M; ⌀ = null) |
|---|---|---|---|---|
AlmostBetterNetwork/podmaster-host
|
refs/heads/master
|
podcasts/migrations/0048_auto_20180501_0154.py
|
3
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-01 01:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('podcasts', '0047_auto_20180417_0301'),
]
operations = [
migrations.AlterField(
model_name='podcast',
name='homepage',
field=models.CharField(blank=True, max_length=500),
),
]
|
retomerz/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractsuperclass/moveExtendsCheckReference/source_module.py
|
80
|
from shared_module import TheParentOfItAll
class MyClass(TheParentOfItAll):
pass
|
Cynary/soar
|
refs/heads/master
|
test.py
|
1
|
import soar.brain.brain as brain
import soar.gui.robot_model as model
import soar.pioneer.geometry as geom
from soar.gui.robot import transform
from math import *
import time
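# Estimate the distance and bearing of a wall to the robot's right from
# sonar readings 5-7, falling back between sensors when one returns None;
# returns (distance, None) when only a single valid reading is available.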
def get_dright_angle(r):
s5,s6,s7 = r.getSonars()[5:8]
i = 6
if s5 is not None:
s6 = s5
i = 5
if s6 is None:
return s7,None
if s7 is None:
return s6,None
orig = geom.Point(0,0)
p1 = geom.Point(*transform(model.sonar_poses[i],(s6,0.)))
p2 = geom.Point(*transform(model.sonar_poses[7],(s7,0.)))
w = geom.Segment(p1,p2)
dist = w.distance(orig,segment=False)
w_vec = p1-p2
angle = -atan2(w_vec.y,w_vec.x)
return dist,angle
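# One step of a proportional wall-follower: hold desired_dist from the right
# wall using gains k_d and k_theta, scale forward speed by the nearest sonar
# range ("danger"), and rotate while creeping forward when no wall angle is
# available.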
def step(r):
k_d = 7.5
k_theta = 1.5
k_v = 0.5
dist,angle = get_dright_angle(r)
ranges = [i for i in r.getSonars() if i is not None]
if len(ranges) == 0:
danger = 1.
else:
danger = min(ranges)
if angle is None:
r.setRotational(1.0)
r.setForward(danger*k_v)
else:
desired_dist = 0.4
desired_theta = k_d*(desired_dist-dist)
omega = k_theta*(desired_theta-angle)
r.setRotational(omega)
r.setForward(danger*k_v)
# print("HERE")
# assert False
brain.main(step,period=0.1)
|
swdream/neutron
|
refs/heads/master
|
neutron/cmd/eventlet/agents/metadata.py
|
61
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent import metadata_agent
def main():
metadata_agent.main()
|
hdinsight/hue
|
refs/heads/master
|
desktop/core/ext-py/python-ldap-2.3.13/Tests/Lib/ldap/test_modlist.py
|
40
|
"""
Tests for module ldap.modlist
"""
import ldap
from ldap.modlist import addModlist,modifyModlist
print '\nTesting function addModlist():'
addModlist_tests = [
(
{
'objectClass':['person','pilotPerson'],
'cn':['Michael Str\303\266der','Michael Stroeder'],
'sn':['Str\303\266der'],
'dummy1':[],
'dummy2':['2'],
'dummy3':[''],
},
[
('objectClass',['person','pilotPerson']),
('cn',['Michael Str\303\266der','Michael Stroeder']),
('sn',['Str\303\266der']),
('dummy2',['2']),
('dummy3',['']),
]
),
]
for entry,test_modlist in addModlist_tests:
test_modlist.sort()
result_modlist = addModlist(entry)
result_modlist.sort()
if test_modlist!=result_modlist:
print 'addModlist(%s) returns\n%s\ninstead of\n%s.' % (
repr(entry),repr(result_modlist),repr(test_modlist)
)
print '\nTesting function modifyModlist():'
modifyModlist_tests = [
(
{
'objectClass':['person','pilotPerson'],
'cn':['Michael Str\303\266der','Michael Stroeder'],
'sn':['Str\303\266der'],
'enum':['a','b','c'],
'c':['DE'],
},
{
'objectClass':['person','inetOrgPerson'],
'cn':['Michael Str\303\266der','Michael Stroeder'],
'sn':[],
'enum':['a','b','d'],
'mail':['michael@stroeder.com'],
},
[
(ldap.MOD_DELETE,'objectClass',None),
(ldap.MOD_ADD,'objectClass',['person','inetOrgPerson']),
(ldap.MOD_DELETE,'c',None),
(ldap.MOD_DELETE,'sn',None),
(ldap.MOD_ADD,'mail',['michael@stroeder.com']),
(ldap.MOD_DELETE,'enum',None),
(ldap.MOD_ADD,'enum',['a','b','d']),
]
),
(
{
'c':['DE'],
},
{
'c':['FR'],
},
[
(ldap.MOD_DELETE,'c',None),
(ldap.MOD_ADD,'c',['FR']),
]
),
# Now a weird test-case for catching all possibilities
# of removing an attribute with MOD_DELETE,attr_type,None
(
{
'objectClass':['person'],
'cn':[None],
'sn':[''],
'c':['DE'],
},
{
'objectClass':[],
'cn':[],
'sn':[None],
},
[
(ldap.MOD_DELETE,'c',None),
(ldap.MOD_DELETE,'objectClass',None),
(ldap.MOD_DELETE,'sn',None),
]
),
]
for old_entry,new_entry,test_modlist in modifyModlist_tests:
test_modlist.sort()
result_modlist = modifyModlist(old_entry,new_entry)
result_modlist.sort()
if test_modlist!=result_modlist:
print 'modifyModlist(%s,%s) returns\n%s\ninstead of\n%s.' % (
repr(old_entry),
repr(new_entry),
repr(result_modlist),
repr(test_modlist)
)
|
susansalkeld/discsongs
|
refs/heads/master
|
discsongs/lib/python2.7/site-packages/flask/ext/__init__.py
|
853
|
# -*- coding: utf-8 -*-
"""
flask.ext
~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
def setup():
from ..exthook import ExtensionImporter
importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], __name__)
importer.install()
setup()
del setup
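# Illustrative sketch (not part of the original module) of what the import
# hook above amounts to, using the docstring's hypothetical extension "foo":
#
#     from flask.ext.foo import bar
#     # behaves roughly like:
#     try:
#         from flask_foo import bar
#     except ImportError:
#         from flaskext.foo import bar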
|
dricciardelli/vae2vec
|
refs/heads/master
|
capt_gen_e2e_cs.py
|
1
|
# -*- coding: utf-8 -*-
import math
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import pickle
import pickle as pkl
import cv2
import skimage
import tensorflow.python.platform
from tensorflow.python.ops import rnn
from keras.preprocessing import sequence
from collections import Counter
from collections import defaultdict
import itertools
test_image_path='./data/acoustic-guitar-player.jpg'
vgg_path='./data/vgg16-20160129.tfmodel'
n=50000-2
def map_lambda():
return n+1
def rev_map_lambda():
return "<UNK>"
def load_text(n,capts,num_samples=None):
# fname = 'Oxford_English_Dictionary.txt'
# txt = []
# with open(fname,'rb') as f:
# txt = f.readlines()
# txt = [x.decode('utf-8').strip() for x in txt]
# txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
# List of words
# word_list = [x.split(' ', 1)[0].strip() for x in txt]
# # List of definitions
# def_list = [x.split(' ', 1)[1].strip()for x in txt]
with open('./training_data/training_data.pkl','rb') as raw:
word_list,dl=pkl.load(raw)
def_list=[]
# def_list=[' '.join(defi) for defi in def_list]
i=0
while i<len( dl):
defi=dl[i]
if len(defi)>0:
def_list+=[' '.join(defi)]
i+=1
else:
dl.pop(i)
word_list.pop(i)
maxlen=0
minlen=100
for defi in def_list:
minlen=min(minlen,len(defi.split()))
maxlen=max(maxlen,len(defi.split()))
print(minlen)
print(maxlen)
maxlen=30
# # Initialize the "CountVectorizer" object, which is scikit-learn's
# # bag of words tool.
# vectorizer = CountVectorizer(analyzer = "word", \
# tokenizer = None, \
# preprocessor = None, \
# stop_words = None, \
# max_features = None, \
# token_pattern='\\b\\w+\\b') # Keep single character words
# _map,rev_map=get_one_hot_map(word_list,def_list,n)
_map=pkl.load(open('mapaoh.pkl','rb'))
rev_map=pkl.load(open('rev_mapaoh.pkl','rb'))
if num_samples is not None:
num_samples=len(capts)
# X = map_one_hot(word_list[:num_samples],_map,1,n)
# y = (36665, 56210)
# print _map
if capts is not None:
# y,mask = map_one_hot(capts[:num_samples],_map,maxlen,n)
# np.save('ycoh')
y=np.load('ycoh.npy','r')
else:
# np.save('X',X)
# np.save('yc',y)
# np.save('maskc',mask)
y=np.load('yaoh.npy','r')
X=np.load('Xaoh.npy','r')
mask=np.load('maskaoh.npy','r')
print (np.max(y))
return X, y, mask,rev_map
def get_one_hot_map(to_def,corpus,n):
# words={}
# for line in to_def:
# if line:
# words[line.split()[0]]=1
# counts=defaultdict(int)
# uniq=defaultdict(int)
# for line in corpus:
# for word in line.split():
# if word not in words:
# counts[word]+=1
# words=list(words.keys())
words=[]
counts=defaultdict(int)
uniq=defaultdict(int)
for line in to_def+corpus:
for word in line.split():
if word not in words:
counts[word]+=1
_map=defaultdict(lambda :n+1)
rev_map=defaultdict(lambda:"<UNK>")
# words=words[:25000]
for i in counts.values():
uniq[i]+=1
print (len(words))
# random.shuffle(words)
words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
print (len(words))
i=0
# random.shuffle(words)
for num_bits in range(binary_dim):
for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
bitmap=np.zeros(binary_dim)
bitmap[np.array(bit_config)]=1
num=bitmap*(2** np.arange(binary_dim ))
num=np.sum(num).astype(np.uint32)
word=words[i]
_map[word]=num
rev_map[num]=word
i+=1
if i>=len(words):
break
if i>=len(words):
break
# for word in words:
# i+=1
# _map[word]=i
# rev_map[i]=word
rev_map[n+1]='<UNK>'
if zero_end_tok:
rev_map[0]='.'
else:
rev_map[0]='Start'
rev_map[n+2]='End'
print (list(reversed(sorted(uniq.items()))))
print (len(list(uniq.items())))
# print rev_map
return _map,rev_map
def map_word_emb(corpus,_map):
### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
rtn=[]
rtn2=[]
for word in corpus:
mapped=_map[word]
rtn.append(mapped)
if get_rand_vec:
mapped_rand=random.choice(list(_map.keys()))
while mapped_rand==word:
mapped_rand=random.choice(list(_map.keys()))
mapped_rand=_map[mapped_rand]
rtn2.append(mapped_rand)
if get_rand_vec:
return np.array(rtn),np.array(rtn2)
return np.array(rtn)
def map_one_hot(corpus,_map,maxlen,n):
if maxlen==1:
if not form2:
total_not=0
rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
for l,line in enumerate(corpus):
if len(line)==0:
rtn[l,-1]=1
else:
mapped=_map[line]
if mapped==75001:
total_not+=1
rtn[l,mapped]=1
print (total_not,len(corpus))
return rtn
else:
total_not=0
rtn=np.zeros([len(corpus)],dtype=np.float32)
for l,line in enumerate(corpus):
if len(line)==0:
rtn[l,-1]=1
else:
mapped=_map[line]
if mapped==75001:
total_not+=1
rtn[l]=mapped
print (total_not,len(corpus))
return rtn
else:
if form2:
rtn=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
print (rtn.shape)
mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
print (mask.shape)
mask[:,1]=1.0
totes=0
nopes=0
wtf=0
for l,_line in enumerate(corpus):
x=0
line=_line.split()
for i in range(min(len(line),maxlen-1)):
# if line[i] not in _map:
# nopes+=1
mapped=_map[line[i]]
rtn[l,i+1]=mapped
if mapped==n+1:
wtf+=1
mask[l,i+1]=1.0
totes+=1
x=i+1
to_app=n+2
if zero_end_tok:
to_app=0
rtn[l,x+1]=to_app
mask[l,x+1]=1.0
print (nopes,totes,wtf)
return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
""" Xavier initialization of network weights"""
# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
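# Note: with the default constant=1e-4 the uniform range is tiny, e.g.
# fan_in=4096, fan_out=256 gives +/-1e-4*sqrt(6/4352) ~= +/-3.7e-6, versus
# constant=1 in the standard Glorot/Xavier formulation.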
low = -constant*np.sqrt(6.0/(fan_in + fan_out))
high = constant*np.sqrt(6.0/(fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32)
class Caption_Generator():
def __init__(self, dim_in, dim_embed, dim_hidden, batch_size, n_lstm_steps, n_words, init_b=None,from_image=False,n_input=None,n_lstm_input=None,n_z=None):
self.dim_in = dim_in
self.dim_embed = dim_embed
self.dim_hidden = dim_hidden
self.batch_size = batch_size
self.n_lstm_steps = n_lstm_steps
self.n_words = n_words
self.n_input = n_input
self.n_lstm_input=n_lstm_input
self.n_z=n_z
if from_image:
with open(vgg_path,'rb') as f:
fileContent = f.read()
graph_def = tf.GraphDef()
graph_def.ParseFromString(fileContent)
self.images = tf.placeholder("float32", [1, 224, 224, 3])
tf.import_graph_def(graph_def, input_map={"images":self.images})
graph = tf.get_default_graph()
self.sess = tf.InteractiveSession(graph=graph)
self.from_image=from_image
# declare the variables to be used for our word embeddings
self.word_embedding = tf.Variable(tf.random_uniform([self.n_z, self.dim_embed], -0.1, 0.1), name='word_embedding')
self.embedding_bias = tf.Variable(tf.zeros([dim_embed]), name='embedding_bias')
# declare the LSTM itself
self.lstm = tf.contrib.rnn.BasicLSTMCell(dim_hidden)
# declare the variables to be used to embed the image feature embedding to the word embedding space
self.img_embedding = tf.Variable(tf.random_uniform([dim_in, dim_hidden], -0.1, 0.1), name='img_embedding')
self.img_embedding_bias = tf.Variable(tf.zeros([dim_hidden]), name='img_embedding_bias')
# declare the variables to go from an LSTM output to a word encoding output
self.word_encoding = tf.Variable(tf.random_uniform([dim_hidden, self.n_z], -0.1, 0.1), name='word_encoding')
# initialize this bias variable from the preProBuildWordVocab output
# optional initialization setter for encoding bias variable
if init_b is not None:
self.word_encoding_bias = tf.Variable(init_b, name='word_encoding_bias')
else:
self.word_encoding_bias = tf.Variable(tf.zeros([self.n_z]), name='word_encoding_bias')
self.embw=tf.Variable(xavier_init(self.n_input,self.n_z),name='embw')
self.embb=tf.Variable(tf.zeros([self.n_z]),name='embb')
self.all_encoding_weights=[self.embw,self.embb]
def build_model(self):
# declaring the placeholders for our extracted image feature vectors, our caption, and our mask
# (describes how long our caption is with an array of 0/1 values of length `maxlen`)
img = tf.placeholder(tf.float32, [self.batch_size, self.dim_in])
caption_placeholder = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])
self.output_placeholder = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
network_weights = self._initialize_weights()
# getting an initial LSTM embedding from our image embedding
image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
flat_caption_placeholder=tf.reshape(caption_placeholder,[-1])
#leverage one-hot sparsity to lookup embeddings fast
embedded_input,KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],flat_caption_placeholder,logit=True)
KLD_loss=tf.multiply(KLD_loss,tf.reshape(mask,[-1,1]))
KLD_loss=tf.reduce_sum(KLD_loss)
word_embeddings=tf.matmul(embedded_input,self.word_embedding)+self.embedding_bias
word_embeddings=tf.reshape(word_embeddings,[self.batch_size,self.n_lstm_steps,-1])
embedded_input=tf.nn.l2_normalize(embedded_input,dim=-1)
embedded_input=tf.reshape(embedded_input,[self.batch_size,self.n_lstm_steps,-1])
#initialize lstm state
state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
rnn_output=[]
total_loss=0
with tf.variable_scope("RNN"):
# unroll lstm
for i in range(self.n_lstm_steps):
if i > 0:
# if this isn’t the first iteration of our LSTM we need to get the word_embedding corresponding
# to the (i-1)th word in our caption
current_embedding = word_embeddings[:,i-1,:]
else:
#if this is the first iteration of our LSTM we utilize the embedded image as our input
current_embedding = image_embedding
if i > 0:
# allows us to reuse the LSTM tensor variable on each iteration
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(current_embedding, state)
if i>0:
out=tf.nn.l2_normalize(tf.matmul(out,self.word_encoding)+self.word_encoding_bias,dim=-1)
total_loss+=tf.reduce_sum(tf.reduce_sum(tf.multiply(embedded_input[:,i,:],out),axis=-1)*mask[:,i])
# #perform classification of output
# rnn_output=tf.concat(rnn_output,axis=1)
# rnn_output=tf.reshape(rnn_output,[self.batch_size*(self.n_lstm_steps),-1])
# encoded_output=tf.matmul(rnn_output,self.word_encoding)+self.word_encoding_bias
# #get loss
# normed_embedding= tf.nn.l2_normalize(encoded_output, dim=-1)
# normed_target=tf.nn.l2_normalize(embedded_input,dim=-1)
# cos_sim=tf.multiply(normed_embedding,normed_target)[:,1:]
# cos_sim=(tf.reduce_sum(cos_sim,axis=-1))
# cos_sim=tf.reshape(cos_sim,[self.batch_size,-1])
# cos_sim=tf.reduce_sum(cos_sim[:,1:]*mask[:,1:])
cos_sim=total_loss/tf.reduce_sum(mask[:,1:])
# self.exp_loss=tf.reduce_sum((-cos_sim))
# self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
total_loss = tf.reduce_sum(-(cos_sim))
# mse=tf.reduce_sum(tf.reshape(tf.square(encoded_output-embedded_input),[self.batch_size,self.n_lstm_steps,-1]),axis=-1)[:,1:]*(mask[:,1:])
# mse=tf.reduce_sum(mse)/tf.reduce_sum(mask[:,1:])
#average over timeseries length
# total_loss=tf.reduce_sum(masked_xentropy)/tf.reduce_sum(mask[:,1:])
# total_loss=mse
self.print_loss=total_loss
total_loss+=KLD_loss/tf.reduce_sum(mask)
return total_loss, img, caption_placeholder, mask
def build_generator(self, maxlen, batchsize=1,from_image=False):
#same setup as `build_model` function
img = tf.placeholder(tf.float32, [self.batch_size, self.dim_in])
image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
state = self.lstm.zero_state(batchsize,dtype=tf.float32)
#declare list to hold the words of our generated captions
all_words = []
with tf.variable_scope("RNN"):
# in the first iteration we have no previous word, so we directly pass in the image embedding
# and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
output, state = self.lstm(image_embedding, state)
previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
for i in range(maxlen):
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(previous_word, state)
# get the maximum-probability word and its encoding from the output of the LSTM
logit = tf.matmul(out, self.word_encoding) + self.word_encoding_bias
best_word = tf.argmax(logit, 1)
with tf.device("/cpu:0"):
# get the embedding of the best_word to use as input to the next iteration of our LSTM
previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
previous_word += self.embedding_bias
all_words.append(best_word)
self.img=img
self.all_words=all_words
return img, all_words
def _initialize_weights(self):
all_weights = dict()
trainability=False
if not same_embedding:
all_weights['input_meaning'] = {
'affine_weight': tf.Variable(xavier_init(self.n_z, self.n_lstm_input),name='affine_weight',trainable=trainability),
'affine_bias': tf.Variable(tf.zeros(self.n_lstm_input),name='affine_bias',trainable=trainability)}
with tf.device('/cpu:0'):
om=tf.Variable(xavier_init(self.n_input, self.n_z),name='out_mean',trainable=trainability)
if not vanilla:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_meanb',trainable=trainability),
'out_log_sigma': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_log_sigmab',trainable=trainability)}
all_weights['variational_encoding'] = {
'out_mean': om,
'out_log_sigma': tf.Variable(xavier_init(self.n_input, self.n_z),name='out_log_sigma',trainable=trainability)}
else:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_meanb',trainable=trainability)}
all_weights['variational_encoding'] = {
'out_mean': om}
# self.no_reload+=all_weights['input_meaning'].values()
# self.var_embs=[]
# if transfertype2:
# self.var_embs=all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
# self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
# if lstm_stack>1:
# self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
# all_weights['LSTM'] = {
# 'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
# 'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
# 'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
# 'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
# 'lstm': self.lstm}
all_encoding_weights=[all_weights[x].values() for x in all_weights]
for w in all_encoding_weights:
self.all_encoding_weights+=w
return all_weights
def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
# x=tf.matmul(x,self.embw)+self.embb
if logit:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x,lookup=True)
else:
if not form2:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.n_input))
all_the_f_one_h.append(tf.one_hot(x,depth=self.n_input))
embedding=tf.matmul(z,self.word_embedding)+self.embedding_bias
embedding=z
return embedding,vae_loss
def _vae_sample(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if not vanilla:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
with tf.device('/cpu:0'):
mu=tf.nn.embedding_lookup(weights['out_mean'],x)
mu+=biases['out_mean']
if not vanilla:
with tf.device('/cpu:0'):
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)
logvar+=biases['out_log_sigma']
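# Reparameterization trick: draw epsilon ~ N(0, I) and form z = mu + std*epsilon
# so gradients can flow through the sampling step.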
if not vanilla:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if not vanilla:
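# Closed-form KL divergence of N(mu, sigma^2) from the standard normal prior.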
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print(logvar.shape, epsilon.shape, std.shape, z.shape, KLD.shape)
return z,KLD
def crop_image(self,x, target_height=227, target_width=227, as_float=True,from_path=True):
#image preprocessing to crop and resize image
image = (x)
if from_path==True:
image=cv2.imread(image)
if as_float:
image = image.astype(np.float32)
if len(image.shape) == 2:
image = np.tile(image[:,:,None], 3)
elif len(image.shape) == 4:
image = image[:,:,:,0]
height, width, rgb = image.shape
if width == height:
resized_image = cv2.resize(image, (target_height,target_width))
elif height < width:
resized_image = cv2.resize(image, (int(width * float(target_height)/height), target_width))
cropping_length = int((resized_image.shape[1] - target_height) / 2)
resized_image = resized_image[:,cropping_length:resized_image.shape[1] - cropping_length]
else:
resized_image = cv2.resize(image, (target_height, int(height * float(target_width) / width)))
cropping_length = int((resized_image.shape[0] - target_width) / 2)
resized_image = resized_image[cropping_length:resized_image.shape[0] - cropping_length,:]
return cv2.resize(resized_image, (target_height, target_width))
def read_image(self,path=None):
# parses image from file path and crops/resizes
if path is None:
path=test_image_path
img = self.crop_image(path, target_height=224, target_width=224)
if img.shape[2] == 4:
img = img[:,:,:3]
img = img[None, ...]
return img
def get_caption(self,x=None):
#gets caption from an image by feeding it through imported VGG16 graph
if self.from_image:
feat = self.read_image(x)
fc7 = self.sess.run(self.sess.graph.get_tensor_by_name("import/Relu_1:0"), feed_dict={self.images:feat})
else:
fc7=np.load(x,'r')
generated_word_index= self.sess.run(self.all_words, feed_dict={self.img:fc7})
generated_word_index = np.hstack(generated_word_index)
generated_words = [ixtoword[x] for x in generated_word_index]
punctuation = np.argmax(np.array(generated_words) == '.')+1
generated_words = generated_words[:punctuation]
generated_sentence = ' '.join(generated_words)
return (generated_sentence)
def get_data(annotation_path, feature_path):
#load training/validation data
annotations = pd.read_table(annotation_path, sep='\t', header=None, names=['image', 'caption'])
return np.load(feature_path,'r'), annotations['caption'].values
def preProBuildWordVocab(sentence_iterator, word_count_threshold=30): # function from Andrej Karpathy's NeuralTalk
#process and vectorize training/validation captions
print('preprocessing %d word vocab' % (word_count_threshold, ))
word_counts = {}
nsents = 0
for sent in sentence_iterator:
nsents += 1
for w in sent.lower().split(' '):
word_counts[w] = word_counts.get(w, 0) + 1
vocab = [w for w in word_counts if word_counts[w] >= word_count_threshold]
print('preprocessed words %d -> %d' % (len(word_counts), len(vocab)))
ixtoword = {}
ixtoword[0] = '.'
wordtoix = {}
wordtoix['#START#'] = 0
ix = 1
for w in vocab:
wordtoix[w] = ix
ixtoword[ix] = w
ix += 1
word_counts['.'] = nsents
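# Initialize the decoder bias from log word frequencies (shifted so the max
# is zero), so the initial softmax roughly matches the empirical word
# distribution.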
bias_init_vector = np.array([1.0*word_counts[ixtoword[i]] for i in ixtoword])
bias_init_vector /= np.sum(bias_init_vector)
bias_init_vector = np.log(bias_init_vector)
bias_init_vector -= np.max(bias_init_vector)
return wordtoix, ixtoword, bias_init_vector.astype(np.float32)
dim_embed = 256
dim_hidden = 256
dim_in = 4096
batch_size = 128
momentum = 0.9
n_epochs = 25
def train(learning_rate=0.001, continue_training=False):
tf.reset_default_graph()
feats, captions = get_data(annotation_path, feature_path)
wordtoix, ixtoword, init_b = preProBuildWordVocab(captions)
np.save('data/ixtoword', ixtoword)
print ('num words:',len(ixtoword))
sess = tf.InteractiveSession()
n_words = len(wordtoix)
maxlen = 30
X, final_captions, mask, _map = load_text(2**19-3,captions)
running_decay=1
decay_rate=0.9999302192204246
# with tf.device('/gpu:0'):
caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, batch_size, maxlen+2, n_words, np.zeros(n_z).astype(np.float32),n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z)
loss, image, sentence, mask = caption_generator.build_model()
saver = tf.train.Saver(max_to_keep=100)
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
tf.global_variables_initializer().run()
tf.train.Saver(var_list=caption_generator.all_encoding_weights,max_to_keep=100).restore(sess,tf.train.latest_checkpoint('modelsvardefdefvarallboth'))
if continue_training:
saver.restore(sess,tf.train.latest_checkpoint(model_path))
losses=[]
for epoch in range(n_epochs):
if epoch==1:
for w in caption_generator.all_encoding_weights:
w.trainable=True
index = (np.arange(len(feats)).astype(int))
np.random.shuffle(index)
index=index[:]
i=0
for start, end in zip( range(0, len(index), batch_size), range(batch_size, len(index), batch_size)):
#format data batch
current_feats = feats[index[start:end]]
current_captions = captions[index[start:end]]
current_caption_ind = [x for x in map(lambda cap: [wordtoix[word] for word in cap.lower().split(' ')[:-1] if word in wordtoix], current_captions)]
current_caption_matrix = sequence.pad_sequences(current_caption_ind, padding='post', maxlen=maxlen+1)
current_caption_matrix = np.hstack( [np.full( (len(current_caption_matrix),1), 0), current_caption_matrix] )
current_mask_matrix = np.zeros((current_caption_matrix.shape[0], current_caption_matrix.shape[1]))
nonzeros = np.array([x for x in map(lambda x: (x != 0).sum()+2, current_caption_matrix )])
current_capts=final_captions[index[start:end]]
for ind, row in enumerate(current_mask_matrix):
row[:nonzeros[ind]] = 1
_, loss_value,total_loss = sess.run([train_op, caption_generator.print_loss,loss], feed_dict={
image: current_feats.astype(np.float32),
caption_generator.output_placeholder : current_caption_matrix.astype(np.int32),
mask : current_mask_matrix.astype(np.float32),
sentence : current_capts.astype(np.float32)
})
print("Current Cost: ", loss_value, "\t Epoch {}/{}".format(epoch, n_epochs), "\t Iter {}/{}".format(start,len(feats)))
losses.append(loss_value*running_decay)
# if epoch<9:
# if i%3==0:
# running_decay*=decay_rate
# else:
# if i%8==0:
# running_decay*=decay_rate
i+=1
print(losses[-1])
print("Saving the model from epoch: ", epoch)
pkl.dump(losses,open('losses/loss_e2e.pkl','wb'))
saver.save(sess, os.path.join(model_path, 'model'), global_step=epoch)
learning_rate *= 0.95
def test(sess,image,generated_words,ixtoword,idx=0): # Naive greedy search
feats, captions = get_data(annotation_path, feature_path)
feat = np.array([feats[idx]])
saver = tf.train.Saver()
sanity_check= False
# sanity_check=True
if not sanity_check:
saved_path=tf.train.latest_checkpoint(model_path)
saver.restore(sess, saved_path)
else:
tf.global_variables_initializer().run()
generated_word_index= sess.run(generated_words, feed_dict={image:feat})
generated_word_index = np.hstack(generated_word_index)
generated_sentence = [ixtoword[x] for x in generated_word_index]
print(generated_sentence)
if __name__=='__main__':
model_path = './models/tensorflowcs'
feature_path = './data/feats.npy'
annotation_path = './data/results_20130124.token'
import sys
feats, captions = get_data(annotation_path, feature_path)
n_input=50001
binary_dim=n_input
n_lstm_input=512
n_z=256
zero_end_tok=True
form2=True
vanilla=True
onehot=False
same_embedding=False
if sys.argv[1]=='train':
train()
elif sys.argv[1]=='test':
ixtoword = np.load('data/ixtoword.npy').tolist()
n_words = len(ixtoword)
maxlen=15
sess = tf.InteractiveSession()
batch_size=1
caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, 1, maxlen+2, n_words,n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z)
image, generated_words = caption_generator.build_generator(maxlen=maxlen)
test(sess,image,generated_words,ixtoword,1)
|
tugluck/galah
|
refs/heads/v0.2dev
|
galah/base/pretty.py
|
2
|
# Copyright 2012-2013 John Sullivan
# Copyright 2012-2013 Other contributers as noted in the CONTRIBUTERS file
#
# This file is part of Galah.
#
# Galah is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Galah is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Galah. If not, see <http://www.gnu.org/licenses/>.
def plural_if(zstring, zcondition):
"""
Returns zstring pluralized (adds an 's' to the end) if zcondition is True,
or if zcondition is a number not equal to 1.
Example usage could be ``plural_if("cow", len(cow_list))``.
"""
# If they gave us a boolean value, just use that, otherwise, assume the
# value is some integral type.
if type(zcondition) is bool:
plural = zcondition
else:
plural = zcondition != 1
return zstring + ("s" if plural else "")
def pretty_list(the_list, conjunction = "and", none_string = "nothing"):
"""
Returns a grammatically correct string representing the given list. For
example...
>>> pretty_list(["John", "Bill", "Stacy"])
"John, Bill, and Stacy"
>>> pretty_list(["Bill", "Jorgan"], "or")
"Bill or Jorgan"
>>> pretty_list([], none_string = "nobody")
"nobody"
"""
the_list = list(the_list)
if len(the_list) == 0:
return none_string
elif len(the_list) == 1:
return str(the_list[0])
elif len(the_list) == 2:
return str(the_list[0]) + " " + conjunction + " " + str(the_list[1])
else:
# Add every item except the last two together, separated by commas
result = ", ".join(the_list[:-2]) + ", "
# Add the last two items, joined together by a comma and the given
# conjunction
result += "%s, %s %s" % \
(str(the_list[-2]), conjunction, str(the_list[-1]))
return result
def pretty_timedelta(zdelta):
# We will build our string part by part. All strings in this array will be
# concatenated with a space as delimiter.
stringParts = []
if zdelta.days < 0:
ago = True
zdelta = -zdelta
else:
ago = False
stringParts.append("in")
months = abs(zdelta.days) / 30
hours = zdelta.seconds / (60 * 60)
minutes = (zdelta.seconds % (60 * 60)) / 60
seconds = (zdelta.seconds % 60)
if months == 0 and zdelta.days == 0 and hours == 0 and minutes == 0 and \
seconds < 10:
return "just now"
# Add the months part. Because we only approximate the number of months,
# the rest of the numbers (days, hours, etc.) won't be exact, so we skip
# the rest of the if statements.
if months != 0:
stringParts += ["about", str(months), plural_if("month", months)]
# Add the days part
if months == 0 and zdelta.days != 0:
stringParts += [str(zdelta.days), plural_if("day", zdelta.days)]
# Add the hours part
if months == 0 and hours != 0:
stringParts += [str(hours), plural_if("hour", hours)]
# Add the minutes part if we're less than 4 hours away
if months == 0 and minutes != 0 and zdelta.days == 0 and hours < 4:
stringParts += [str(minutes), plural_if("minute", minutes)]
# Add the seconds part if we're less than 10 minutes away
if months == 0 and seconds != 0 and zdelta.days == 0 and hours == 0 and minutes < 10:
stringParts += [str(seconds), plural_if("second", seconds)]
if ago:
stringParts.append("ago")
return " ".join(stringParts)
def pretty_time_distance(za, zb):
return pretty_timedelta(zb - za)
def pretty_time(ztime):
return ztime.strftime("%A, %B %d, %Y @ %I:%M %p")
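# Usage sketch (illustrative; outputs follow from the definitions above,
# under the Python 2 division semantics this module was written for):
#
#     >>> plural_if("cow", 2)
#     'cows'
#     >>> pretty_list(["Bill", "Jorgan"], "or")
#     'Bill or Jorgan'
#     >>> pretty_timedelta(datetime.timedelta(minutes=-3))
#     '3 minutes ago'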
|
rienafairefr/nYNABapi
|
refs/heads/master
|
test_live/get_webapp_code.py
|
2
|
import os
import re
from lxml import html
import requests
import jsbeautifier
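# Fetch the YNAB web app landing page, beautify each referenced script, and
# save local copies; the hashed "appmain" bundle name is recovered from the
# chunk map inside the "before" bootstrap script (see the comments below).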
response = requests.get('http://app.youneedabudget.com')
with open('index.html', 'w', encoding='utf-8') as file_before:
file_before.write(response.text)
parsed = html.fromstring(response.text)
for src in parsed.xpath('//script/@src'):
url_src = str(src)
file = url_src.rsplit('/',1)[-1]
if file.startswith('before.'):
before_response = requests.get(str(src))
before_script = jsbeautifier.beautify(before_response.text)
with open(os.path.join('web_app','before.js'),'w+',encoding='utf-8') as file_before:
file_before.write(before_script)
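# The beautified bootstrap script appears to contain two maps keyed by the
# same chunk ids: one maps an id to the name "appmain", a later one maps the
# same id to the random build hash. Find the id first, then read its hash.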
regex1 = re.compile(r'\s*(\d):\s"appmain"')
regex2=None
for line in before_script.split('\n'):
if regex1.match(line):
idx = regex1.match(line).groups()[0]
regex2 = re.compile(r'\s*%s:\s"(.*)"' % idx)
if regex2 is not None and regex2.match(line):
test = regex2.match(line).groups()[0]
if test!='appmain':
random_id = test
break
url_appmain = '/'.join(url_src.rsplit('/',1)[:-1]+['appmain.'+random_id+'.js'])
appmain_response = requests.get(url_appmain)
appmain_script = jsbeautifier.beautify(appmain_response.text)
with open(os.path.join('web_app','appmain.js'),'w+',encoding='utf-8') as file_appmain:
file_appmain.write(appmain_script)
if file.startswith('index.'):
script_response = requests.get(str(src))
index_script = jsbeautifier.beautify(script_response.text)
with open(os.path.join('web_app','index.js'),'w+',encoding='utf-8') as file_before:
file_before.write(index_script)
pass
|
pkoutsias/SickRage
|
refs/heads/master
|
lib/tornado/test/iostream_test.py
|
36
|
from __future__ import absolute_import, division, print_function, with_statement
from tornado.concurrent import Future
from tornado import gen
from tornado import netutil
from tornado.iostream import IOStream, SSLIOStream, PipeIOStream, StreamClosedError
from tornado.httputil import HTTPHeaders
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket
from tornado.stack_context import NullContext
from tornado.tcpserver import TCPServer
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog, gen_test
from tornado.test.util import unittest, skipIfNonUnix, refusing_port
from tornado.web import RequestHandler, Application
import errno
import logging
import os
import platform
import socket
import ssl
import sys
def _server_ssl_options():
return dict(
certfile=os.path.join(os.path.dirname(__file__), 'test.crt'),
keyfile=os.path.join(os.path.dirname(__file__), 'test.key'),
)
class HelloHandler(RequestHandler):
def get(self):
self.write("Hello")
class TestIOStreamWebMixin(object):
def _make_client_iostream(self):
raise NotImplementedError()
def get_app(self):
return Application([('/', HelloHandler)])
def test_connection_closed(self):
# When a server sends a response and then closes the connection,
# the client must be allowed to read the data before the IOStream
# closes itself. Epoll reports closed connections with a separate
# EPOLLRDHUP event delivered at the same time as the read event,
# while kqueue reports them as a second read/write event with an EOF
# flag.
response = self.fetch("/", headers={"Connection": "close"})
response.rethrow()
def test_read_until_close(self):
stream = self._make_client_iostream()
stream.connect(('127.0.0.1', self.get_http_port()), callback=self.stop)
self.wait()
stream.write(b"GET / HTTP/1.0\r\n\r\n")
stream.read_until_close(self.stop)
data = self.wait()
self.assertTrue(data.startswith(b"HTTP/1.1 200"))
self.assertTrue(data.endswith(b"Hello"))
def test_read_zero_bytes(self):
self.stream = self._make_client_iostream()
self.stream.connect(("127.0.0.1", self.get_http_port()),
callback=self.stop)
self.wait()
self.stream.write(b"GET / HTTP/1.0\r\n\r\n")
# normal read
self.stream.read_bytes(9, self.stop)
data = self.wait()
self.assertEqual(data, b"HTTP/1.1 ")
# zero bytes
self.stream.read_bytes(0, self.stop)
data = self.wait()
self.assertEqual(data, b"")
# another normal read
self.stream.read_bytes(3, self.stop)
data = self.wait()
self.assertEqual(data, b"200")
self.stream.close()
def test_write_while_connecting(self):
stream = self._make_client_iostream()
connected = [False]
def connected_callback():
connected[0] = True
self.stop()
stream.connect(("127.0.0.1", self.get_http_port()),
callback=connected_callback)
# unlike the previous tests, try to write before the connection
# is complete.
written = [False]
def write_callback():
written[0] = True
self.stop()
stream.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n",
callback=write_callback)
self.assertTrue(not connected[0])
# by the time the write has flushed, the connection callback has
# also run
try:
self.wait(lambda: connected[0] and written[0])
finally:
logging.debug((connected, written))
stream.read_until_close(self.stop)
data = self.wait()
self.assertTrue(data.endswith(b"Hello"))
stream.close()
@gen_test
def test_future_interface(self):
"""Basic test of IOStream's ability to return Futures."""
stream = self._make_client_iostream()
connect_result = yield stream.connect(
("127.0.0.1", self.get_http_port()))
self.assertIs(connect_result, stream)
yield stream.write(b"GET / HTTP/1.0\r\n\r\n")
first_line = yield stream.read_until(b"\r\n")
self.assertEqual(first_line, b"HTTP/1.1 200 OK\r\n")
# callback=None is equivalent to no callback.
header_data = yield stream.read_until(b"\r\n\r\n", callback=None)
headers = HTTPHeaders.parse(header_data.decode('latin1'))
content_length = int(headers['Content-Length'])
body = yield stream.read_bytes(content_length)
self.assertEqual(body, b'Hello')
stream.close()
@gen_test
def test_future_close_while_reading(self):
stream = self._make_client_iostream()
yield stream.connect(("127.0.0.1", self.get_http_port()))
yield stream.write(b"GET / HTTP/1.0\r\n\r\n")
with self.assertRaises(StreamClosedError):
yield stream.read_bytes(1024 * 1024)
stream.close()
@gen_test
def test_future_read_until_close(self):
# Ensure that the data comes through before the StreamClosedError.
stream = self._make_client_iostream()
yield stream.connect(("127.0.0.1", self.get_http_port()))
yield stream.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n")
yield stream.read_until(b"\r\n\r\n")
body = yield stream.read_until_close()
self.assertEqual(body, b"Hello")
# Nothing else to read; the error comes immediately without waiting
# for yield.
with self.assertRaises(StreamClosedError):
stream.read_bytes(1)
class TestIOStreamMixin(object):
def _make_server_iostream(self, connection, **kwargs):
raise NotImplementedError()
def _make_client_iostream(self, connection, **kwargs):
raise NotImplementedError()
def make_iostream_pair(self, **kwargs):
listener, port = bind_unused_port()
streams = [None, None]
def accept_callback(connection, address):
streams[0] = self._make_server_iostream(connection, **kwargs)
self.stop()
def connect_callback():
streams[1] = client_stream
self.stop()
netutil.add_accept_handler(listener, accept_callback,
io_loop=self.io_loop)
client_stream = self._make_client_iostream(socket.socket(), **kwargs)
client_stream.connect(('127.0.0.1', port),
callback=connect_callback)
self.wait(condition=lambda: all(streams))
self.io_loop.remove_handler(listener.fileno())
listener.close()
return streams
def test_streaming_callback_with_data_in_buffer(self):
server, client = self.make_iostream_pair()
client.write(b"abcd\r\nefgh")
server.read_until(b"\r\n", self.stop)
data = self.wait()
self.assertEqual(data, b"abcd\r\n")
def closed_callback(chunk):
self.fail()
server.read_until_close(callback=closed_callback,
streaming_callback=self.stop)
# self.io_loop.add_timeout(self.io_loop.time() + 0.01, self.stop)
data = self.wait()
self.assertEqual(data, b"efgh")
server.close()
client.close()
def test_write_zero_bytes(self):
# Attempting to write zero bytes should run the callback without
# going into an infinite loop.
server, client = self.make_iostream_pair()
server.write(b'', callback=self.stop)
self.wait()
server.close()
client.close()
def test_connection_refused(self):
# When a connection is refused, the connect callback should not
# be run. (The kqueue IOLoop used to behave differently from the
# epoll IOLoop in this respect)
cleanup_func, port = refusing_port()
self.addCleanup(cleanup_func)
stream = IOStream(socket.socket(), self.io_loop)
self.connect_called = False
def connect_callback():
self.connect_called = True
self.stop()
stream.set_close_callback(self.stop)
# log messages vary by platform and ioloop implementation
with ExpectLog(gen_log, ".*", required=False):
stream.connect(("127.0.0.1", port), connect_callback)
self.wait()
self.assertFalse(self.connect_called)
self.assertTrue(isinstance(stream.error, socket.error), stream.error)
if sys.platform != 'cygwin':
_ERRNO_CONNREFUSED = (errno.ECONNREFUSED,)
if hasattr(errno, "WSAECONNREFUSED"):
_ERRNO_CONNREFUSED += (errno.WSAECONNREFUSED,)
# cygwin's errnos don't match those used on native windows python
self.assertTrue(stream.error.args[0] in _ERRNO_CONNREFUSED)
def test_gaierror(self):
# Test that IOStream sets its exc_info on getaddrinfo error
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = IOStream(s, io_loop=self.io_loop)
stream.set_close_callback(self.stop)
# To reliably generate a gaierror we use a malformed domain name
# instead of a name that's simply unlikely to exist (since
# opendns and some ISPs return bogus addresses for nonexistent
# domains instead of the proper error codes).
with ExpectLog(gen_log, "Connect error"):
stream.connect(('an invalid domain', 54321), callback=self.stop)
self.wait()
self.assertTrue(isinstance(stream.error, socket.gaierror), stream.error)
def test_read_callback_error(self):
# Test that IOStream sets its exc_info when a read callback throws
server, client = self.make_iostream_pair()
try:
server.set_close_callback(self.stop)
with ExpectLog(
app_log, "(Uncaught exception|Exception in callback)"
):
# Clear ExceptionStackContext so IOStream catches error
with NullContext():
server.read_bytes(1, callback=lambda data: 1 / 0)
client.write(b"1")
self.wait()
self.assertTrue(isinstance(server.error, ZeroDivisionError))
finally:
server.close()
client.close()
def test_streaming_callback(self):
server, client = self.make_iostream_pair()
try:
chunks = []
final_called = []
def streaming_callback(data):
chunks.append(data)
self.stop()
def final_callback(data):
self.assertFalse(data)
final_called.append(True)
self.stop()
server.read_bytes(6, callback=final_callback,
streaming_callback=streaming_callback)
client.write(b"1234")
self.wait(condition=lambda: chunks)
client.write(b"5678")
self.wait(condition=lambda: final_called)
self.assertEqual(chunks, [b"1234", b"56"])
# the rest of the last chunk is still in the buffer
server.read_bytes(2, callback=self.stop)
data = self.wait()
self.assertEqual(data, b"78")
finally:
server.close()
client.close()
def test_streaming_until_close(self):
server, client = self.make_iostream_pair()
try:
chunks = []
closed = [False]
def streaming_callback(data):
chunks.append(data)
self.stop()
def close_callback(data):
assert not data, data
closed[0] = True
self.stop()
client.read_until_close(callback=close_callback,
streaming_callback=streaming_callback)
server.write(b"1234")
self.wait(condition=lambda: len(chunks) == 1)
server.write(b"5678", self.stop)
self.wait()
server.close()
self.wait(condition=lambda: closed[0])
self.assertEqual(chunks, [b"1234", b"5678"])
finally:
server.close()
client.close()
def test_streaming_until_close_future(self):
server, client = self.make_iostream_pair()
try:
chunks = []
@gen.coroutine
def client_task():
yield client.read_until_close(streaming_callback=chunks.append)
@gen.coroutine
def server_task():
yield server.write(b"1234")
yield gen.sleep(0.01)
yield server.write(b"5678")
server.close()
@gen.coroutine
def f():
yield [client_task(), server_task()]
self.io_loop.run_sync(f)
self.assertEqual(chunks, [b"1234", b"5678"])
finally:
server.close()
client.close()
def test_delayed_close_callback(self):
# The scenario: Server closes the connection while there is a pending
# read that can be served out of buffered data. The client does not
# run the close_callback as soon as it detects the close, but rather
# defers it until after the buffered read has finished.
server, client = self.make_iostream_pair()
try:
client.set_close_callback(self.stop)
server.write(b"12")
chunks = []
def callback1(data):
chunks.append(data)
client.read_bytes(1, callback2)
server.close()
def callback2(data):
chunks.append(data)
client.read_bytes(1, callback1)
self.wait() # stopped by close_callback
self.assertEqual(chunks, [b"1", b"2"])
finally:
server.close()
client.close()
def test_future_delayed_close_callback(self):
# Same as test_delayed_close_callback, but with the future interface.
server, client = self.make_iostream_pair()
# We can't call make_iostream_pair inside a gen_test function
# because the ioloop is not reentrant.
@gen_test
def f(self):
server.write(b"12")
chunks = []
chunks.append((yield client.read_bytes(1)))
server.close()
chunks.append((yield client.read_bytes(1)))
self.assertEqual(chunks, [b"1", b"2"])
try:
f(self)
finally:
server.close()
client.close()
def test_close_buffered_data(self):
# Similar to the previous test, but with data stored in the OS's
# socket buffers instead of the IOStream's read buffer. Out-of-band
# close notifications must be delayed until all data has been
# drained into the IOStream buffer. (epoll used to use out-of-band
# close events with EPOLLRDHUP, but no longer)
#
# This depends on the read_chunk_size being smaller than the
# OS socket buffer, so make it small.
server, client = self.make_iostream_pair(read_chunk_size=256)
try:
server.write(b"A" * 512)
client.read_bytes(256, self.stop)
data = self.wait()
self.assertEqual(b"A" * 256, data)
server.close()
# Allow the close to propagate to the client side of the
# connection. Using add_callback instead of add_timeout
# doesn't seem to work, even with multiple iterations
self.io_loop.add_timeout(self.io_loop.time() + 0.01, self.stop)
self.wait()
client.read_bytes(256, self.stop)
data = self.wait()
self.assertEqual(b"A" * 256, data)
finally:
server.close()
client.close()
def test_read_until_close_after_close(self):
# Similar to test_delayed_close_callback, but read_until_close takes
# a separate code path so test it separately.
server, client = self.make_iostream_pair()
try:
server.write(b"1234")
server.close()
# Read one byte to make sure the client has received the data.
# It won't run the close callback as long as there is more buffered
# data that could satisfy a later read.
client.read_bytes(1, self.stop)
data = self.wait()
self.assertEqual(data, b"1")
client.read_until_close(self.stop)
data = self.wait()
self.assertEqual(data, b"234")
finally:
server.close()
client.close()
def test_streaming_read_until_close_after_close(self):
# Same as the preceding test but with a streaming_callback.
# All data should go through the streaming callback,
# and the final read callback just gets an empty string.
server, client = self.make_iostream_pair()
try:
server.write(b"1234")
server.close()
client.read_bytes(1, self.stop)
data = self.wait()
self.assertEqual(data, b"1")
streaming_data = []
client.read_until_close(self.stop,
streaming_callback=streaming_data.append)
data = self.wait()
self.assertEqual(b'', data)
self.assertEqual(b''.join(streaming_data), b"234")
finally:
server.close()
client.close()
def test_large_read_until(self):
# Performance test: read_until used to have a quadratic component
# so a read_until of 4MB would take 8 seconds; now it takes 0.25
# seconds.
server, client = self.make_iostream_pair()
try:
# This test fails on pypy with ssl. I think it's because
# pypy's gc moves objects, breaking the
# "frozen write buffer" assumption.
if (isinstance(server, SSLIOStream) and
platform.python_implementation() == 'PyPy'):
raise unittest.SkipTest(
"pypy gc causes problems with openssl")
NUM_KB = 4096
for i in range(NUM_KB):
client.write(b"A" * 1024)
client.write(b"\r\n")
server.read_until(b"\r\n", self.stop)
data = self.wait()
self.assertEqual(len(data), NUM_KB * 1024 + 2)
finally:
server.close()
client.close()
def test_close_callback_with_pending_read(self):
# Regression test for a bug that was introduced in 2.3
# where the IOStream._close_callback would never be called
# if there were pending reads.
OK = b"OK\r\n"
server, client = self.make_iostream_pair()
client.set_close_callback(self.stop)
try:
server.write(OK)
client.read_until(b"\r\n", self.stop)
res = self.wait()
self.assertEqual(res, OK)
server.close()
client.read_until(b"\r\n", lambda x: x)
# If _close_callback (self.stop) is not called,
# an AssertionError: Async operation timed out after 5 seconds
# will be raised.
res = self.wait()
self.assertTrue(res is None)
finally:
server.close()
client.close()
@skipIfNonUnix
def test_inline_read_error(self):
# An error on an inline read is raised without logging (on the
# assumption that it will eventually be noticed or logged further
# up the stack).
#
# This test is posix-only because windows os.close() doesn't work
# on socket FDs, but we can't close the socket object normally
# because we won't get the error we want if the socket knows
# it's closed.
server, client = self.make_iostream_pair()
try:
os.close(server.socket.fileno())
with self.assertRaises(socket.error):
server.read_bytes(1, lambda data: None)
finally:
server.close()
client.close()
def test_async_read_error_logging(self):
# Socket errors on asynchronous reads should be logged (but only
# once).
server, client = self.make_iostream_pair()
server.set_close_callback(self.stop)
try:
# Start a read that will be fulfilled asynchronously.
server.read_bytes(1, lambda data: None)
client.write(b'a')
# Stub out read_from_fd to make it fail.
def fake_read_from_fd():
os.close(server.socket.fileno())
server.__class__.read_from_fd(server)
server.read_from_fd = fake_read_from_fd
# This log message is from _handle_read (not read_from_fd).
with ExpectLog(gen_log, "error on read"):
self.wait()
finally:
server.close()
client.close()
def test_future_close_callback(self):
# Regression test for interaction between the Future read interfaces
# and IOStream._maybe_add_error_listener.
server, client = self.make_iostream_pair()
closed = [False]
def close_callback():
closed[0] = True
self.stop()
server.set_close_callback(close_callback)
try:
client.write(b'a')
future = server.read_bytes(1)
self.io_loop.add_future(future, self.stop)
self.assertEqual(self.wait().result(), b'a')
self.assertFalse(closed[0])
client.close()
self.wait()
self.assertTrue(closed[0])
finally:
server.close()
client.close()
def test_read_bytes_partial(self):
server, client = self.make_iostream_pair()
try:
# Ask for more than is available with partial=True
client.read_bytes(50, self.stop, partial=True)
server.write(b"hello")
data = self.wait()
self.assertEqual(data, b"hello")
# Ask for less than what is available; num_bytes is still
# respected.
client.read_bytes(3, self.stop, partial=True)
server.write(b"world")
data = self.wait()
self.assertEqual(data, b"wor")
# Partial reads won't return an empty string, but read_bytes(0)
# will.
client.read_bytes(0, self.stop, partial=True)
data = self.wait()
self.assertEqual(data, b'')
finally:
server.close()
client.close()
def test_read_until_max_bytes(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Extra room under the limit
client.read_until(b"def", self.stop, max_bytes=50)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
# Just enough space
client.read_until(b"def", self.stop, max_bytes=6)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
# Not enough space, but we don't know it until the delimiter is
# found. All we can do is log a warning and close the connection.
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until(b"def", self.stop, max_bytes=5)
server.write(b"123456")
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_max_bytes_inline(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Similar to the error case in the previous test, but the
# server writes first so client reads are satisfied
# inline. For consistency with the out-of-line case, we
# do not raise the error synchronously.
server.write(b"123456")
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_max_bytes_ignores_extra(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Even though data that matches arrives in the same packet that
# puts us over the limit, we fail the request because it was not
# found within the limit.
server.write(b"abcdef")
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_regex_max_bytes(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Extra room under the limit
client.read_until_regex(b"def", self.stop, max_bytes=50)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
# Just enough space
client.read_until_regex(b"def", self.stop, max_bytes=6)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
# Not enough space, but we don't know it until the delimiter is
# found. All we can do is log a warning and close the connection.
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until_regex(b"def", self.stop, max_bytes=5)
server.write(b"123456")
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_regex_max_bytes_inline(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Similar to the error case in the previous test, but the
# server writes first so client reads are satisfied
# inline. For consistency with the out-of-line case, we
# do not raise the error synchronously.
server.write(b"123456")
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until_regex(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_regex_max_bytes_ignores_extra(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Even though data that matches arrives in the same packet that
# puts us over the limit, we fail the request because it was not
# found within the limit.
server.write(b"abcdef")
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until_regex(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_small_reads_from_large_buffer(self):
# 10KB buffer size, 100KB available to read.
# Read 1KB at a time and make sure that the buffer is not eagerly
# filled.
server, client = self.make_iostream_pair(max_buffer_size=10 * 1024)
try:
server.write(b"a" * 1024 * 100)
for i in range(100):
client.read_bytes(1024, self.stop)
data = self.wait()
self.assertEqual(data, b"a" * 1024)
finally:
server.close()
client.close()
def test_small_read_untils_from_large_buffer(self):
# 10KB buffer size, 100KB available to read.
# Read 1KB at a time and make sure that the buffer is not eagerly
# filled.
server, client = self.make_iostream_pair(max_buffer_size=10 * 1024)
try:
server.write((b"a" * 1023 + b"\n") * 100)
for i in range(100):
client.read_until(b"\n", self.stop, max_bytes=4096)
data = self.wait()
self.assertEqual(data, b"a" * 1023 + b"\n")
finally:
server.close()
client.close()
def test_flow_control(self):
MB = 1024 * 1024
server, client = self.make_iostream_pair(max_buffer_size=5 * MB)
try:
# Client writes more than the server will accept.
client.write(b"a" * 10 * MB)
# The server pauses while reading.
server.read_bytes(MB, self.stop)
self.wait()
self.io_loop.call_later(0.1, self.stop)
self.wait()
# The client's writes have been blocked; the server can
# continue to read gradually.
for i in range(9):
server.read_bytes(MB, self.stop)
self.wait()
finally:
server.close()
client.close()
class TestIOStreamWebHTTP(TestIOStreamWebMixin, AsyncHTTPTestCase):
def _make_client_iostream(self):
return IOStream(socket.socket(), io_loop=self.io_loop)
class TestIOStreamWebHTTPS(TestIOStreamWebMixin, AsyncHTTPSTestCase):
def _make_client_iostream(self):
return SSLIOStream(socket.socket(), io_loop=self.io_loop,
ssl_options=dict(cert_reqs=ssl.CERT_NONE))
class TestIOStream(TestIOStreamMixin, AsyncTestCase):
def _make_server_iostream(self, connection, **kwargs):
return IOStream(connection, **kwargs)
def _make_client_iostream(self, connection, **kwargs):
return IOStream(connection, **kwargs)
class TestIOStreamSSL(TestIOStreamMixin, AsyncTestCase):
def _make_server_iostream(self, connection, **kwargs):
connection = ssl.wrap_socket(connection,
server_side=True,
do_handshake_on_connect=False,
**_server_ssl_options())
return SSLIOStream(connection, io_loop=self.io_loop, **kwargs)
def _make_client_iostream(self, connection, **kwargs):
return SSLIOStream(connection, io_loop=self.io_loop,
ssl_options=dict(cert_reqs=ssl.CERT_NONE),
**kwargs)
# This will run some tests that are basically redundant but it's the
# simplest way to make sure that it works to pass an SSLContext
# instead of an ssl_options dict to the SSLIOStream constructor.
@unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present')
class TestIOStreamSSLContext(TestIOStreamMixin, AsyncTestCase):
def _make_server_iostream(self, connection, **kwargs):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(
os.path.join(os.path.dirname(__file__), 'test.crt'),
os.path.join(os.path.dirname(__file__), 'test.key'))
connection = ssl_wrap_socket(connection, context,
server_side=True,
do_handshake_on_connect=False)
return SSLIOStream(connection, io_loop=self.io_loop, **kwargs)
def _make_client_iostream(self, connection, **kwargs):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
return SSLIOStream(connection, io_loop=self.io_loop,
ssl_options=context, **kwargs)
class TestIOStreamStartTLS(AsyncTestCase):
def setUp(self):
try:
super(TestIOStreamStartTLS, self).setUp()
self.listener, self.port = bind_unused_port()
self.server_stream = None
self.server_accepted = Future()
netutil.add_accept_handler(self.listener, self.accept)
self.client_stream = IOStream(socket.socket())
self.io_loop.add_future(self.client_stream.connect(
('127.0.0.1', self.port)), self.stop)
self.wait()
self.io_loop.add_future(self.server_accepted, self.stop)
self.wait()
except Exception as e:
print(e)
raise
def tearDown(self):
if self.server_stream is not None:
self.server_stream.close()
if self.client_stream is not None:
self.client_stream.close()
self.listener.close()
super(TestIOStreamStartTLS, self).tearDown()
def accept(self, connection, address):
if self.server_stream is not None:
self.fail("should only get one connection")
self.server_stream = IOStream(connection)
self.server_accepted.set_result(None)
@gen.coroutine
def client_send_line(self, line):
self.client_stream.write(line)
recv_line = yield self.server_stream.read_until(b"\r\n")
self.assertEqual(line, recv_line)
@gen.coroutine
def server_send_line(self, line):
self.server_stream.write(line)
recv_line = yield self.client_stream.read_until(b"\r\n")
self.assertEqual(line, recv_line)
def client_start_tls(self, ssl_options=None, server_hostname=None):
client_stream = self.client_stream
self.client_stream = None
return client_stream.start_tls(False, ssl_options, server_hostname)
def server_start_tls(self, ssl_options=None):
server_stream = self.server_stream
self.server_stream = None
return server_stream.start_tls(True, ssl_options)
@gen_test
def test_start_tls_smtp(self):
# This flow is simplified from RFC 3207 section 5.
# We don't really need all of this, but it helps to make sure
# that after realistic back-and-forth traffic the buffers end up
# in a sane state.
yield self.server_send_line(b"220 mail.example.com ready\r\n")
yield self.client_send_line(b"EHLO mail.example.com\r\n")
yield self.server_send_line(b"250-mail.example.com welcome\r\n")
yield self.server_send_line(b"250 STARTTLS\r\n")
yield self.client_send_line(b"STARTTLS\r\n")
yield self.server_send_line(b"220 Go ahead\r\n")
client_future = self.client_start_tls(dict(cert_reqs=ssl.CERT_NONE))
server_future = self.server_start_tls(_server_ssl_options())
self.client_stream = yield client_future
self.server_stream = yield server_future
self.assertTrue(isinstance(self.client_stream, SSLIOStream))
self.assertTrue(isinstance(self.server_stream, SSLIOStream))
yield self.client_send_line(b"EHLO mail.example.com\r\n")
yield self.server_send_line(b"250 mail.example.com welcome\r\n")
@gen_test
def test_handshake_fail(self):
server_future = self.server_start_tls(_server_ssl_options())
# Certificates are verified with the default configuration.
client_future = self.client_start_tls(server_hostname="localhost")
with ExpectLog(gen_log, "SSL Error"):
with self.assertRaises(ssl.SSLError):
yield client_future
with self.assertRaises((ssl.SSLError, socket.error)):
yield server_future
@unittest.skipIf(not hasattr(ssl, 'create_default_context'),
'ssl.create_default_context not present')
@gen_test
def test_check_hostname(self):
# Test that server_hostname parameter to start_tls is being used.
        # The check_hostname functionality is only available in
        # Python 2.7+ (for 2.x) and Python 3.4+ (for 3.x).
server_future = self.server_start_tls(_server_ssl_options())
client_future = self.client_start_tls(
ssl.create_default_context(),
server_hostname=b'127.0.0.1')
with ExpectLog(gen_log, "SSL Error"):
with self.assertRaises(ssl.SSLError):
yield client_future
with self.assertRaises((ssl.SSLError, socket.error)):
yield server_future
class WaitForHandshakeTest(AsyncTestCase):
@gen.coroutine
def connect_to_server(self, server_cls):
server = client = None
try:
sock, port = bind_unused_port()
server = server_cls(ssl_options=_server_ssl_options())
server.add_socket(sock)
client = SSLIOStream(socket.socket(),
ssl_options=dict(cert_reqs=ssl.CERT_NONE))
yield client.connect(('127.0.0.1', port))
self.assertIsNotNone(client.socket.cipher())
finally:
if server is not None:
server.stop()
if client is not None:
client.close()
@gen_test
def test_wait_for_handshake_callback(self):
test = self
handshake_future = Future()
class TestServer(TCPServer):
def handle_stream(self, stream, address):
# The handshake has not yet completed.
test.assertIsNone(stream.socket.cipher())
self.stream = stream
stream.wait_for_handshake(self.handshake_done)
def handshake_done(self):
# Now the handshake is done and ssl information is available.
test.assertIsNotNone(self.stream.socket.cipher())
handshake_future.set_result(None)
yield self.connect_to_server(TestServer)
yield handshake_future
@gen_test
def test_wait_for_handshake_future(self):
test = self
handshake_future = Future()
class TestServer(TCPServer):
def handle_stream(self, stream, address):
test.assertIsNone(stream.socket.cipher())
test.io_loop.spawn_callback(self.handle_connection, stream)
@gen.coroutine
def handle_connection(self, stream):
yield stream.wait_for_handshake()
handshake_future.set_result(None)
yield self.connect_to_server(TestServer)
yield handshake_future
@gen_test
def test_wait_for_handshake_already_waiting_error(self):
test = self
handshake_future = Future()
class TestServer(TCPServer):
def handle_stream(self, stream, address):
stream.wait_for_handshake(self.handshake_done)
test.assertRaises(RuntimeError, stream.wait_for_handshake)
def handshake_done(self):
handshake_future.set_result(None)
yield self.connect_to_server(TestServer)
yield handshake_future
@gen_test
def test_wait_for_handshake_already_connected(self):
handshake_future = Future()
class TestServer(TCPServer):
def handle_stream(self, stream, address):
self.stream = stream
stream.wait_for_handshake(self.handshake_done)
def handshake_done(self):
self.stream.wait_for_handshake(self.handshake2_done)
def handshake2_done(self):
handshake_future.set_result(None)
yield self.connect_to_server(TestServer)
yield handshake_future
@skipIfNonUnix
class TestPipeIOStream(AsyncTestCase):
def test_pipe_iostream(self):
r, w = os.pipe()
rs = PipeIOStream(r, io_loop=self.io_loop)
ws = PipeIOStream(w, io_loop=self.io_loop)
ws.write(b"hel")
ws.write(b"lo world")
rs.read_until(b' ', callback=self.stop)
data = self.wait()
self.assertEqual(data, b"hello ")
rs.read_bytes(3, self.stop)
data = self.wait()
self.assertEqual(data, b"wor")
ws.close()
rs.read_until_close(self.stop)
data = self.wait()
self.assertEqual(data, b"ld")
rs.close()
def test_pipe_iostream_big_write(self):
r, w = os.pipe()
rs = PipeIOStream(r, io_loop=self.io_loop)
ws = PipeIOStream(w, io_loop=self.io_loop)
NUM_BYTES = 1048576
# Write 1MB of data, which should fill the buffer
ws.write(b"1" * NUM_BYTES)
rs.read_bytes(NUM_BYTES, self.stop)
data = self.wait()
self.assertEqual(data, b"1" * NUM_BYTES)
ws.close()
rs.close()
|
zero-ui/miniblink49
|
refs/heads/master
|
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
|
11
|
# Copyright (C) 2012 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner, Sharder, TestRunInterruptedException
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_run_results import TestRunResults
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.layout_tests.models.test_results import TestResult
from webkitpy.layout_tests.port.test import TestPort
TestExpectations = test_expectations.TestExpectations
class FakePrinter(object):
num_completed = 0
num_tests = 0
def print_expected(self, run_results, get_tests_with_result_type):
pass
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
pass
def print_started_test(self, test_name):
pass
def print_finished_test(self, result, expected, exp_str, got_str):
pass
def write(self, msg):
pass
def write_update(self, msg):
pass
def flush(self):
pass
class LockCheckingRunner(LayoutTestRunner):
def __init__(self, port, options, printer, tester, http_lock):
super(LockCheckingRunner, self).__init__(options, port, printer, port.results_directory(), lambda test_name: False)
self._finished_list_called = False
self._tester = tester
self._should_have_http_lock = http_lock
def handle_finished_list(self, source, list_name, num_tests, elapsed_time):
if not self._finished_list_called:
self._tester.assertEqual(list_name, 'locked_tests')
self._tester.assertTrue(self._remaining_locked_shards)
self._tester.assertTrue(self._has_http_lock is self._should_have_http_lock)
super(LockCheckingRunner, self).handle_finished_list(source, list_name, num_tests, elapsed_time)
if not self._finished_list_called:
self._tester.assertEqual(self._remaining_locked_shards, [])
self._tester.assertFalse(self._has_http_lock)
self._finished_list_called = True
class LayoutTestRunnerTests(unittest.TestCase):
def _runner(self, port=None):
# FIXME: we shouldn't have to use run_webkit_tests.py to get the options we need.
options = run_webkit_tests.parse_args(['--platform', 'test-mac-snowleopard'])[0]
options.child_processes = '1'
host = MockHost()
port = port or host.port_factory.get(options.platform, options=options)
return LockCheckingRunner(port, options, FakePrinter(), self, True)
def _run_tests(self, runner, tests):
test_inputs = [TestInput(test, 6000) for test in tests]
expectations = TestExpectations(runner._port, tests)
runner.run_tests(expectations, test_inputs, set(), num_workers=1, retrying=False)
def test_interrupt_if_at_failure_limits(self):
runner = self._runner()
runner._options.exit_after_n_failures = None
        runner._options.exit_after_n_crashes_or_timeouts = None
test_names = ['passes/text.html', 'passes/image.html']
runner._test_inputs = [TestInput(test_name, 6000) for test_name in test_names]
run_results = TestRunResults(TestExpectations(runner._port, test_names), len(test_names))
run_results.unexpected_failures = 100
run_results.unexpected_crashes = 50
run_results.unexpected_timeouts = 50
# No exception when the exit_after* options are None.
runner._interrupt_if_at_failure_limits(run_results)
# No exception when we haven't hit the limit yet.
runner._options.exit_after_n_failures = 101
runner._options.exit_after_n_crashes_or_timeouts = 101
runner._interrupt_if_at_failure_limits(run_results)
# Interrupt if we've exceeded either limit:
runner._options.exit_after_n_crashes_or_timeouts = 10
self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
self.assertEqual(run_results.results_by_name['passes/text.html'].type, test_expectations.SKIP)
self.assertEqual(run_results.results_by_name['passes/image.html'].type, test_expectations.SKIP)
runner._options.exit_after_n_crashes_or_timeouts = None
runner._options.exit_after_n_failures = 10
        self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
def test_update_summary_with_result(self):
# Reftests expected to be image mismatch should be respected when pixel_tests=False.
runner = self._runner()
runner._options.pixel_tests = False
test = 'failures/expected/reftest.html'
expectations = TestExpectations(runner._port, tests=[test])
runner._expectations = expectations
run_results = TestRunResults(expectations, 1)
result = TestResult(test_name=test, failures=[test_failures.FailureReftestMismatchDidNotOccur()], reftest_type=['!='])
runner._update_summary_with_result(run_results, result)
self.assertEqual(1, run_results.expected)
self.assertEqual(0, run_results.unexpected)
run_results = TestRunResults(expectations, 1)
result = TestResult(test_name=test, failures=[], reftest_type=['=='])
runner._update_summary_with_result(run_results, result)
self.assertEqual(0, run_results.expected)
self.assertEqual(1, run_results.unexpected)
class SharderTests(unittest.TestCase):
test_list = [
"http/tests/websocket/tests/unicode.htm",
"animations/keyframes.html",
"http/tests/security/view-source-no-refresh.html",
"http/tests/websocket/tests/websocket-protocol-ignored.html",
"fast/css/display-none-inline-style-change-crash.html",
"http/tests/xmlhttprequest/supported-xml-content-types.html",
"dom/html/level2/html/HTMLAnchorElement03.html",
"dom/html/level2/html/HTMLAnchorElement06.html",
"perf/object-keys.html",
"virtual/threaded/dir/test.html",
"virtual/threaded/fast/foo/test.html",
]
def get_test_input(self, test_file):
return TestInput(test_file, requires_lock=(test_file.startswith('http') or test_file.startswith('perf')))
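        # For example, with this suite's test_list the 'http/tests/...' and
        # 'perf/object-keys.html' inputs get requires_lock=True, while
        # 'animations/keyframes.html' does not.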
def get_shards(self, num_workers, fully_parallel, run_singly, test_list=None, max_locked_shards=1):
port = TestPort(MockSystemHost())
self.sharder = Sharder(port.split_test, max_locked_shards)
test_list = test_list or self.test_list
return self.sharder.shard_tests([self.get_test_input(test) for test in test_list],
num_workers, fully_parallel, run_singly)
def assert_shards(self, actual_shards, expected_shard_names):
self.assertEqual(len(actual_shards), len(expected_shard_names))
for i, shard in enumerate(actual_shards):
expected_shard_name, expected_test_names = expected_shard_names[i]
self.assertEqual(shard.name, expected_shard_name)
self.assertEqual([test_input.test_name for test_input in shard.test_inputs],
expected_test_names)
def test_shard_by_dir(self):
locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False, run_singly=False)
# Note that although there are tests in multiple dirs that need locks,
# they are crammed into a single shard in order to reduce the # of
# workers hitting the server at once.
self.assert_shards(locked,
[('locked_shard_1',
['http/tests/security/view-source-no-refresh.html',
'http/tests/websocket/tests/unicode.htm',
'http/tests/websocket/tests/websocket-protocol-ignored.html',
'http/tests/xmlhttprequest/supported-xml-content-types.html',
'perf/object-keys.html'])])
self.assert_shards(unlocked,
[('virtual/threaded/dir', ['virtual/threaded/dir/test.html']),
('virtual/threaded/fast/foo', ['virtual/threaded/fast/foo/test.html']),
('animations', ['animations/keyframes.html']),
('dom/html/level2/html', ['dom/html/level2/html/HTMLAnchorElement03.html',
'dom/html/level2/html/HTMLAnchorElement06.html']),
('fast/css', ['fast/css/display-none-inline-style-change-crash.html'])])
def test_shard_every_file(self):
locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True, max_locked_shards=2, run_singly=False)
self.assert_shards(locked,
[('locked_shard_1',
['http/tests/websocket/tests/unicode.htm',
'http/tests/security/view-source-no-refresh.html',
'http/tests/websocket/tests/websocket-protocol-ignored.html']),
('locked_shard_2',
['http/tests/xmlhttprequest/supported-xml-content-types.html',
            'perf/object-keys.html'])])
self.assert_shards(unlocked,
[('virtual/threaded/dir', ['virtual/threaded/dir/test.html']),
('virtual/threaded/fast/foo', ['virtual/threaded/fast/foo/test.html']),
('.', ['animations/keyframes.html']),
('.', ['fast/css/display-none-inline-style-change-crash.html']),
('.', ['dom/html/level2/html/HTMLAnchorElement03.html']),
('.', ['dom/html/level2/html/HTMLAnchorElement06.html'])])
def test_shard_in_two(self):
locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, run_singly=False)
self.assert_shards(locked,
[('locked_tests',
['http/tests/websocket/tests/unicode.htm',
'http/tests/security/view-source-no-refresh.html',
'http/tests/websocket/tests/websocket-protocol-ignored.html',
'http/tests/xmlhttprequest/supported-xml-content-types.html',
'perf/object-keys.html'])])
self.assert_shards(unlocked,
[('unlocked_tests',
['animations/keyframes.html',
'fast/css/display-none-inline-style-change-crash.html',
'dom/html/level2/html/HTMLAnchorElement03.html',
'dom/html/level2/html/HTMLAnchorElement06.html',
'virtual/threaded/dir/test.html',
'virtual/threaded/fast/foo/test.html'])])
def test_shard_in_two_has_no_locked_shards(self):
locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, run_singly=False,
test_list=['animations/keyframe.html'])
self.assertEqual(len(locked), 0)
self.assertEqual(len(unlocked), 1)
def test_shard_in_two_has_no_unlocked_shards(self):
locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, run_singly=False,
test_list=['http/tests/websocket/tests/unicode.htm'])
self.assertEqual(len(locked), 1)
self.assertEqual(len(unlocked), 0)
def test_multiple_locked_shards(self):
locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False, max_locked_shards=2, run_singly=False)
self.assert_shards(locked,
[('locked_shard_1',
['http/tests/security/view-source-no-refresh.html',
'http/tests/websocket/tests/unicode.htm',
'http/tests/websocket/tests/websocket-protocol-ignored.html']),
('locked_shard_2',
['http/tests/xmlhttprequest/supported-xml-content-types.html',
'perf/object-keys.html'])])
locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False, run_singly=False)
self.assert_shards(locked,
[('locked_shard_1',
['http/tests/security/view-source-no-refresh.html',
'http/tests/websocket/tests/unicode.htm',
'http/tests/websocket/tests/websocket-protocol-ignored.html',
'http/tests/xmlhttprequest/supported-xml-content-types.html',
'perf/object-keys.html'])])
def test_virtual_shards(self):
# With run_singly=False, we try to keep all of the tests in a virtual suite together even
# when fully_parallel=True, so that we don't restart every time the command line args change.
locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True, max_locked_shards=2, run_singly=False,
test_list=['virtual/foo/bar1.html', 'virtual/foo/bar2.html'])
self.assert_shards(unlocked,
[('virtual/foo', ['virtual/foo/bar1.html', 'virtual/foo/bar2.html'])])
# But, with run_singly=True, we have to restart every time anyway, so we want full parallelism.
locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True, max_locked_shards=2, run_singly=True,
test_list=['virtual/foo/bar1.html', 'virtual/foo/bar2.html'])
self.assert_shards(unlocked,
[('.', ['virtual/foo/bar1.html']),
('.', ['virtual/foo/bar2.html'])])
|
gleniooliveira/simplemooc
|
refs/heads/master
|
bin/player.py
|
1
|
#!/home/develop/Projeto/venv/bin/python3.5
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
# --------------------------------------------------------------------
# an image animation player
class UI(Label):
def __init__(self, master, im):
if isinstance(im, list):
            # list of images: display the first frame and queue the rest
            self.im = im[1:]
            im = im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
try:
duration = im.info["duration"]
except KeyError:
duration = 100
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
self.image.paste(im)
except EOFError:
return # end of file
try:
duration = im.info["duration"]
except KeyError:
duration = 100
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
|
JocelynDelalande/xhtml2pdf
|
refs/heads/master
|
demo/tgpisa/tgpisa/config/__init__.py
|
12133432
| |
stanlee321/pysolper
|
refs/heads/master
|
permit/lib/dist/jinja2/_markupsafe/_constants.py
|
1535
|
# -*- coding: utf-8 -*-
"""
markupsafe._constants
~~~~~~~~~~~~~~~~~~~~~
    HTML entity definitions used by the Markup string implementation.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
HTML_ENTITIES = {
'AElig': 198,
'Aacute': 193,
'Acirc': 194,
'Agrave': 192,
'Alpha': 913,
'Aring': 197,
'Atilde': 195,
'Auml': 196,
'Beta': 914,
'Ccedil': 199,
'Chi': 935,
'Dagger': 8225,
'Delta': 916,
'ETH': 208,
'Eacute': 201,
'Ecirc': 202,
'Egrave': 200,
'Epsilon': 917,
'Eta': 919,
'Euml': 203,
'Gamma': 915,
'Iacute': 205,
'Icirc': 206,
'Igrave': 204,
'Iota': 921,
'Iuml': 207,
'Kappa': 922,
'Lambda': 923,
'Mu': 924,
'Ntilde': 209,
'Nu': 925,
'OElig': 338,
'Oacute': 211,
'Ocirc': 212,
'Ograve': 210,
'Omega': 937,
'Omicron': 927,
'Oslash': 216,
'Otilde': 213,
'Ouml': 214,
'Phi': 934,
'Pi': 928,
'Prime': 8243,
'Psi': 936,
'Rho': 929,
'Scaron': 352,
'Sigma': 931,
'THORN': 222,
'Tau': 932,
'Theta': 920,
'Uacute': 218,
'Ucirc': 219,
'Ugrave': 217,
'Upsilon': 933,
'Uuml': 220,
'Xi': 926,
'Yacute': 221,
'Yuml': 376,
'Zeta': 918,
'aacute': 225,
'acirc': 226,
'acute': 180,
'aelig': 230,
'agrave': 224,
'alefsym': 8501,
'alpha': 945,
'amp': 38,
'and': 8743,
'ang': 8736,
'apos': 39,
'aring': 229,
'asymp': 8776,
'atilde': 227,
'auml': 228,
'bdquo': 8222,
'beta': 946,
'brvbar': 166,
'bull': 8226,
'cap': 8745,
'ccedil': 231,
'cedil': 184,
'cent': 162,
'chi': 967,
'circ': 710,
'clubs': 9827,
'cong': 8773,
'copy': 169,
'crarr': 8629,
'cup': 8746,
'curren': 164,
'dArr': 8659,
'dagger': 8224,
'darr': 8595,
'deg': 176,
'delta': 948,
'diams': 9830,
'divide': 247,
'eacute': 233,
'ecirc': 234,
'egrave': 232,
'empty': 8709,
'emsp': 8195,
'ensp': 8194,
'epsilon': 949,
'equiv': 8801,
'eta': 951,
'eth': 240,
'euml': 235,
'euro': 8364,
'exist': 8707,
'fnof': 402,
'forall': 8704,
'frac12': 189,
'frac14': 188,
'frac34': 190,
'frasl': 8260,
'gamma': 947,
'ge': 8805,
'gt': 62,
'hArr': 8660,
'harr': 8596,
'hearts': 9829,
'hellip': 8230,
'iacute': 237,
'icirc': 238,
'iexcl': 161,
'igrave': 236,
'image': 8465,
'infin': 8734,
'int': 8747,
'iota': 953,
'iquest': 191,
'isin': 8712,
'iuml': 239,
'kappa': 954,
'lArr': 8656,
'lambda': 955,
'lang': 9001,
'laquo': 171,
'larr': 8592,
'lceil': 8968,
'ldquo': 8220,
'le': 8804,
'lfloor': 8970,
'lowast': 8727,
'loz': 9674,
'lrm': 8206,
'lsaquo': 8249,
'lsquo': 8216,
'lt': 60,
'macr': 175,
'mdash': 8212,
'micro': 181,
'middot': 183,
'minus': 8722,
'mu': 956,
'nabla': 8711,
'nbsp': 160,
'ndash': 8211,
'ne': 8800,
'ni': 8715,
'not': 172,
'notin': 8713,
'nsub': 8836,
'ntilde': 241,
'nu': 957,
'oacute': 243,
'ocirc': 244,
'oelig': 339,
'ograve': 242,
'oline': 8254,
'omega': 969,
'omicron': 959,
'oplus': 8853,
'or': 8744,
'ordf': 170,
'ordm': 186,
'oslash': 248,
'otilde': 245,
'otimes': 8855,
'ouml': 246,
'para': 182,
'part': 8706,
'permil': 8240,
'perp': 8869,
'phi': 966,
'pi': 960,
'piv': 982,
'plusmn': 177,
'pound': 163,
'prime': 8242,
'prod': 8719,
'prop': 8733,
'psi': 968,
'quot': 34,
'rArr': 8658,
'radic': 8730,
'rang': 9002,
'raquo': 187,
'rarr': 8594,
'rceil': 8969,
'rdquo': 8221,
'real': 8476,
'reg': 174,
'rfloor': 8971,
'rho': 961,
'rlm': 8207,
'rsaquo': 8250,
'rsquo': 8217,
'sbquo': 8218,
'scaron': 353,
'sdot': 8901,
'sect': 167,
'shy': 173,
'sigma': 963,
'sigmaf': 962,
'sim': 8764,
'spades': 9824,
'sub': 8834,
'sube': 8838,
'sum': 8721,
'sup': 8835,
'sup1': 185,
'sup2': 178,
'sup3': 179,
'supe': 8839,
'szlig': 223,
'tau': 964,
'there4': 8756,
'theta': 952,
'thetasym': 977,
'thinsp': 8201,
'thorn': 254,
'tilde': 732,
'times': 215,
'trade': 8482,
'uArr': 8657,
'uacute': 250,
'uarr': 8593,
'ucirc': 251,
'ugrave': 249,
'uml': 168,
'upsih': 978,
'upsilon': 965,
'uuml': 252,
'weierp': 8472,
'xi': 958,
'yacute': 253,
'yen': 165,
'yuml': 255,
'zeta': 950,
'zwj': 8205,
'zwnj': 8204
}
|
RDCEP/EDE
|
refs/heads/master
|
ede/crawler/crawler/items.py
|
5
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class CrawlerItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
JamesJeffryes/MINE-Database
|
refs/heads/master
|
Scripts/generate_metanetx_database.py
|
1
|
"""A script to generate a metanetx database.
The purpose of the metanetx database is to provide mapping
from InChI keys to a number of database identifiers. This database
will then populate the website if there is an InChI match.
Running this script requires downloading the following from
https://www.metanetx.org/mnxdoc/mnxref.html
1. chem_xref.tsv
2. chem_prop.tsv
The data version used is based on the description in the header:
#Based on the following resources:
#
#RESOURCE: MetaNetX/MNXref
#VERSION: 4.1
#DATE: 2020/09/17
#URL: https://www.metanetx.org
"""
from pathlib import Path
import pandas as pd
from collections import defaultdict
import pymongo
pwd = Path(__file__)
pwd = pwd.parent
METANETX_PATH = (pwd / "../local_data/metanetx").resolve()
def get_cross_references(row):
current_reference = {}
if ":" in row["#source"]:
current_reference["source"] = row["#source"].split(":")[0]
current_reference["source_id"] = row["#source"].split(":")[1]
else:
current_reference["source"] = row["#source"]
current_reference["source_id"] = row["#source"]
current_reference["description"] = (
row["description"] if not pd.isna(row["description"])
else None
)
cross_ref_dict[row.ID].append(current_reference)
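# For illustration (the values are made up): a chem_xref row such as
#   {"#source": "hmdb:HMDB00001", "ID": "MNXM1", "description": "H(+)"}
# produces the cross-reference entry
#   {"source": "hmdb", "source_id": "HMDB00001", "description": "H(+)"}
# appended under cross_ref_dict["MNXM1"].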
def get_db_entry(row):
dict_for_db[row["#ID"]] = {
"mnxm_id": row["#ID"],
"inchi_key": row.InChIKey,
"primary_reference": row.reference,
"cross_references": cross_ref_dict[row["#ID"]]
}
if __name__ == "__main__":
    # First step: generate pandas DataFrames from the xref and prop tables
skiprows = 347
chem_prop_df = pd.read_csv(
METANETX_PATH / "chem_prop.tsv",
delimiter="\t",
skiprows=skiprows
)
chem_prop_df = chem_prop_df[~chem_prop_df["InChIKey"].isna()]
chem_prop_df = chem_prop_df[~chem_prop_df["formula"].isna()]
chem_xref_df = pd.read_csv(
METANETX_PATH / "chem_xref.tsv",
delimiter="\t",
skiprows=skiprows
)
# Map functions on pandas dataframes to populate dictionaries
cross_ref_dict = defaultdict(list)
dict_for_db = dict()
chem_xref_df.apply(get_cross_references, axis=1)
chem_prop_df.apply(get_db_entry, axis=1)
print("Inserting into Mongo.")
mongo_uri = open(pwd / "../mongo_uri.csv").readline().strip("\n")
client = pymongo.MongoClient(mongo_uri)
client.compound_references.data.insert_many(dict_for_db.values(), ordered=False)
|
xiangel/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/conf/locale/sr_Latn/formats.py
|
235
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
# '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.'
# '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.'
# '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
andrewmoses/ssquiz
|
refs/heads/master
|
flask/lib/python2.7/locale.py
|
13
|
""" Locale support.
The module provides low-level access to the C lib's locale APIs
and adds high level number formatting APIs as well as a locale
aliasing engine to complement these.
The aliasing engine includes support for many commonly used locale
names and maps them to values suitable for passing to the C lib's
setlocale() function. It also includes default encodings for all
supported locale names.
"""
import sys
import encodings
import encodings.aliases
import re
import operator
import functools
try:
_unicode = unicode
except NameError:
# If Python is built without Unicode support, the unicode type
# will not exist. Fake one.
class _unicode(object):
pass
# Try importing the _locale module.
#
# If this fails, fall back on a basic 'C' locale emulation.
# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
# trying the import. So __all__ is also fiddled at the end of the file.
__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error",
"setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm",
"str", "atof", "atoi", "format", "format_string", "currency",
"normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY",
"LC_NUMERIC", "LC_ALL", "CHAR_MAX"]
try:
from _locale import *
except ImportError:
# Locale emulation
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
Error = ValueError
def localeconv():
""" localeconv() -> dict.
Returns numeric and monetary locale-specific parameters.
"""
# 'C' locale default values
return {'grouping': [127],
'currency_symbol': '',
'n_sign_posn': 127,
'p_cs_precedes': 127,
'n_cs_precedes': 127,
'mon_grouping': [],
'n_sep_by_space': 127,
'decimal_point': '.',
'negative_sign': '',
'positive_sign': '',
'p_sep_by_space': 127,
'int_curr_symbol': '',
'p_sign_posn': 127,
'thousands_sep': '',
'mon_thousands_sep': '',
'frac_digits': 127,
'mon_decimal_point': '',
'int_frac_digits': 127}
def setlocale(category, value=None):
""" setlocale(integer,string=None) -> string.
Activates/queries locale processing.
"""
if value not in (None, '', 'C'):
raise Error, '_locale emulation only supports "C" locale'
return 'C'
def strcoll(a,b):
""" strcoll(string,string) -> int.
Compares two strings according to the locale.
"""
return cmp(a,b)
def strxfrm(s):
""" strxfrm(string) -> string.
Returns a string that behaves for cmp locale-aware.
"""
return s
_localeconv = localeconv
# With this dict, you can override some items of localeconv's return value.
# This is useful for testing purposes.
_override_localeconv = {}
@functools.wraps(_localeconv)
def localeconv():
d = _localeconv()
if _override_localeconv:
d.update(_override_localeconv)
return d
### Number formatting APIs
# Author: Martin von Loewis
# improved by Georg Brandl
# Iterate over grouping intervals
def _grouping_intervals(grouping):
last_interval = None
for interval in grouping:
        # a CHAR_MAX entry terminates the grouping
if interval == CHAR_MAX:
return
# 0: re-use last group ad infinitum
if interval == 0:
if last_interval is None:
raise ValueError("invalid grouping")
while True:
yield last_interval
yield interval
last_interval = interval
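# Example: grouping [3, 0] yields 3, 3, 3, ... (a 0 entry repeats the
# previous interval indefinitely), while [3, 2, CHAR_MAX] yields 3, then
# 2, and then stops.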
# Perform the grouping from right to left.
def _group(s, monetary=False):
conv = localeconv()
thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
grouping = conv[monetary and 'mon_grouping' or 'grouping']
if not grouping:
return (s, 0)
if s[-1] == ' ':
stripped = s.rstrip()
right_spaces = s[len(stripped):]
s = stripped
else:
right_spaces = ''
left_spaces = ''
groups = []
for interval in _grouping_intervals(grouping):
if not s or s[-1] not in "0123456789":
# only non-digit characters remain (sign, spaces)
left_spaces = s
s = ''
break
groups.append(s[-interval:])
s = s[:-interval]
if s:
groups.append(s)
groups.reverse()
return (
left_spaces + thousands_sep.join(groups) + right_spaces,
len(thousands_sep) * (len(groups) - 1)
)
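# Example: with thousands_sep ',' and grouping [3, 0], _group('1234567')
# returns ('1,234,567', 2); the second element counts the separator
# characters inserted so callers can strip excess padding afterwards.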
# Strip a given amount of excess padding from the given string
def _strip_padding(s, amount):
lpos = 0
while amount and s[lpos] == ' ':
lpos += 1
amount -= 1
rpos = len(s) - 1
while amount and s[rpos] == ' ':
rpos -= 1
amount -= 1
return s[lpos:rpos+1]
_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
def format(percent, value, grouping=False, monetary=False, *additional):
"""Returns the locale-aware substitution of a %? specifier
(percent).
additional is for format strings which contain one or more
'*' modifiers."""
    # format() accepts exactly one %-specifier, which must span the whole
    # string; verify that before delegating to _format().
    match = _percent_re.match(percent)
    if not match or len(match.group()) != len(percent):
raise ValueError(("format() must be given exactly one %%char "
"format specifier, %s not valid") % repr(percent))
return _format(percent, value, grouping, monetary, *additional)
def _format(percent, value, grouping=False, monetary=False, *additional):
if additional:
formatted = percent % ((value,) + additional)
else:
formatted = percent % value
# floats and decimal ints need special action!
if percent[-1] in 'eEfFgG':
seps = 0
parts = formatted.split('.')
if grouping:
parts[0], seps = _group(parts[0], monetary=monetary)
decimal_point = localeconv()[monetary and 'mon_decimal_point'
or 'decimal_point']
formatted = decimal_point.join(parts)
if seps:
formatted = _strip_padding(formatted, seps)
elif percent[-1] in 'diu':
seps = 0
if grouping:
formatted, seps = _group(formatted, monetary=monetary)
if seps:
formatted = _strip_padding(formatted, seps)
return formatted
def format_string(f, val, grouping=False):
"""Formats a string in the same way that the % formatting would use,
but takes the current locale into account.
Grouping is applied if the third parameter is true."""
percents = list(_percent_re.finditer(f))
new_f = _percent_re.sub('%s', f)
if operator.isMappingType(val):
new_val = []
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
new_val.append(format(perc.group(), val, grouping))
else:
if not isinstance(val, tuple):
val = (val,)
new_val = []
i = 0
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
starcount = perc.group('modifiers').count('*')
new_val.append(_format(perc.group(),
val[i],
grouping,
False,
*val[i+1:i+1+starcount]))
i += (1 + starcount)
val = tuple(new_val)
return new_f % val
def currency(val, symbol=True, grouping=False, international=False):
"""Formats val according to the currency settings
in the current locale."""
conv = localeconv()
# check for illegal values
digits = conv[international and 'int_frac_digits' or 'frac_digits']
if digits == 127:
raise ValueError("Currency formatting is not possible using "
"the 'C' locale.")
s = format('%%.%if' % digits, abs(val), grouping, monetary=True)
# '<' and '>' are markers if the sign must be inserted between symbol and value
s = '<' + s + '>'
if symbol:
smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
if precedes:
s = smb + (separated and ' ' or '') + s
else:
s = s + (separated and ' ' or '') + smb
sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
sign = conv[val<0 and 'negative_sign' or 'positive_sign']
if sign_pos == 0:
s = '(' + s + ')'
elif sign_pos == 1:
s = sign + s
elif sign_pos == 2:
s = s + sign
elif sign_pos == 3:
s = s.replace('<', sign)
elif sign_pos == 4:
s = s.replace('>', sign)
else:
# the default if nothing specified;
# this should be the most fitting sign position
s = sign + s
return s.replace('<', '').replace('>', '')
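# Example, assuming hypothetical en_US-style settings (currency_symbol '$',
# frac_digits 2, p_cs_precedes 1, p_sep_by_space 0):
#   currency(1234.56, grouping=True) -> '$1,234.56'
# and with n_sign_posn == 0 a negative amount comes back parenthesized,
# e.g. '($1,234.56)'.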
def str(val):
"""Convert float to integer, taking the locale into account."""
return format("%.12g", val)
def atof(string, func=float):
"Parses a string as a float according to the locale settings."
#First, get rid of the grouping
ts = localeconv()['thousands_sep']
if ts:
string = string.replace(ts, '')
#next, replace the decimal point with a dot
dd = localeconv()['decimal_point']
if dd:
string = string.replace(dd, '.')
#finally, parse the string
return func(string)
def atoi(str):
"Converts a string to an integer according to the locale settings."
return atof(str, int)
def _test():
setlocale(LC_ALL, "")
#do grouping
s1 = format("%d", 123456789,1)
print s1, "is", atoi(s1)
#standard formatting
s1 = str(3.14)
print s1, "is", atof(s1)
### Locale name aliasing engine
# Author: Marc-Andre Lemburg, mal@lemburg.com
# Various tweaks by Fredrik Lundh <fredrik@pythonware.com>
# store away the low-level version of setlocale (it's
# overridden below)
_setlocale = setlocale
# Avoid relying on the locale-dependent .lower() method
# (see issue #1813).
_ascii_lower_map = ''.join(
chr(x + 32 if x >= ord('A') and x <= ord('Z') else x)
for x in range(256)
)
def _replace_encoding(code, encoding):
if '.' in code:
langname = code[:code.index('.')]
else:
langname = code
# Convert the encoding to a C lib compatible encoding string
norm_encoding = encodings.normalize_encoding(encoding)
#print('norm encoding: %r' % norm_encoding)
norm_encoding = encodings.aliases.aliases.get(norm_encoding,
norm_encoding)
#print('aliased encoding: %r' % norm_encoding)
encoding = locale_encoding_alias.get(norm_encoding,
norm_encoding)
#print('found encoding %r' % encoding)
return langname + '.' + encoding
def normalize(localename):
""" Returns a normalized locale code for the given locale
name.
The returned locale code is formatted for use with
setlocale().
If normalization fails, the original name is returned
unchanged.
If the given encoding is not known, the function defaults to
the default encoding for the locale code just like setlocale()
does.
"""
# Normalize the locale name and extract the encoding and modifier
if isinstance(localename, _unicode):
localename = localename.encode('ascii')
code = localename.translate(_ascii_lower_map)
if ':' in code:
# ':' is sometimes used as encoding delimiter.
code = code.replace(':', '.')
if '@' in code:
code, modifier = code.split('@', 1)
else:
modifier = ''
if '.' in code:
langname, encoding = code.split('.')[:2]
else:
langname = code
encoding = ''
# First lookup: fullname (possibly with encoding and modifier)
lang_enc = langname
if encoding:
norm_encoding = encoding.replace('-', '')
norm_encoding = norm_encoding.replace('_', '')
lang_enc += '.' + norm_encoding
lookup_name = lang_enc
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
return code
#print('first lookup failed')
if modifier:
# Second try: fullname without modifier (possibly with encoding)
code = locale_alias.get(lang_enc, None)
if code is not None:
#print('lookup without modifier succeeded')
if '@' not in code:
return code + '@' + modifier
if code.split('@', 1)[1].translate(_ascii_lower_map) == modifier:
return code
#print('second lookup failed')
if encoding:
# Third try: langname (without encoding, possibly with modifier)
lookup_name = langname
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
#print('lookup without encoding succeeded')
if '@' not in code:
return _replace_encoding(code, encoding)
code, modifier = code.split('@', 1)
return _replace_encoding(code, encoding) + '@' + modifier
if modifier:
# Fourth try: langname (without encoding and modifier)
code = locale_alias.get(langname, None)
if code is not None:
#print('lookup without modifier and encoding succeeded')
if '@' not in code:
return _replace_encoding(code, encoding) + '@' + modifier
code, defmod = code.split('@', 1)
if defmod.translate(_ascii_lower_map) == modifier:
return _replace_encoding(code, encoding) + '@' + defmod
return localename
def _parse_localename(localename):
""" Parses the locale code for localename and returns the
result as tuple (language code, encoding).
The localename is normalized and passed through the locale
alias engine. A ValueError is raised in case the locale name
cannot be parsed.
The language code corresponds to RFC 1766. code and encoding
can be None in case the values cannot be determined or are
unknown to this implementation.
"""
code = normalize(localename)
if '@' in code:
# Deal with locale modifiers
code, modifier = code.split('@', 1)
if modifier == 'euro' and '.' not in code:
# Assume Latin-9 for @euro locales. This is bogus,
# since some systems may use other encodings for these
# locales. Also, we ignore other modifiers.
return code, 'iso-8859-15'
if '.' in code:
return tuple(code.split('.')[:2])
elif code == 'C':
return None, None
raise ValueError, 'unknown locale: %s' % localename
def _build_localename(localetuple):
""" Builds a locale code from the given tuple (language code,
encoding).
No aliasing or normalizing takes place.
"""
language, encoding = localetuple
if language is None:
language = 'C'
if encoding is None:
return language
else:
return language + '.' + encoding
def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
""" Tries to determine the default locale settings and returns
them as tuple (language code, encoding).
According to POSIX, a program which has not called
setlocale(LC_ALL, "") runs using the portable 'C' locale.
Calling setlocale(LC_ALL, "") lets it use the default locale as
defined by the LANG variable. Since we don't want to interfere
with the current locale setting we thus emulate the behavior
in the way described above.
To maintain compatibility with other platforms, not only the
LANG variable is tested, but a list of variables given as
envvars parameter. The first found to be defined will be
used. envvars defaults to the search path used in GNU gettext;
it must always contain the variable name 'LANG'.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
try:
# check if it's supported by the _locale module
import _locale
code, encoding = _locale._getdefaultlocale()
except (ImportError, AttributeError):
pass
else:
# make sure the code/encoding values are valid
if sys.platform == "win32" and code and code[:2] == "0x":
# map windows language identifier to language name
code = windows_locale.get(int(code, 0))
# ...add other platform-specific processing here, if
# necessary...
return code, encoding
# fall back on POSIX behaviour
import os
lookup = os.environ.get
for variable in envvars:
localename = lookup(variable,None)
if localename:
if variable == 'LANGUAGE':
localename = localename.split(':')[0]
break
else:
localename = 'C'
return _parse_localename(localename)
def getlocale(category=LC_CTYPE):
""" Returns the current setting for the given locale category as
tuple (language code, encoding).
category may be one of the LC_* value except LC_ALL. It
defaults to LC_CTYPE.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
localename = _setlocale(category)
if category == LC_ALL and ';' in localename:
raise TypeError, 'category LC_ALL is not supported'
return _parse_localename(localename)
def setlocale(category, locale=None):
""" Set the locale for the given category. The locale can be
a string, an iterable of two strings (language code and encoding),
or None.
Iterables are converted to strings using the locale aliasing
engine. Locale strings are passed directly to the C lib.
category may be given as one of the LC_* values.
"""
if locale and type(locale) is not type(""):
# convert to string
locale = normalize(_build_localename(locale))
return _setlocale(category, locale)
def resetlocale(category=LC_ALL):
""" Sets the locale for category to the default setting.
The default setting is determined by calling
getdefaultlocale(). category defaults to LC_ALL.
"""
_setlocale(category, _build_localename(getdefaultlocale()))
if sys.platform.startswith("win"):
# On Win32, this will return the ANSI code page
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using."""
import _locale
return _locale._getdefaultlocale()[1]
else:
# On Unix, if CODESET is available, use that.
try:
CODESET
except NameError:
# Fall back to parsing environment variables :-(
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
by looking at environment variables."""
return getdefaultlocale()[1]
else:
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
according to the system configuration."""
if do_setlocale:
oldloc = setlocale(LC_CTYPE)
try:
setlocale(LC_CTYPE, "")
except Error:
pass
result = nl_langinfo(CODESET)
setlocale(LC_CTYPE, oldloc)
return result
else:
return nl_langinfo(CODESET)
### Database
#
# The following data was extracted from the locale.alias file which
# comes with X11 and then hand edited removing the explicit encoding
# definitions and adding some more aliases. The file is usually
# available as /usr/lib/X11/locale/locale.alias.
#
#
# The local_encoding_alias table maps lowercase encoding alias names
# to C locale encoding names (case-sensitive). Note that normalize()
# first looks up the encoding in the encodings.aliases dictionary and
# then applies this mapping to find the correct C lib name for the
# encoding.
#
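# For example, normalize('american.iso-8859-1') lower-cases the name,
# strips '-' and '_' from the encoding part, finds the resulting
# 'american.iso88591' key in locale_alias below, and returns
# 'en_US.ISO8859-1'.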
locale_encoding_alias = {
# Mappings for non-standard encoding names used in locale names
'437': 'C',
'c': 'C',
'en': 'ISO8859-1',
'jis': 'JIS7',
'jis7': 'JIS7',
'ajec': 'eucJP',
# Mappings from Python codec names to C lib encoding names
'ascii': 'ISO8859-1',
'latin_1': 'ISO8859-1',
'iso8859_1': 'ISO8859-1',
'iso8859_10': 'ISO8859-10',
'iso8859_11': 'ISO8859-11',
'iso8859_13': 'ISO8859-13',
'iso8859_14': 'ISO8859-14',
'iso8859_15': 'ISO8859-15',
'iso8859_16': 'ISO8859-16',
'iso8859_2': 'ISO8859-2',
'iso8859_3': 'ISO8859-3',
'iso8859_4': 'ISO8859-4',
'iso8859_5': 'ISO8859-5',
'iso8859_6': 'ISO8859-6',
'iso8859_7': 'ISO8859-7',
'iso8859_8': 'ISO8859-8',
'iso8859_9': 'ISO8859-9',
'iso2022_jp': 'JIS7',
'shift_jis': 'SJIS',
'tactis': 'TACTIS',
'euc_jp': 'eucJP',
'euc_kr': 'eucKR',
'utf_8': 'UTF-8',
'koi8_r': 'KOI8-R',
'koi8_u': 'KOI8-U',
# XXX This list is still incomplete. If you know more
# mappings, please file a bug report. Thanks.
}
#
# The locale_alias table maps lowercase alias names to C locale names
# (case-sensitive). Encodings are always separated from the locale
# name using a dot ('.'); they should only be given in case the
# language name is needed to interpret the given encoding alias
# correctly (CJK codes often have this need).
#
# Note that the normalize() function which uses this tables
# removes '_' and '-' characters from the encoding part of the
# locale name before doing the lookup. This saves a lot of
# space in the table.
#
# MAL 2004-12-10:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.4
# and older):
#
# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1'
# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP'
# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13'
# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13'
# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11'
# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312'
# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5'
# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5'
#
# MAL 2008-05-30:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.5
# and older):
#
# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2'
# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2'
# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
#
# AP 2010-04-12:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.6.5
# and older):
#
# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
#
# SS 2013-12-20:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.7.6
# and older):
#
# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8'
# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
locale_alias = {
'a3': 'az_AZ.KOI8-C',
'a3_az': 'az_AZ.KOI8-C',
'a3_az.koi8c': 'az_AZ.KOI8-C',
'a3_az.koic': 'az_AZ.KOI8-C',
'af': 'af_ZA.ISO8859-1',
'af_za': 'af_ZA.ISO8859-1',
'af_za.iso88591': 'af_ZA.ISO8859-1',
'am': 'am_ET.UTF-8',
'am_et': 'am_ET.UTF-8',
'american': 'en_US.ISO8859-1',
'american.iso88591': 'en_US.ISO8859-1',
'ar': 'ar_AA.ISO8859-6',
'ar_aa': 'ar_AA.ISO8859-6',
'ar_aa.iso88596': 'ar_AA.ISO8859-6',
'ar_ae': 'ar_AE.ISO8859-6',
'ar_ae.iso88596': 'ar_AE.ISO8859-6',
'ar_bh': 'ar_BH.ISO8859-6',
'ar_bh.iso88596': 'ar_BH.ISO8859-6',
'ar_dz': 'ar_DZ.ISO8859-6',
'ar_dz.iso88596': 'ar_DZ.ISO8859-6',
'ar_eg': 'ar_EG.ISO8859-6',
'ar_eg.iso88596': 'ar_EG.ISO8859-6',
'ar_in': 'ar_IN.UTF-8',
'ar_iq': 'ar_IQ.ISO8859-6',
'ar_iq.iso88596': 'ar_IQ.ISO8859-6',
'ar_jo': 'ar_JO.ISO8859-6',
'ar_jo.iso88596': 'ar_JO.ISO8859-6',
'ar_kw': 'ar_KW.ISO8859-6',
'ar_kw.iso88596': 'ar_KW.ISO8859-6',
'ar_lb': 'ar_LB.ISO8859-6',
'ar_lb.iso88596': 'ar_LB.ISO8859-6',
'ar_ly': 'ar_LY.ISO8859-6',
'ar_ly.iso88596': 'ar_LY.ISO8859-6',
'ar_ma': 'ar_MA.ISO8859-6',
'ar_ma.iso88596': 'ar_MA.ISO8859-6',
'ar_om': 'ar_OM.ISO8859-6',
'ar_om.iso88596': 'ar_OM.ISO8859-6',
'ar_qa': 'ar_QA.ISO8859-6',
'ar_qa.iso88596': 'ar_QA.ISO8859-6',
'ar_sa': 'ar_SA.ISO8859-6',
'ar_sa.iso88596': 'ar_SA.ISO8859-6',
'ar_sd': 'ar_SD.ISO8859-6',
'ar_sd.iso88596': 'ar_SD.ISO8859-6',
'ar_sy': 'ar_SY.ISO8859-6',
'ar_sy.iso88596': 'ar_SY.ISO8859-6',
'ar_tn': 'ar_TN.ISO8859-6',
'ar_tn.iso88596': 'ar_TN.ISO8859-6',
'ar_ye': 'ar_YE.ISO8859-6',
'ar_ye.iso88596': 'ar_YE.ISO8859-6',
'arabic': 'ar_AA.ISO8859-6',
'arabic.iso88596': 'ar_AA.ISO8859-6',
'as': 'as_IN.UTF-8',
'as_in': 'as_IN.UTF-8',
'az': 'az_AZ.ISO8859-9E',
'az_az': 'az_AZ.ISO8859-9E',
'az_az.iso88599e': 'az_AZ.ISO8859-9E',
'be': 'be_BY.CP1251',
'be@latin': 'be_BY.UTF-8@latin',
'be_by': 'be_BY.CP1251',
'be_by.cp1251': 'be_BY.CP1251',
'be_by.microsoftcp1251': 'be_BY.CP1251',
'be_by.utf8@latin': 'be_BY.UTF-8@latin',
'be_by@latin': 'be_BY.UTF-8@latin',
'bg': 'bg_BG.CP1251',
'bg_bg': 'bg_BG.CP1251',
'bg_bg.cp1251': 'bg_BG.CP1251',
'bg_bg.iso88595': 'bg_BG.ISO8859-5',
'bg_bg.koi8r': 'bg_BG.KOI8-R',
'bg_bg.microsoftcp1251': 'bg_BG.CP1251',
'bn_in': 'bn_IN.UTF-8',
'bo_in': 'bo_IN.UTF-8',
'bokmal': 'nb_NO.ISO8859-1',
'bokm\xe5l': 'nb_NO.ISO8859-1',
'br': 'br_FR.ISO8859-1',
'br_fr': 'br_FR.ISO8859-1',
'br_fr.iso88591': 'br_FR.ISO8859-1',
'br_fr.iso885914': 'br_FR.ISO8859-14',
'br_fr.iso885915': 'br_FR.ISO8859-15',
'br_fr.iso885915@euro': 'br_FR.ISO8859-15',
'br_fr.utf8@euro': 'br_FR.UTF-8',
'br_fr@euro': 'br_FR.ISO8859-15',
'bs': 'bs_BA.ISO8859-2',
'bs_ba': 'bs_BA.ISO8859-2',
'bs_ba.iso88592': 'bs_BA.ISO8859-2',
'bulgarian': 'bg_BG.CP1251',
'c': 'C',
'c-french': 'fr_CA.ISO8859-1',
'c-french.iso88591': 'fr_CA.ISO8859-1',
'c.ascii': 'C',
'c.en': 'C',
'c.iso88591': 'en_US.ISO8859-1',
'c_c': 'C',
'c_c.c': 'C',
'ca': 'ca_ES.ISO8859-1',
'ca_ad': 'ca_AD.ISO8859-1',
'ca_ad.iso88591': 'ca_AD.ISO8859-1',
'ca_ad.iso885915': 'ca_AD.ISO8859-15',
'ca_ad.iso885915@euro': 'ca_AD.ISO8859-15',
'ca_ad.utf8@euro': 'ca_AD.UTF-8',
'ca_ad@euro': 'ca_AD.ISO8859-15',
'ca_es': 'ca_ES.ISO8859-1',
'ca_es.iso88591': 'ca_ES.ISO8859-1',
'ca_es.iso885915': 'ca_ES.ISO8859-15',
'ca_es.iso885915@euro': 'ca_ES.ISO8859-15',
'ca_es.utf8@euro': 'ca_ES.UTF-8',
'ca_es@euro': 'ca_ES.ISO8859-15',
'ca_fr': 'ca_FR.ISO8859-1',
'ca_fr.iso88591': 'ca_FR.ISO8859-1',
'ca_fr.iso885915': 'ca_FR.ISO8859-15',
'ca_fr.iso885915@euro': 'ca_FR.ISO8859-15',
'ca_fr.utf8@euro': 'ca_FR.UTF-8',
'ca_fr@euro': 'ca_FR.ISO8859-15',
'ca_it': 'ca_IT.ISO8859-1',
'ca_it.iso88591': 'ca_IT.ISO8859-1',
'ca_it.iso885915': 'ca_IT.ISO8859-15',
'ca_it.iso885915@euro': 'ca_IT.ISO8859-15',
'ca_it.utf8@euro': 'ca_IT.UTF-8',
'ca_it@euro': 'ca_IT.ISO8859-15',
'catalan': 'ca_ES.ISO8859-1',
'cextend': 'en_US.ISO8859-1',
'cextend.en': 'en_US.ISO8859-1',
'chinese-s': 'zh_CN.eucCN',
'chinese-t': 'zh_TW.eucTW',
'croatian': 'hr_HR.ISO8859-2',
'cs': 'cs_CZ.ISO8859-2',
'cs_cs': 'cs_CZ.ISO8859-2',
'cs_cs.iso88592': 'cs_CZ.ISO8859-2',
'cs_cz': 'cs_CZ.ISO8859-2',
'cs_cz.iso88592': 'cs_CZ.ISO8859-2',
'cy': 'cy_GB.ISO8859-1',
'cy_gb': 'cy_GB.ISO8859-1',
'cy_gb.iso88591': 'cy_GB.ISO8859-1',
'cy_gb.iso885914': 'cy_GB.ISO8859-14',
'cy_gb.iso885915': 'cy_GB.ISO8859-15',
'cy_gb@euro': 'cy_GB.ISO8859-15',
'cz': 'cs_CZ.ISO8859-2',
'cz_cz': 'cs_CZ.ISO8859-2',
'czech': 'cs_CZ.ISO8859-2',
'da': 'da_DK.ISO8859-1',
'da.iso885915': 'da_DK.ISO8859-15',
'da_dk': 'da_DK.ISO8859-1',
'da_dk.88591': 'da_DK.ISO8859-1',
'da_dk.885915': 'da_DK.ISO8859-15',
'da_dk.iso88591': 'da_DK.ISO8859-1',
'da_dk.iso885915': 'da_DK.ISO8859-15',
'da_dk@euro': 'da_DK.ISO8859-15',
'danish': 'da_DK.ISO8859-1',
'danish.iso88591': 'da_DK.ISO8859-1',
'dansk': 'da_DK.ISO8859-1',
'de': 'de_DE.ISO8859-1',
'de.iso885915': 'de_DE.ISO8859-15',
'de_at': 'de_AT.ISO8859-1',
'de_at.iso88591': 'de_AT.ISO8859-1',
'de_at.iso885915': 'de_AT.ISO8859-15',
'de_at.iso885915@euro': 'de_AT.ISO8859-15',
'de_at.utf8@euro': 'de_AT.UTF-8',
'de_at@euro': 'de_AT.ISO8859-15',
'de_be': 'de_BE.ISO8859-1',
'de_be.iso88591': 'de_BE.ISO8859-1',
'de_be.iso885915': 'de_BE.ISO8859-15',
'de_be.iso885915@euro': 'de_BE.ISO8859-15',
'de_be.utf8@euro': 'de_BE.UTF-8',
'de_be@euro': 'de_BE.ISO8859-15',
'de_ch': 'de_CH.ISO8859-1',
'de_ch.iso88591': 'de_CH.ISO8859-1',
'de_ch.iso885915': 'de_CH.ISO8859-15',
'de_ch@euro': 'de_CH.ISO8859-15',
'de_de': 'de_DE.ISO8859-1',
'de_de.88591': 'de_DE.ISO8859-1',
'de_de.885915': 'de_DE.ISO8859-15',
'de_de.885915@euro': 'de_DE.ISO8859-15',
'de_de.iso88591': 'de_DE.ISO8859-1',
'de_de.iso885915': 'de_DE.ISO8859-15',
'de_de.iso885915@euro': 'de_DE.ISO8859-15',
'de_de.utf8@euro': 'de_DE.UTF-8',
'de_de@euro': 'de_DE.ISO8859-15',
'de_lu': 'de_LU.ISO8859-1',
'de_lu.iso88591': 'de_LU.ISO8859-1',
'de_lu.iso885915': 'de_LU.ISO8859-15',
'de_lu.iso885915@euro': 'de_LU.ISO8859-15',
'de_lu.utf8@euro': 'de_LU.UTF-8',
'de_lu@euro': 'de_LU.ISO8859-15',
'deutsch': 'de_DE.ISO8859-1',
'dutch': 'nl_NL.ISO8859-1',
'dutch.iso88591': 'nl_BE.ISO8859-1',
'ee': 'ee_EE.ISO8859-4',
'ee_ee': 'ee_EE.ISO8859-4',
'ee_ee.iso88594': 'ee_EE.ISO8859-4',
'eesti': 'et_EE.ISO8859-1',
'el': 'el_GR.ISO8859-7',
'el_gr': 'el_GR.ISO8859-7',
'el_gr.iso88597': 'el_GR.ISO8859-7',
'el_gr@euro': 'el_GR.ISO8859-15',
'en': 'en_US.ISO8859-1',
'en.iso88591': 'en_US.ISO8859-1',
'en_au': 'en_AU.ISO8859-1',
'en_au.iso88591': 'en_AU.ISO8859-1',
'en_be': 'en_BE.ISO8859-1',
'en_be@euro': 'en_BE.ISO8859-15',
'en_bw': 'en_BW.ISO8859-1',
'en_bw.iso88591': 'en_BW.ISO8859-1',
'en_ca': 'en_CA.ISO8859-1',
'en_ca.iso88591': 'en_CA.ISO8859-1',
'en_gb': 'en_GB.ISO8859-1',
'en_gb.88591': 'en_GB.ISO8859-1',
'en_gb.iso88591': 'en_GB.ISO8859-1',
'en_gb.iso885915': 'en_GB.ISO8859-15',
'en_gb@euro': 'en_GB.ISO8859-15',
'en_hk': 'en_HK.ISO8859-1',
'en_hk.iso88591': 'en_HK.ISO8859-1',
'en_ie': 'en_IE.ISO8859-1',
'en_ie.iso88591': 'en_IE.ISO8859-1',
'en_ie.iso885915': 'en_IE.ISO8859-15',
'en_ie.iso885915@euro': 'en_IE.ISO8859-15',
'en_ie.utf8@euro': 'en_IE.UTF-8',
'en_ie@euro': 'en_IE.ISO8859-15',
'en_in': 'en_IN.ISO8859-1',
'en_nz': 'en_NZ.ISO8859-1',
'en_nz.iso88591': 'en_NZ.ISO8859-1',
'en_ph': 'en_PH.ISO8859-1',
'en_ph.iso88591': 'en_PH.ISO8859-1',
'en_sg': 'en_SG.ISO8859-1',
'en_sg.iso88591': 'en_SG.ISO8859-1',
'en_uk': 'en_GB.ISO8859-1',
'en_us': 'en_US.ISO8859-1',
'en_us.88591': 'en_US.ISO8859-1',
'en_us.885915': 'en_US.ISO8859-15',
'en_us.iso88591': 'en_US.ISO8859-1',
'en_us.iso885915': 'en_US.ISO8859-15',
'en_us.iso885915@euro': 'en_US.ISO8859-15',
'en_us@euro': 'en_US.ISO8859-15',
'en_us@euro@euro': 'en_US.ISO8859-15',
'en_za': 'en_ZA.ISO8859-1',
'en_za.88591': 'en_ZA.ISO8859-1',
'en_za.iso88591': 'en_ZA.ISO8859-1',
'en_za.iso885915': 'en_ZA.ISO8859-15',
'en_za@euro': 'en_ZA.ISO8859-15',
'en_zw': 'en_ZW.ISO8859-1',
'en_zw.iso88591': 'en_ZW.ISO8859-1',
'eng_gb': 'en_GB.ISO8859-1',
'eng_gb.8859': 'en_GB.ISO8859-1',
'english': 'en_EN.ISO8859-1',
'english.iso88591': 'en_EN.ISO8859-1',
'english_uk': 'en_GB.ISO8859-1',
'english_uk.8859': 'en_GB.ISO8859-1',
'english_united-states': 'en_US.ISO8859-1',
'english_united-states.437': 'C',
'english_us': 'en_US.ISO8859-1',
'english_us.8859': 'en_US.ISO8859-1',
'english_us.ascii': 'en_US.ISO8859-1',
'eo': 'eo_XX.ISO8859-3',
'eo_eo': 'eo_EO.ISO8859-3',
'eo_eo.iso88593': 'eo_EO.ISO8859-3',
'eo_xx': 'eo_XX.ISO8859-3',
'eo_xx.iso88593': 'eo_XX.ISO8859-3',
'es': 'es_ES.ISO8859-1',
'es_ar': 'es_AR.ISO8859-1',
'es_ar.iso88591': 'es_AR.ISO8859-1',
'es_bo': 'es_BO.ISO8859-1',
'es_bo.iso88591': 'es_BO.ISO8859-1',
'es_cl': 'es_CL.ISO8859-1',
'es_cl.iso88591': 'es_CL.ISO8859-1',
'es_co': 'es_CO.ISO8859-1',
'es_co.iso88591': 'es_CO.ISO8859-1',
'es_cr': 'es_CR.ISO8859-1',
'es_cr.iso88591': 'es_CR.ISO8859-1',
'es_do': 'es_DO.ISO8859-1',
'es_do.iso88591': 'es_DO.ISO8859-1',
'es_ec': 'es_EC.ISO8859-1',
'es_ec.iso88591': 'es_EC.ISO8859-1',
'es_es': 'es_ES.ISO8859-1',
'es_es.88591': 'es_ES.ISO8859-1',
'es_es.iso88591': 'es_ES.ISO8859-1',
'es_es.iso885915': 'es_ES.ISO8859-15',
'es_es.iso885915@euro': 'es_ES.ISO8859-15',
'es_es.utf8@euro': 'es_ES.UTF-8',
'es_es@euro': 'es_ES.ISO8859-15',
'es_gt': 'es_GT.ISO8859-1',
'es_gt.iso88591': 'es_GT.ISO8859-1',
'es_hn': 'es_HN.ISO8859-1',
'es_hn.iso88591': 'es_HN.ISO8859-1',
'es_mx': 'es_MX.ISO8859-1',
'es_mx.iso88591': 'es_MX.ISO8859-1',
'es_ni': 'es_NI.ISO8859-1',
'es_ni.iso88591': 'es_NI.ISO8859-1',
'es_pa': 'es_PA.ISO8859-1',
'es_pa.iso88591': 'es_PA.ISO8859-1',
'es_pa.iso885915': 'es_PA.ISO8859-15',
'es_pa@euro': 'es_PA.ISO8859-15',
'es_pe': 'es_PE.ISO8859-1',
'es_pe.iso88591': 'es_PE.ISO8859-1',
'es_pe.iso885915': 'es_PE.ISO8859-15',
'es_pe@euro': 'es_PE.ISO8859-15',
'es_pr': 'es_PR.ISO8859-1',
'es_pr.iso88591': 'es_PR.ISO8859-1',
'es_py': 'es_PY.ISO8859-1',
'es_py.iso88591': 'es_PY.ISO8859-1',
'es_py.iso885915': 'es_PY.ISO8859-15',
'es_py@euro': 'es_PY.ISO8859-15',
'es_sv': 'es_SV.ISO8859-1',
'es_sv.iso88591': 'es_SV.ISO8859-1',
'es_sv.iso885915': 'es_SV.ISO8859-15',
'es_sv@euro': 'es_SV.ISO8859-15',
'es_us': 'es_US.ISO8859-1',
'es_us.iso88591': 'es_US.ISO8859-1',
'es_uy': 'es_UY.ISO8859-1',
'es_uy.iso88591': 'es_UY.ISO8859-1',
'es_uy.iso885915': 'es_UY.ISO8859-15',
'es_uy@euro': 'es_UY.ISO8859-15',
'es_ve': 'es_VE.ISO8859-1',
'es_ve.iso88591': 'es_VE.ISO8859-1',
'es_ve.iso885915': 'es_VE.ISO8859-15',
'es_ve@euro': 'es_VE.ISO8859-15',
'estonian': 'et_EE.ISO8859-1',
'et': 'et_EE.ISO8859-15',
'et_ee': 'et_EE.ISO8859-15',
'et_ee.iso88591': 'et_EE.ISO8859-1',
'et_ee.iso885913': 'et_EE.ISO8859-13',
'et_ee.iso885915': 'et_EE.ISO8859-15',
'et_ee.iso88594': 'et_EE.ISO8859-4',
'et_ee@euro': 'et_EE.ISO8859-15',
'eu': 'eu_ES.ISO8859-1',
'eu_es': 'eu_ES.ISO8859-1',
'eu_es.iso88591': 'eu_ES.ISO8859-1',
'eu_es.iso885915': 'eu_ES.ISO8859-15',
'eu_es.iso885915@euro': 'eu_ES.ISO8859-15',
'eu_es.utf8@euro': 'eu_ES.UTF-8',
'eu_es@euro': 'eu_ES.ISO8859-15',
'fa': 'fa_IR.UTF-8',
'fa_ir': 'fa_IR.UTF-8',
'fa_ir.isiri3342': 'fa_IR.ISIRI-3342',
'fi': 'fi_FI.ISO8859-15',
'fi.iso885915': 'fi_FI.ISO8859-15',
'fi_fi': 'fi_FI.ISO8859-15',
'fi_fi.88591': 'fi_FI.ISO8859-1',
'fi_fi.iso88591': 'fi_FI.ISO8859-1',
'fi_fi.iso885915': 'fi_FI.ISO8859-15',
'fi_fi.iso885915@euro': 'fi_FI.ISO8859-15',
'fi_fi.utf8@euro': 'fi_FI.UTF-8',
'fi_fi@euro': 'fi_FI.ISO8859-15',
'finnish': 'fi_FI.ISO8859-1',
'finnish.iso88591': 'fi_FI.ISO8859-1',
'fo': 'fo_FO.ISO8859-1',
'fo_fo': 'fo_FO.ISO8859-1',
'fo_fo.iso88591': 'fo_FO.ISO8859-1',
'fo_fo.iso885915': 'fo_FO.ISO8859-15',
'fo_fo@euro': 'fo_FO.ISO8859-15',
'fr': 'fr_FR.ISO8859-1',
'fr.iso885915': 'fr_FR.ISO8859-15',
'fr_be': 'fr_BE.ISO8859-1',
'fr_be.88591': 'fr_BE.ISO8859-1',
'fr_be.iso88591': 'fr_BE.ISO8859-1',
'fr_be.iso885915': 'fr_BE.ISO8859-15',
'fr_be.iso885915@euro': 'fr_BE.ISO8859-15',
'fr_be.utf8@euro': 'fr_BE.UTF-8',
'fr_be@euro': 'fr_BE.ISO8859-15',
'fr_ca': 'fr_CA.ISO8859-1',
'fr_ca.88591': 'fr_CA.ISO8859-1',
'fr_ca.iso88591': 'fr_CA.ISO8859-1',
'fr_ca.iso885915': 'fr_CA.ISO8859-15',
'fr_ca@euro': 'fr_CA.ISO8859-15',
'fr_ch': 'fr_CH.ISO8859-1',
'fr_ch.88591': 'fr_CH.ISO8859-1',
'fr_ch.iso88591': 'fr_CH.ISO8859-1',
'fr_ch.iso885915': 'fr_CH.ISO8859-15',
'fr_ch@euro': 'fr_CH.ISO8859-15',
'fr_fr': 'fr_FR.ISO8859-1',
'fr_fr.88591': 'fr_FR.ISO8859-1',
'fr_fr.iso88591': 'fr_FR.ISO8859-1',
'fr_fr.iso885915': 'fr_FR.ISO8859-15',
'fr_fr.iso885915@euro': 'fr_FR.ISO8859-15',
'fr_fr.utf8@euro': 'fr_FR.UTF-8',
'fr_fr@euro': 'fr_FR.ISO8859-15',
'fr_lu': 'fr_LU.ISO8859-1',
'fr_lu.88591': 'fr_LU.ISO8859-1',
'fr_lu.iso88591': 'fr_LU.ISO8859-1',
'fr_lu.iso885915': 'fr_LU.ISO8859-15',
'fr_lu.iso885915@euro': 'fr_LU.ISO8859-15',
'fr_lu.utf8@euro': 'fr_LU.UTF-8',
'fr_lu@euro': 'fr_LU.ISO8859-15',
'fran\xe7ais': 'fr_FR.ISO8859-1',
'fre_fr': 'fr_FR.ISO8859-1',
'fre_fr.8859': 'fr_FR.ISO8859-1',
'french': 'fr_FR.ISO8859-1',
'french.iso88591': 'fr_CH.ISO8859-1',
'french_france': 'fr_FR.ISO8859-1',
'french_france.8859': 'fr_FR.ISO8859-1',
'ga': 'ga_IE.ISO8859-1',
'ga_ie': 'ga_IE.ISO8859-1',
'ga_ie.iso88591': 'ga_IE.ISO8859-1',
'ga_ie.iso885914': 'ga_IE.ISO8859-14',
'ga_ie.iso885915': 'ga_IE.ISO8859-15',
'ga_ie.iso885915@euro': 'ga_IE.ISO8859-15',
'ga_ie.utf8@euro': 'ga_IE.UTF-8',
'ga_ie@euro': 'ga_IE.ISO8859-15',
'galego': 'gl_ES.ISO8859-1',
'galician': 'gl_ES.ISO8859-1',
'gd': 'gd_GB.ISO8859-1',
'gd_gb': 'gd_GB.ISO8859-1',
'gd_gb.iso88591': 'gd_GB.ISO8859-1',
'gd_gb.iso885914': 'gd_GB.ISO8859-14',
'gd_gb.iso885915': 'gd_GB.ISO8859-15',
'gd_gb@euro': 'gd_GB.ISO8859-15',
'ger_de': 'de_DE.ISO8859-1',
'ger_de.8859': 'de_DE.ISO8859-1',
'german': 'de_DE.ISO8859-1',
'german.iso88591': 'de_CH.ISO8859-1',
'german_germany': 'de_DE.ISO8859-1',
'german_germany.8859': 'de_DE.ISO8859-1',
'gl': 'gl_ES.ISO8859-1',
'gl_es': 'gl_ES.ISO8859-1',
'gl_es.iso88591': 'gl_ES.ISO8859-1',
'gl_es.iso885915': 'gl_ES.ISO8859-15',
'gl_es.iso885915@euro': 'gl_ES.ISO8859-15',
'gl_es.utf8@euro': 'gl_ES.UTF-8',
'gl_es@euro': 'gl_ES.ISO8859-15',
'greek': 'el_GR.ISO8859-7',
'greek.iso88597': 'el_GR.ISO8859-7',
'gu_in': 'gu_IN.UTF-8',
'gv': 'gv_GB.ISO8859-1',
'gv_gb': 'gv_GB.ISO8859-1',
'gv_gb.iso88591': 'gv_GB.ISO8859-1',
'gv_gb.iso885914': 'gv_GB.ISO8859-14',
'gv_gb.iso885915': 'gv_GB.ISO8859-15',
'gv_gb@euro': 'gv_GB.ISO8859-15',
'he': 'he_IL.ISO8859-8',
'he_il': 'he_IL.ISO8859-8',
'he_il.cp1255': 'he_IL.CP1255',
'he_il.iso88598': 'he_IL.ISO8859-8',
'he_il.microsoftcp1255': 'he_IL.CP1255',
'hebrew': 'he_IL.ISO8859-8',
'hebrew.iso88598': 'he_IL.ISO8859-8',
'hi': 'hi_IN.ISCII-DEV',
'hi_in': 'hi_IN.ISCII-DEV',
'hi_in.isciidev': 'hi_IN.ISCII-DEV',
'hne': 'hne_IN.UTF-8',
'hne_in': 'hne_IN.UTF-8',
'hr': 'hr_HR.ISO8859-2',
'hr_hr': 'hr_HR.ISO8859-2',
'hr_hr.iso88592': 'hr_HR.ISO8859-2',
'hrvatski': 'hr_HR.ISO8859-2',
'hu': 'hu_HU.ISO8859-2',
'hu_hu': 'hu_HU.ISO8859-2',
'hu_hu.iso88592': 'hu_HU.ISO8859-2',
'hungarian': 'hu_HU.ISO8859-2',
'icelandic': 'is_IS.ISO8859-1',
'icelandic.iso88591': 'is_IS.ISO8859-1',
'id': 'id_ID.ISO8859-1',
'id_id': 'id_ID.ISO8859-1',
'in': 'id_ID.ISO8859-1',
'in_id': 'id_ID.ISO8859-1',
'is': 'is_IS.ISO8859-1',
'is_is': 'is_IS.ISO8859-1',
'is_is.iso88591': 'is_IS.ISO8859-1',
'is_is.iso885915': 'is_IS.ISO8859-15',
'is_is@euro': 'is_IS.ISO8859-15',
'iso-8859-1': 'en_US.ISO8859-1',
'iso-8859-15': 'en_US.ISO8859-15',
'iso8859-1': 'en_US.ISO8859-1',
'iso8859-15': 'en_US.ISO8859-15',
'iso_8859_1': 'en_US.ISO8859-1',
'iso_8859_15': 'en_US.ISO8859-15',
'it': 'it_IT.ISO8859-1',
'it.iso885915': 'it_IT.ISO8859-15',
'it_ch': 'it_CH.ISO8859-1',
'it_ch.iso88591': 'it_CH.ISO8859-1',
'it_ch.iso885915': 'it_CH.ISO8859-15',
'it_ch@euro': 'it_CH.ISO8859-15',
'it_it': 'it_IT.ISO8859-1',
'it_it.88591': 'it_IT.ISO8859-1',
'it_it.iso88591': 'it_IT.ISO8859-1',
'it_it.iso885915': 'it_IT.ISO8859-15',
'it_it.iso885915@euro': 'it_IT.ISO8859-15',
'it_it.utf8@euro': 'it_IT.UTF-8',
'it_it@euro': 'it_IT.ISO8859-15',
'italian': 'it_IT.ISO8859-1',
'italian.iso88591': 'it_IT.ISO8859-1',
'iu': 'iu_CA.NUNACOM-8',
'iu_ca': 'iu_CA.NUNACOM-8',
'iu_ca.nunacom8': 'iu_CA.NUNACOM-8',
'iw': 'he_IL.ISO8859-8',
'iw_il': 'he_IL.ISO8859-8',
'iw_il.iso88598': 'he_IL.ISO8859-8',
'ja': 'ja_JP.eucJP',
'ja.jis': 'ja_JP.JIS7',
'ja.sjis': 'ja_JP.SJIS',
'ja_jp': 'ja_JP.eucJP',
'ja_jp.ajec': 'ja_JP.eucJP',
'ja_jp.euc': 'ja_JP.eucJP',
'ja_jp.eucjp': 'ja_JP.eucJP',
'ja_jp.iso-2022-jp': 'ja_JP.JIS7',
'ja_jp.iso2022jp': 'ja_JP.JIS7',
'ja_jp.jis': 'ja_JP.JIS7',
'ja_jp.jis7': 'ja_JP.JIS7',
'ja_jp.mscode': 'ja_JP.SJIS',
'ja_jp.pck': 'ja_JP.SJIS',
'ja_jp.sjis': 'ja_JP.SJIS',
'ja_jp.ujis': 'ja_JP.eucJP',
'japan': 'ja_JP.eucJP',
'japanese': 'ja_JP.eucJP',
'japanese-euc': 'ja_JP.eucJP',
'japanese.euc': 'ja_JP.eucJP',
'japanese.sjis': 'ja_JP.SJIS',
'jp_jp': 'ja_JP.eucJP',
'ka': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
'kl': 'kl_GL.ISO8859-1',
'kl_gl': 'kl_GL.ISO8859-1',
'kl_gl.iso88591': 'kl_GL.ISO8859-1',
'kl_gl.iso885915': 'kl_GL.ISO8859-15',
'kl_gl@euro': 'kl_GL.ISO8859-15',
'km_kh': 'km_KH.UTF-8',
'kn': 'kn_IN.UTF-8',
'kn_in': 'kn_IN.UTF-8',
'ko': 'ko_KR.eucKR',
'ko_kr': 'ko_KR.eucKR',
'ko_kr.euc': 'ko_KR.eucKR',
'ko_kr.euckr': 'ko_KR.eucKR',
'korean': 'ko_KR.eucKR',
'korean.euc': 'ko_KR.eucKR',
'ks': 'ks_IN.UTF-8',
'ks_in': 'ks_IN.UTF-8',
'ks_in@devanagari': 'ks_IN.UTF-8@devanagari',
'kw': 'kw_GB.ISO8859-1',
'kw_gb': 'kw_GB.ISO8859-1',
'kw_gb.iso88591': 'kw_GB.ISO8859-1',
'kw_gb.iso885914': 'kw_GB.ISO8859-14',
'kw_gb.iso885915': 'kw_GB.ISO8859-15',
'kw_gb@euro': 'kw_GB.ISO8859-15',
'ky': 'ky_KG.UTF-8',
'ky_kg': 'ky_KG.UTF-8',
'lithuanian': 'lt_LT.ISO8859-13',
'lo': 'lo_LA.MULELAO-1',
'lo_la': 'lo_LA.MULELAO-1',
'lo_la.cp1133': 'lo_LA.IBM-CP1133',
'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133',
'lo_la.mulelao1': 'lo_LA.MULELAO-1',
'lt': 'lt_LT.ISO8859-13',
'lt_lt': 'lt_LT.ISO8859-13',
'lt_lt.iso885913': 'lt_LT.ISO8859-13',
'lt_lt.iso88594': 'lt_LT.ISO8859-4',
'lv': 'lv_LV.ISO8859-13',
'lv_lv': 'lv_LV.ISO8859-13',
'lv_lv.iso885913': 'lv_LV.ISO8859-13',
'lv_lv.iso88594': 'lv_LV.ISO8859-4',
'mai': 'mai_IN.UTF-8',
'mai_in': 'mai_IN.UTF-8',
'mi': 'mi_NZ.ISO8859-1',
'mi_nz': 'mi_NZ.ISO8859-1',
'mi_nz.iso88591': 'mi_NZ.ISO8859-1',
'mk': 'mk_MK.ISO8859-5',
'mk_mk': 'mk_MK.ISO8859-5',
'mk_mk.cp1251': 'mk_MK.CP1251',
'mk_mk.iso88595': 'mk_MK.ISO8859-5',
'mk_mk.microsoftcp1251': 'mk_MK.CP1251',
'ml': 'ml_IN.UTF-8',
'ml_in': 'ml_IN.UTF-8',
'mr': 'mr_IN.UTF-8',
'mr_in': 'mr_IN.UTF-8',
'ms': 'ms_MY.ISO8859-1',
'ms_my': 'ms_MY.ISO8859-1',
'ms_my.iso88591': 'ms_MY.ISO8859-1',
'mt': 'mt_MT.ISO8859-3',
'mt_mt': 'mt_MT.ISO8859-3',
'mt_mt.iso88593': 'mt_MT.ISO8859-3',
'nb': 'nb_NO.ISO8859-1',
'nb_no': 'nb_NO.ISO8859-1',
'nb_no.88591': 'nb_NO.ISO8859-1',
'nb_no.iso88591': 'nb_NO.ISO8859-1',
'nb_no.iso885915': 'nb_NO.ISO8859-15',
'nb_no@euro': 'nb_NO.ISO8859-15',
'ne_np': 'ne_NP.UTF-8',
'nl': 'nl_NL.ISO8859-1',
'nl.iso885915': 'nl_NL.ISO8859-15',
'nl_be': 'nl_BE.ISO8859-1',
'nl_be.88591': 'nl_BE.ISO8859-1',
'nl_be.iso88591': 'nl_BE.ISO8859-1',
'nl_be.iso885915': 'nl_BE.ISO8859-15',
'nl_be.iso885915@euro': 'nl_BE.ISO8859-15',
'nl_be.utf8@euro': 'nl_BE.UTF-8',
'nl_be@euro': 'nl_BE.ISO8859-15',
'nl_nl': 'nl_NL.ISO8859-1',
'nl_nl.88591': 'nl_NL.ISO8859-1',
'nl_nl.iso88591': 'nl_NL.ISO8859-1',
'nl_nl.iso885915': 'nl_NL.ISO8859-15',
'nl_nl.iso885915@euro': 'nl_NL.ISO8859-15',
'nl_nl.utf8@euro': 'nl_NL.UTF-8',
'nl_nl@euro': 'nl_NL.ISO8859-15',
'nn': 'nn_NO.ISO8859-1',
'nn_no': 'nn_NO.ISO8859-1',
'nn_no.88591': 'nn_NO.ISO8859-1',
'nn_no.iso88591': 'nn_NO.ISO8859-1',
'nn_no.iso885915': 'nn_NO.ISO8859-15',
'nn_no@euro': 'nn_NO.ISO8859-15',
'no': 'no_NO.ISO8859-1',
'no@nynorsk': 'ny_NO.ISO8859-1',
'no_no': 'no_NO.ISO8859-1',
'no_no.88591': 'no_NO.ISO8859-1',
'no_no.iso88591': 'no_NO.ISO8859-1',
'no_no.iso885915': 'no_NO.ISO8859-15',
'no_no.iso88591@bokmal': 'no_NO.ISO8859-1',
'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1',
'no_no@euro': 'no_NO.ISO8859-15',
'norwegian': 'no_NO.ISO8859-1',
'norwegian.iso88591': 'no_NO.ISO8859-1',
'nr': 'nr_ZA.ISO8859-1',
'nr_za': 'nr_ZA.ISO8859-1',
'nr_za.iso88591': 'nr_ZA.ISO8859-1',
'nso': 'nso_ZA.ISO8859-15',
'nso_za': 'nso_ZA.ISO8859-15',
'nso_za.iso885915': 'nso_ZA.ISO8859-15',
'ny': 'ny_NO.ISO8859-1',
'ny_no': 'ny_NO.ISO8859-1',
'ny_no.88591': 'ny_NO.ISO8859-1',
'ny_no.iso88591': 'ny_NO.ISO8859-1',
'ny_no.iso885915': 'ny_NO.ISO8859-15',
'ny_no@euro': 'ny_NO.ISO8859-15',
'nynorsk': 'nn_NO.ISO8859-1',
'oc': 'oc_FR.ISO8859-1',
'oc_fr': 'oc_FR.ISO8859-1',
'oc_fr.iso88591': 'oc_FR.ISO8859-1',
'oc_fr.iso885915': 'oc_FR.ISO8859-15',
'oc_fr@euro': 'oc_FR.ISO8859-15',
'or': 'or_IN.UTF-8',
'or_in': 'or_IN.UTF-8',
'pa': 'pa_IN.UTF-8',
'pa_in': 'pa_IN.UTF-8',
'pd': 'pd_US.ISO8859-1',
'pd_de': 'pd_DE.ISO8859-1',
'pd_de.iso88591': 'pd_DE.ISO8859-1',
'pd_de.iso885915': 'pd_DE.ISO8859-15',
'pd_de@euro': 'pd_DE.ISO8859-15',
'pd_us': 'pd_US.ISO8859-1',
'pd_us.iso88591': 'pd_US.ISO8859-1',
'pd_us.iso885915': 'pd_US.ISO8859-15',
'pd_us@euro': 'pd_US.ISO8859-15',
'ph': 'ph_PH.ISO8859-1',
'ph_ph': 'ph_PH.ISO8859-1',
'ph_ph.iso88591': 'ph_PH.ISO8859-1',
'pl': 'pl_PL.ISO8859-2',
'pl_pl': 'pl_PL.ISO8859-2',
'pl_pl.iso88592': 'pl_PL.ISO8859-2',
'polish': 'pl_PL.ISO8859-2',
'portuguese': 'pt_PT.ISO8859-1',
'portuguese.iso88591': 'pt_PT.ISO8859-1',
'portuguese_brazil': 'pt_BR.ISO8859-1',
'portuguese_brazil.8859': 'pt_BR.ISO8859-1',
'posix': 'C',
'posix-utf2': 'C',
'pp': 'pp_AN.ISO8859-1',
'pp_an': 'pp_AN.ISO8859-1',
'pp_an.iso88591': 'pp_AN.ISO8859-1',
'pt': 'pt_PT.ISO8859-1',
'pt.iso885915': 'pt_PT.ISO8859-15',
'pt_br': 'pt_BR.ISO8859-1',
'pt_br.88591': 'pt_BR.ISO8859-1',
'pt_br.iso88591': 'pt_BR.ISO8859-1',
'pt_br.iso885915': 'pt_BR.ISO8859-15',
'pt_br@euro': 'pt_BR.ISO8859-15',
'pt_pt': 'pt_PT.ISO8859-1',
'pt_pt.88591': 'pt_PT.ISO8859-1',
'pt_pt.iso88591': 'pt_PT.ISO8859-1',
'pt_pt.iso885915': 'pt_PT.ISO8859-15',
'pt_pt.iso885915@euro': 'pt_PT.ISO8859-15',
'pt_pt.utf8@euro': 'pt_PT.UTF-8',
'pt_pt@euro': 'pt_PT.ISO8859-15',
'ro': 'ro_RO.ISO8859-2',
'ro_ro': 'ro_RO.ISO8859-2',
'ro_ro.iso88592': 'ro_RO.ISO8859-2',
'romanian': 'ro_RO.ISO8859-2',
'ru': 'ru_RU.UTF-8',
'ru.koi8r': 'ru_RU.KOI8-R',
'ru_ru': 'ru_RU.UTF-8',
'ru_ru.cp1251': 'ru_RU.CP1251',
'ru_ru.iso88595': 'ru_RU.ISO8859-5',
'ru_ru.koi8r': 'ru_RU.KOI8-R',
'ru_ru.microsoftcp1251': 'ru_RU.CP1251',
'ru_ua': 'ru_UA.KOI8-U',
'ru_ua.cp1251': 'ru_UA.CP1251',
'ru_ua.koi8u': 'ru_UA.KOI8-U',
'ru_ua.microsoftcp1251': 'ru_UA.CP1251',
'rumanian': 'ro_RO.ISO8859-2',
'russian': 'ru_RU.ISO8859-5',
'rw': 'rw_RW.ISO8859-1',
'rw_rw': 'rw_RW.ISO8859-1',
'rw_rw.iso88591': 'rw_RW.ISO8859-1',
'sd': 'sd_IN.UTF-8',
'sd@devanagari': 'sd_IN.UTF-8@devanagari',
'sd_in': 'sd_IN.UTF-8',
'sd_in@devanagari': 'sd_IN.UTF-8@devanagari',
'se_no': 'se_NO.UTF-8',
'serbocroatian': 'sr_RS.UTF-8@latin',
'sh': 'sr_RS.UTF-8@latin',
'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2',
'sh_hr': 'sh_HR.ISO8859-2',
'sh_hr.iso88592': 'hr_HR.ISO8859-2',
'sh_sp': 'sr_CS.ISO8859-2',
'sh_yu': 'sr_RS.UTF-8@latin',
'si': 'si_LK.UTF-8',
'si_lk': 'si_LK.UTF-8',
'sinhala': 'si_LK.UTF-8',
'sk': 'sk_SK.ISO8859-2',
'sk_sk': 'sk_SK.ISO8859-2',
'sk_sk.iso88592': 'sk_SK.ISO8859-2',
'sl': 'sl_SI.ISO8859-2',
'sl_cs': 'sl_CS.ISO8859-2',
'sl_si': 'sl_SI.ISO8859-2',
'sl_si.iso88592': 'sl_SI.ISO8859-2',
'slovak': 'sk_SK.ISO8859-2',
'slovene': 'sl_SI.ISO8859-2',
'slovenian': 'sl_SI.ISO8859-2',
'sp': 'sr_CS.ISO8859-5',
'sp_yu': 'sr_CS.ISO8859-5',
'spanish': 'es_ES.ISO8859-1',
'spanish.iso88591': 'es_ES.ISO8859-1',
'spanish_spain': 'es_ES.ISO8859-1',
'spanish_spain.8859': 'es_ES.ISO8859-1',
'sq': 'sq_AL.ISO8859-2',
'sq_al': 'sq_AL.ISO8859-2',
'sq_al.iso88592': 'sq_AL.ISO8859-2',
'sr': 'sr_RS.UTF-8',
'sr@cyrillic': 'sr_RS.UTF-8',
'sr@latin': 'sr_RS.UTF-8@latin',
'sr@latn': 'sr_CS.UTF-8@latin',
'sr_cs': 'sr_CS.UTF-8',
'sr_cs.iso88592': 'sr_CS.ISO8859-2',
'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2',
'sr_cs.iso88595': 'sr_CS.ISO8859-5',
'sr_cs.utf8@latn': 'sr_CS.UTF-8@latin',
'sr_cs@latn': 'sr_CS.UTF-8@latin',
'sr_me': 'sr_ME.UTF-8',
'sr_rs': 'sr_RS.UTF-8',
'sr_rs.utf8@latn': 'sr_RS.UTF-8@latin',
'sr_rs@latin': 'sr_RS.UTF-8@latin',
'sr_rs@latn': 'sr_RS.UTF-8@latin',
'sr_sp': 'sr_CS.ISO8859-2',
'sr_yu': 'sr_RS.UTF-8@latin',
'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.iso88592': 'sr_CS.ISO8859-2',
'sr_yu.iso88595': 'sr_CS.ISO8859-5',
'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5',
'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8',
'sr_yu@cyrillic': 'sr_RS.UTF-8',
'ss': 'ss_ZA.ISO8859-1',
'ss_za': 'ss_ZA.ISO8859-1',
'ss_za.iso88591': 'ss_ZA.ISO8859-1',
'st': 'st_ZA.ISO8859-1',
'st_za': 'st_ZA.ISO8859-1',
'st_za.iso88591': 'st_ZA.ISO8859-1',
'sv': 'sv_SE.ISO8859-1',
'sv.iso885915': 'sv_SE.ISO8859-15',
'sv_fi': 'sv_FI.ISO8859-1',
'sv_fi.iso88591': 'sv_FI.ISO8859-1',
'sv_fi.iso885915': 'sv_FI.ISO8859-15',
'sv_fi.iso885915@euro': 'sv_FI.ISO8859-15',
'sv_fi.utf8@euro': 'sv_FI.UTF-8',
'sv_fi@euro': 'sv_FI.ISO8859-15',
'sv_se': 'sv_SE.ISO8859-1',
'sv_se.88591': 'sv_SE.ISO8859-1',
'sv_se.iso88591': 'sv_SE.ISO8859-1',
'sv_se.iso885915': 'sv_SE.ISO8859-15',
'sv_se@euro': 'sv_SE.ISO8859-15',
'swedish': 'sv_SE.ISO8859-1',
'swedish.iso88591': 'sv_SE.ISO8859-1',
'ta': 'ta_IN.TSCII-0',
'ta_in': 'ta_IN.TSCII-0',
'ta_in.tscii': 'ta_IN.TSCII-0',
'ta_in.tscii0': 'ta_IN.TSCII-0',
'te': 'te_IN.UTF-8',
'tg': 'tg_TJ.KOI8-C',
'tg_tj': 'tg_TJ.KOI8-C',
'tg_tj.koi8c': 'tg_TJ.KOI8-C',
'th': 'th_TH.ISO8859-11',
'th_th': 'th_TH.ISO8859-11',
'th_th.iso885911': 'th_TH.ISO8859-11',
'th_th.tactis': 'th_TH.TIS620',
'th_th.tis620': 'th_TH.TIS620',
'thai': 'th_TH.ISO8859-11',
'tl': 'tl_PH.ISO8859-1',
'tl_ph': 'tl_PH.ISO8859-1',
'tl_ph.iso88591': 'tl_PH.ISO8859-1',
'tn': 'tn_ZA.ISO8859-15',
'tn_za': 'tn_ZA.ISO8859-15',
'tn_za.iso885915': 'tn_ZA.ISO8859-15',
'tr': 'tr_TR.ISO8859-9',
'tr_tr': 'tr_TR.ISO8859-9',
'tr_tr.iso88599': 'tr_TR.ISO8859-9',
'ts': 'ts_ZA.ISO8859-1',
'ts_za': 'ts_ZA.ISO8859-1',
'ts_za.iso88591': 'ts_ZA.ISO8859-1',
'tt': 'tt_RU.TATAR-CYR',
'tt_ru': 'tt_RU.TATAR-CYR',
'tt_ru.koi8c': 'tt_RU.KOI8-C',
'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR',
'turkish': 'tr_TR.ISO8859-9',
'turkish.iso88599': 'tr_TR.ISO8859-9',
'uk': 'uk_UA.KOI8-U',
'uk_ua': 'uk_UA.KOI8-U',
'uk_ua.cp1251': 'uk_UA.CP1251',
'uk_ua.iso88595': 'uk_UA.ISO8859-5',
'uk_ua.koi8u': 'uk_UA.KOI8-U',
'uk_ua.microsoftcp1251': 'uk_UA.CP1251',
'univ': 'en_US.UTF-8',
'universal': 'en_US.UTF-8',
'universal.utf8@ucs4': 'en_US.UTF-8',
'ur': 'ur_PK.CP1256',
'ur_in': 'ur_IN.UTF-8',
'ur_pk': 'ur_PK.CP1256',
'ur_pk.cp1256': 'ur_PK.CP1256',
'ur_pk.microsoftcp1256': 'ur_PK.CP1256',
'uz': 'uz_UZ.UTF-8',
'uz_uz': 'uz_UZ.UTF-8',
'uz_uz.iso88591': 'uz_UZ.ISO8859-1',
'uz_uz.utf8@cyrillic': 'uz_UZ.UTF-8',
'uz_uz@cyrillic': 'uz_UZ.UTF-8',
've': 've_ZA.UTF-8',
've_za': 've_ZA.UTF-8',
'vi': 'vi_VN.TCVN',
'vi_vn': 'vi_VN.TCVN',
'vi_vn.tcvn': 'vi_VN.TCVN',
'vi_vn.tcvn5712': 'vi_VN.TCVN',
'vi_vn.viscii': 'vi_VN.VISCII',
'vi_vn.viscii111': 'vi_VN.VISCII',
'wa': 'wa_BE.ISO8859-1',
'wa_be': 'wa_BE.ISO8859-1',
'wa_be.iso88591': 'wa_BE.ISO8859-1',
'wa_be.iso885915': 'wa_BE.ISO8859-15',
'wa_be.iso885915@euro': 'wa_BE.ISO8859-15',
'wa_be@euro': 'wa_BE.ISO8859-15',
'xh': 'xh_ZA.ISO8859-1',
'xh_za': 'xh_ZA.ISO8859-1',
'xh_za.iso88591': 'xh_ZA.ISO8859-1',
'yi': 'yi_US.CP1255',
'yi_us': 'yi_US.CP1255',
'yi_us.cp1255': 'yi_US.CP1255',
'yi_us.microsoftcp1255': 'yi_US.CP1255',
'zh': 'zh_CN.eucCN',
'zh_cn': 'zh_CN.gb2312',
'zh_cn.big5': 'zh_TW.big5',
'zh_cn.euc': 'zh_CN.eucCN',
'zh_cn.gb18030': 'zh_CN.gb18030',
'zh_cn.gb2312': 'zh_CN.gb2312',
'zh_cn.gbk': 'zh_CN.gbk',
'zh_hk': 'zh_HK.big5hkscs',
'zh_hk.big5': 'zh_HK.big5',
'zh_hk.big5hk': 'zh_HK.big5hkscs',
'zh_hk.big5hkscs': 'zh_HK.big5hkscs',
'zh_tw': 'zh_TW.big5',
'zh_tw.big5': 'zh_TW.big5',
'zh_tw.euc': 'zh_TW.eucTW',
'zh_tw.euctw': 'zh_TW.eucTW',
'zu': 'zu_ZA.ISO8859-1',
'zu_za': 'zu_ZA.ISO8859-1',
'zu_za.iso88591': 'zu_ZA.ISO8859-1',
}
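# Example (editor's note, not part of the original module): the normalization
# code lowercases its input before consulting this table, so lookups behave
# like:
#   >>> locale_alias['de']
#   'de_DE.ISO8859-1'
#   >>> locale_alias['en_us']
#   'en_US.ISO8859-1'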
#
# This maps Windows language identifiers to locale strings.
#
# This list has been updated from
# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
# to include every locale up to Windows Vista.
#
# NOTE: this mapping is incomplete. If your language is missing, please
# submit a bug report to the Python bug tracker at http://bugs.python.org/
# Make sure you include the missing language identifier and the suggested
# locale code.
#
windows_locale = {
0x0436: "af_ZA", # Afrikaans
0x041c: "sq_AL", # Albanian
0x0484: "gsw_FR",# Alsatian - France
0x045e: "am_ET", # Amharic - Ethiopia
0x0401: "ar_SA", # Arabic - Saudi Arabia
0x0801: "ar_IQ", # Arabic - Iraq
0x0c01: "ar_EG", # Arabic - Egypt
0x1001: "ar_LY", # Arabic - Libya
0x1401: "ar_DZ", # Arabic - Algeria
0x1801: "ar_MA", # Arabic - Morocco
0x1c01: "ar_TN", # Arabic - Tunisia
0x2001: "ar_OM", # Arabic - Oman
0x2401: "ar_YE", # Arabic - Yemen
0x2801: "ar_SY", # Arabic - Syria
0x2c01: "ar_JO", # Arabic - Jordan
0x3001: "ar_LB", # Arabic - Lebanon
0x3401: "ar_KW", # Arabic - Kuwait
0x3801: "ar_AE", # Arabic - United Arab Emirates
0x3c01: "ar_BH", # Arabic - Bahrain
0x4001: "ar_QA", # Arabic - Qatar
0x042b: "hy_AM", # Armenian
0x044d: "as_IN", # Assamese - India
0x042c: "az_AZ", # Azeri - Latin
0x082c: "az_AZ", # Azeri - Cyrillic
0x046d: "ba_RU", # Bashkir
0x042d: "eu_ES", # Basque - Spain
0x0423: "be_BY", # Belarusian
0x0445: "bn_IN", # Bengali
0x201a: "bs_BA", # Bosnian - Cyrillic
0x141a: "bs_BA", # Bosnian - Latin
0x047e: "br_FR", # Breton - France
0x0402: "bg_BG", # Bulgarian
# 0x0455: "my_MM", # Burmese - Not supported
0x0403: "ca_ES", # Catalan
0x0004: "zh_CHS",# Chinese - Simplified
0x0404: "zh_TW", # Chinese - Taiwan
0x0804: "zh_CN", # Chinese - PRC
0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
0x1004: "zh_SG", # Chinese - Singapore
0x1404: "zh_MO", # Chinese - Macao S.A.R.
0x7c04: "zh_CHT",# Chinese - Traditional
0x0483: "co_FR", # Corsican - France
0x041a: "hr_HR", # Croatian
0x101a: "hr_BA", # Croatian - Bosnia
0x0405: "cs_CZ", # Czech
0x0406: "da_DK", # Danish
0x048c: "gbz_AF",# Dari - Afghanistan
0x0465: "div_MV",# Divehi - Maldives
0x0413: "nl_NL", # Dutch - The Netherlands
0x0813: "nl_BE", # Dutch - Belgium
0x0409: "en_US", # English - United States
0x0809: "en_GB", # English - United Kingdom
0x0c09: "en_AU", # English - Australia
0x1009: "en_CA", # English - Canada
0x1409: "en_NZ", # English - New Zealand
0x1809: "en_IE", # English - Ireland
0x1c09: "en_ZA", # English - South Africa
0x2009: "en_JA", # English - Jamaica
0x2409: "en_CB", # English - Caribbean
0x2809: "en_BZ", # English - Belize
0x2c09: "en_TT", # English - Trinidad
0x3009: "en_ZW", # English - Zimbabwe
0x3409: "en_PH", # English - Philippines
0x4009: "en_IN", # English - India
0x4409: "en_MY", # English - Malaysia
0x4809: "en_IN", # English - Singapore
0x0425: "et_EE", # Estonian
0x0438: "fo_FO", # Faroese
0x0464: "fil_PH",# Filipino
0x040b: "fi_FI", # Finnish
0x040c: "fr_FR", # French - France
0x080c: "fr_BE", # French - Belgium
0x0c0c: "fr_CA", # French - Canada
0x100c: "fr_CH", # French - Switzerland
0x140c: "fr_LU", # French - Luxembourg
0x180c: "fr_MC", # French - Monaco
0x0462: "fy_NL", # Frisian - Netherlands
0x0456: "gl_ES", # Galician
0x0437: "ka_GE", # Georgian
0x0407: "de_DE", # German - Germany
0x0807: "de_CH", # German - Switzerland
0x0c07: "de_AT", # German - Austria
0x1007: "de_LU", # German - Luxembourg
0x1407: "de_LI", # German - Liechtenstein
0x0408: "el_GR", # Greek
0x046f: "kl_GL", # Greenlandic - Greenland
0x0447: "gu_IN", # Gujarati
0x0468: "ha_NG", # Hausa - Latin
0x040d: "he_IL", # Hebrew
0x0439: "hi_IN", # Hindi
0x040e: "hu_HU", # Hungarian
0x040f: "is_IS", # Icelandic
0x0421: "id_ID", # Indonesian
0x045d: "iu_CA", # Inuktitut - Syllabics
0x085d: "iu_CA", # Inuktitut - Latin
0x083c: "ga_IE", # Irish - Ireland
0x0410: "it_IT", # Italian - Italy
0x0810: "it_CH", # Italian - Switzerland
0x0411: "ja_JP", # Japanese
0x044b: "kn_IN", # Kannada - India
0x043f: "kk_KZ", # Kazakh
0x0453: "kh_KH", # Khmer - Cambodia
0x0486: "qut_GT",# K'iche - Guatemala
0x0487: "rw_RW", # Kinyarwanda - Rwanda
0x0457: "kok_IN",# Konkani
0x0412: "ko_KR", # Korean
0x0440: "ky_KG", # Kyrgyz
0x0454: "lo_LA", # Lao - Lao PDR
0x0426: "lv_LV", # Latvian
0x0427: "lt_LT", # Lithuanian
0x082e: "dsb_DE",# Lower Sorbian - Germany
0x046e: "lb_LU", # Luxembourgish
0x042f: "mk_MK", # FYROM Macedonian
0x043e: "ms_MY", # Malay - Malaysia
0x083e: "ms_BN", # Malay - Brunei Darussalam
0x044c: "ml_IN", # Malayalam - India
0x043a: "mt_MT", # Maltese
0x0481: "mi_NZ", # Maori
0x047a: "arn_CL",# Mapudungun
0x044e: "mr_IN", # Marathi
0x047c: "moh_CA",# Mohawk - Canada
0x0450: "mn_MN", # Mongolian - Cyrillic
0x0850: "mn_CN", # Mongolian - PRC
0x0461: "ne_NP", # Nepali
0x0414: "nb_NO", # Norwegian - Bokmal
0x0814: "nn_NO", # Norwegian - Nynorsk
0x0482: "oc_FR", # Occitan - France
0x0448: "or_IN", # Oriya - India
0x0463: "ps_AF", # Pashto - Afghanistan
0x0429: "fa_IR", # Persian
0x0415: "pl_PL", # Polish
0x0416: "pt_BR", # Portuguese - Brazil
0x0816: "pt_PT", # Portuguese - Portugal
0x0446: "pa_IN", # Punjabi
0x046b: "quz_BO",# Quechua (Bolivia)
0x086b: "quz_EC",# Quechua (Ecuador)
0x0c6b: "quz_PE",# Quechua (Peru)
0x0418: "ro_RO", # Romanian - Romania
0x0417: "rm_CH", # Romansh
0x0419: "ru_RU", # Russian
0x243b: "smn_FI",# Sami Finland
0x103b: "smj_NO",# Sami Norway
0x143b: "smj_SE",# Sami Sweden
0x043b: "se_NO", # Sami Northern Norway
0x083b: "se_SE", # Sami Northern Sweden
0x0c3b: "se_FI", # Sami Northern Finland
0x203b: "sms_FI",# Sami Skolt
0x183b: "sma_NO",# Sami Southern Norway
0x1c3b: "sma_SE",# Sami Southern Sweden
0x044f: "sa_IN", # Sanskrit
0x0c1a: "sr_SP", # Serbian - Cyrillic
0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic
0x081a: "sr_SP", # Serbian - Latin
0x181a: "sr_BA", # Serbian - Bosnia Latin
0x045b: "si_LK", # Sinhala - Sri Lanka
0x046c: "ns_ZA", # Northern Sotho
0x0432: "tn_ZA", # Setswana - Southern Africa
0x041b: "sk_SK", # Slovak
0x0424: "sl_SI", # Slovenian
0x040a: "es_ES", # Spanish - Spain
0x080a: "es_MX", # Spanish - Mexico
0x0c0a: "es_ES", # Spanish - Spain (Modern)
0x100a: "es_GT", # Spanish - Guatemala
0x140a: "es_CR", # Spanish - Costa Rica
0x180a: "es_PA", # Spanish - Panama
0x1c0a: "es_DO", # Spanish - Dominican Republic
0x200a: "es_VE", # Spanish - Venezuela
0x240a: "es_CO", # Spanish - Colombia
0x280a: "es_PE", # Spanish - Peru
0x2c0a: "es_AR", # Spanish - Argentina
0x300a: "es_EC", # Spanish - Ecuador
0x340a: "es_CL", # Spanish - Chile
0x380a: "es_UR", # Spanish - Uruguay
0x3c0a: "es_PY", # Spanish - Paraguay
0x400a: "es_BO", # Spanish - Bolivia
0x440a: "es_SV", # Spanish - El Salvador
0x480a: "es_HN", # Spanish - Honduras
0x4c0a: "es_NI", # Spanish - Nicaragua
0x500a: "es_PR", # Spanish - Puerto Rico
0x540a: "es_US", # Spanish - United States
# 0x0430: "", # Sutu - Not supported
0x0441: "sw_KE", # Swahili
0x041d: "sv_SE", # Swedish - Sweden
0x081d: "sv_FI", # Swedish - Finland
0x045a: "syr_SY",# Syriac
0x0428: "tg_TJ", # Tajik - Cyrillic
0x085f: "tmz_DZ",# Tamazight - Latin
0x0449: "ta_IN", # Tamil
0x0444: "tt_RU", # Tatar
0x044a: "te_IN", # Telugu
0x041e: "th_TH", # Thai
0x0851: "bo_BT", # Tibetan - Bhutan
0x0451: "bo_CN", # Tibetan - PRC
0x041f: "tr_TR", # Turkish
0x0442: "tk_TM", # Turkmen - Cyrillic
0x0480: "ug_CN", # Uighur - Arabic
0x0422: "uk_UA", # Ukrainian
0x042e: "wen_DE",# Upper Sorbian - Germany
0x0420: "ur_PK", # Urdu
0x0820: "ur_IN", # Urdu - India
0x0443: "uz_UZ", # Uzbek - Latin
0x0843: "uz_UZ", # Uzbek - Cyrillic
0x042a: "vi_VN", # Vietnamese
0x0452: "cy_GB", # Welsh
0x0488: "wo_SN", # Wolof - Senegal
0x0434: "xh_ZA", # Xhosa - South Africa
0x0485: "sah_RU",# Yakut - Cyrillic
0x0478: "ii_CN", # Yi - PRC
0x046a: "yo_NG", # Yoruba - Nigeria
0x0435: "zu_ZA", # Zulu
}
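# Editor's note: the keys are Windows LCIDs, in which the low 10 bits select
# the primary language and the upper bits the sublanguage, which is why the
# Arabic entries above step by 0x0400 (0x0401, 0x0801, 0x0c01, ...).
# A lookup is a plain dict access:
#   >>> windows_locale.get(0x0407)
#   'de_DE'
#   >>> windows_locale.get(0x0c01)
#   'ar_EG'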
def _print_locale():
""" Test function.
"""
categories = {}
def _init_categories(categories=categories):
for k,v in globals().items():
if k[:3] == 'LC_':
categories[k] = v
_init_categories()
del categories['LC_ALL']
print 'Locale defaults as determined by getdefaultlocale():'
print '-'*72
lang, enc = getdefaultlocale()
print 'Language: ', lang or '(undefined)'
print 'Encoding: ', enc or '(undefined)'
print
print 'Locale settings on startup:'
print '-'*72
for name,category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
print
print 'Locale settings after calling resetlocale():'
print '-'*72
resetlocale()
for name,category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
try:
setlocale(LC_ALL, "")
except:
print 'NOTE:'
print 'setlocale(LC_ALL, "") does not support the default locale'
print 'given in the OS environment variables.'
else:
print
print 'Locale settings after calling setlocale(LC_ALL, ""):'
print '-'*72
for name,category in categories.items():
print name, '...'
lang, enc = getlocale(category)
print ' Language: ', lang or '(undefined)'
print ' Encoding: ', enc or '(undefined)'
print
###
try:
LC_MESSAGES
except NameError:
pass
else:
__all__.append("LC_MESSAGES")
if __name__=='__main__':
print 'Locale aliasing:'
print
_print_locale()
print
print 'Number formatting:'
print
_test()
|
hoangcuongflp/enjarify
|
refs/heads/master
|
enjarify/jvm/scalartypes.py
|
35
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Primitive type inference
# In Dalvik bytecode, constants are untyped, which effectively gives them a union type:
# they can be zero (int/float/null), narrow (int/float) or wide (long/double)
INVALID = 0
INT = 1 << 0
FLOAT = 1 << 1
OBJ = 1 << 2
LONG = 1 << 3
DOUBLE = 1 << 4
ZERO = INT | FLOAT | OBJ
C32 = INT | FLOAT
C64 = LONG | DOUBLE
ALL = ZERO | C64
_descToScalar = dict(zip(map(ord, 'ZBCSIFJDL['), [INT, INT, INT, INT, INT, FLOAT, LONG, DOUBLE, OBJ, OBJ]))
def fromDesc(desc):
return _descToScalar[desc[0]]
def iswide(st):
return st & C64
def paramTypes(method_id, static):
temp = method_id.getSpacedParamTypes(static)
return [(INVALID if desc is None else fromDesc(desc)) for desc in temp]
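# A minimal usage sketch (editor's addition, not part of enjarify), assuming
# type descriptors are passed as bytes, as elsewhere in this codebase:
if __name__ == '__main__':
    assert fromDesc(b'I') == INT        # 'I' is a 32-bit int
    assert fromDesc(b'[I') == OBJ       # arrays are object references
    assert iswide(fromDesc(b'J'))       # long occupies two registers
    assert not iswide(fromDesc(b'F'))   # float is narrow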
|
niteoweb/libcloud
|
refs/heads/niteoweb_internal_release
|
docs/examples/compute/cloudstack/deploy_node_with_keypair_security_group.py
|
63
|
from pprint import pprint
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
# Import the deployment-specific modules
from libcloud.compute.deployment import ScriptDeployment
from libcloud.compute.deployment import MultiStepDeployment
cls = get_driver(Provider.EXOSCALE)
driver = cls('api key', 'api secret key')
image = driver.list_images()[0]
size = driver.list_sizes()[0]
# Define the scripts that you want to run during deployment
script = ScriptDeployment('/bin/date')
msd = MultiStepDeployment([script])
node = driver.deploy_node(name='test', image=image, size=size,
ssh_key='~/.ssh/id_rsa_test',
ex_keyname='test-keypair',
deploy=msd)
# The stdout of the deployment can be checked on the `script` object
pprint(script.stdout)
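# Similarly (editor's note), ScriptDeployment also records the script's exit
# code and standard error once deploy_node returns:
pprint(script.exit_status)
pprint(script.stderr)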
|
CEG-FYP-OpenStack/scheduler
|
refs/heads/master
|
nova/tests/functional/api_sample_tests/test_instance_actions.py
|
8
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_config import cfg
import six
from nova.tests.functional.api_sample_tests import api_sample_base
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_server_actions
from nova.tests.unit import utils as test_utils
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
microversion = None
ADMIN_API = True
extension_name = 'os-instance-actions'
def _get_flags(self):
f = super(ServerActionsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.instance_actions.Instance_actions')
return f
def _fake_get(self, context, instance_uuid, expected_attrs=None,
want_objects=True):
return fake_instance.fake_instance_obj(
None, **{'uuid': instance_uuid})
def setUp(self):
super(ServerActionsSampleJsonTest, self).setUp()
self.api.microversion = self.microversion
self.actions = fake_server_actions.FAKE_ACTIONS
self.events = fake_server_actions.FAKE_EVENTS
self.instance = test_utils.get_test_instance(obj=True)
def fake_instance_action_get_by_request_id(context, uuid, request_id):
return copy.deepcopy(self.actions[uuid][request_id])
def fake_server_actions_get(context, uuid):
return [copy.deepcopy(value) for value in
six.itervalues(self.actions[uuid])]
def fake_instance_action_events_get(context, action_id):
return copy.deepcopy(self.events[action_id])
def fake_instance_get_by_uuid(context, instance_id):
return self.instance
self.stub_out('nova.db.action_get_by_request_id',
fake_instance_action_get_by_request_id)
self.stub_out('nova.db.actions_get', fake_server_actions_get)
self.stub_out('nova.db.action_events_get',
fake_instance_action_events_get)
self.stub_out('nova.db.instance_get_by_uuid',
fake_instance_get_by_uuid)
self.stub_out('nova.compute.api.API.get', self._fake_get)
def test_instance_action_get(self):
fake_uuid = fake_server_actions.FAKE_UUID
fake_request_id = fake_server_actions.FAKE_REQUEST_ID1
fake_action = self.actions[fake_uuid][fake_request_id]
response = self._do_get('servers/%s/os-instance-actions/%s' %
(fake_uuid, fake_request_id))
subs = {}
subs['action'] = '(reboot)|(resize)'
subs['instance_uuid'] = str(fake_uuid)
subs['integer_id'] = '[0-9]+'
subs['request_id'] = str(fake_action['request_id'])
subs['start_time'] = str(fake_action['start_time'])
subs['result'] = '(Success)|(Error)'
subs['event'] = '(schedule)|(compute_create)'
self._verify_response('instance-action-get-resp', subs, response, 200)
def test_instance_actions_list(self):
fake_uuid = fake_server_actions.FAKE_UUID
response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid))
subs = {}
subs['action'] = '(reboot)|(resize)'
subs['integer_id'] = '[0-9]+'
subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12}')
self._verify_response('instance-actions-list-resp', subs,
response, 200)
class ServerActionsV221SampleJsonTest(ServerActionsSampleJsonTest):
microversion = '2.21'
scenarios = [('v2_21', {'api_major_version': 'v2.1'})]
def _fake_get(self, context, instance_uuid, expected_attrs=None,
want_objects=True):
self.assertEqual('yes', context.read_deleted)
return fake_instance.fake_instance_obj(
None, **{'uuid': instance_uuid})
|
junalmeida/Sick-Beard
|
refs/heads/master
|
lib/guessit/transfo/guess_episodes_rexps.py
|
10
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import Transformer
from guessit.matcher import GuessFinder
from guessit.patterns import sep
from guessit.containers import PropertiesContainer, WeakValidator, NoValidator
from guessit.patterns.numeral import numeral, digital_numeral, parse_numeral
from re import split as re_split
class GuessEpisodesRexps(Transformer):
def __init__(self):
Transformer.__init__(self, 20)
self.container = PropertiesContainer(enhance=False, canonical_from_pattern=False)
def episode_parser(value):
values = re_split('[a-zA-Z]', value)
values = [x for x in values if x]
ret = []
for letters_elt in values:
dashed_values = letters_elt.split('-')
dashed_values = [x for x in dashed_values if x]
if len(dashed_values) > 1:
for _ in range(0, len(dashed_values) - 1):
start_dash_ep = parse_numeral(dashed_values[0])
end_dash_ep = parse_numeral(dashed_values[1])
for dash_ep in range(start_dash_ep, end_dash_ep + 1):
ret.append(dash_ep)
else:
ret.append(parse_numeral(letters_elt))
if len(ret) > 1:
return {None: ret[0], 'episodeList': ret} # TODO: Should support seasonList also
elif len(ret) > 0:
return ret[0]
else:
return None
self.container.register_property(None, r'((?:season|saison)' + sep + '?(?P<season>' + numeral + '))', confidence=1.0, formatter=parse_numeral)
self.container.register_property(None, r'(s(?P<season>' + digital_numeral + ')[^0-9]?' + sep + '?(?P<episodeNumber>(?:e' + digital_numeral + '(?:' + sep + '?[e-]' + digital_numeral + ')*)))[^0-9]', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser}, validator=NoValidator())
self.container.register_property(None, r'[^0-9]((?P<season>' + digital_numeral + ')[^0-9 .-]?-?(?P<episodeNumber>(?:x' + digital_numeral + '(?:' + sep + '?[x-]' + digital_numeral + ')*)))[^0-9]', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser})
self.container.register_property(None, r'(s(?P<season>' + digital_numeral + '))[^0-9]', confidence=0.6, formatter=parse_numeral, validator=NoValidator())
self.container.register_property(None, r'((?P<episodeNumber>' + digital_numeral + ')v[23])', confidence=0.6, formatter=parse_numeral)
self.container.register_property(None, r'((?:ep)' + sep + r'(?P<episodeNumber>' + numeral + '))[^0-9]', confidence=0.7, formatter=parse_numeral)
self.container.register_property(None, r'(e(?P<episodeNumber>' + digital_numeral + '))', confidence=0.6, formatter=parse_numeral)
self.container.register_canonical_properties('other', 'FiNAL', 'Complete', validator=WeakValidator())
def supported_properties(self):
return ['episodeNumber', 'season']
def guess_episodes_rexps(self, string, node=None, options=None):
found = self.container.find_properties(string, node)
return self.container.as_guess(found, string)
def should_process(self, mtree, options=None):
return mtree.guess.get('type', '').startswith('episode')
def process(self, mtree, options=None):
GuessFinder(self.guess_episodes_rexps, None, self.log, options).process_nodes(mtree.unidentified_leaves())
|
Frouk/zulip
|
refs/heads/master
|
zerver/templatetags/app_filters.py
|
126
|
from django.template import Library
register = Library()
def and_n_others(values, limit):
# A helper for the commonly appended "and N other(s)" string, with
# the appropriate pluralization.
return " and %d other%s" % (len(values) - limit,
"" if len(values) == limit + 1 else "s")
@register.filter(name='display_list', is_safe=True)
def display_list(values, display_limit):
"""
Given a list of values, return a string nicely formatting those values,
summarizing when you have more than `display_limit`. Eg, for a
`display_limit` of 3 we get the following possible cases:
Jessica
Jessica and Waseem
Jessica, Waseem, and Tim
Jessica, Waseem, Tim, and 1 other
Jessica, Waseem, Tim, and 2 others
"""
if len(values) == 1:
# One value, show it.
display_string = "%s" % (values[0],)
elif len(values) <= display_limit:
# Fewer than `display_limit` values, show all of them.
display_string = ", ".join(
"%s" % (value,) for value in values[:-1])
display_string += " and %s" % (values[-1],)
else:
# More than `display_limit` values, only mention a few.
display_string = ", ".join(
"%s" % (value,) for value in values[:display_limit])
display_string += and_n_others(values, display_limit)
return display_string
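# A quick sanity check (editor's addition, not part of the original module):
if __name__ == '__main__':
    assert display_list(['Jessica'], 3) == 'Jessica'
    assert display_list(['Jessica', 'Waseem'], 3) == 'Jessica and Waseem'
    assert display_list(['Jessica', 'Waseem', 'Tim', 'Gordon', 'Hamlet'], 2) == \
        'Jessica, Waseem and 3 others'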
|
kennethjiang/heroku-buildpack-python-libffi
|
refs/heads/master
|
test/django-1.5-skeleton/haystack/wsgi.py
|
45
|
"""
WSGI config for haystack project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "haystack.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "haystack.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
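# For instance (editor's sketch, not part of the original project), a minimal
# pass-through middleware that logs each request path could be applied as:
#
# def logging_middleware(app):
#     def wrapper(environ, start_response):
#         print(environ.get('PATH_INFO'))
#         return app(environ, start_response)
#     return wrapper
#
# application = logging_middleware(application)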
|
exploreodoo/datStruct
|
refs/heads/master
|
odoo/addons/account_cancel/models/__init__.py
|
243
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import account_bank_statement
|
apaleyes/mxnet
|
refs/heads/master
|
tests/python/unittest/test_multi_device_exec.py
|
15
|
import os
import mxnet as mx
def test_ctx_group():
with mx.AttrScope(ctx_group='stage1'):
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
set_stage1 = set(act1.list_arguments())
with mx.AttrScope(ctx_group='stage2'):
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
fc3 = mx.symbol.BatchNorm(fc3)
mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
set_stage2 = set(mlp.list_arguments()) - set_stage1
group2ctx = {
'stage1' : mx.cpu(1),
'stage2' : mx.cpu(2)
}
texec = mlp.simple_bind(mx.cpu(0),
group2ctx=group2ctx,
data=(1,200))
for arr, name in zip(texec.arg_arrays, mlp.list_arguments()):
if name in set_stage1:
assert arr.context == group2ctx['stage1']
else:
assert arr.context == group2ctx['stage2']
if __name__ == '__main__':
test_ctx_group()
|
OpenNetworkingFoundation/ONFOpenTransport
|
refs/heads/develop
|
RI/flask_server/tapi_server/models/tapi_odu_owned_node_edge_point_augmentation1.py
|
4
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_odu_odu_node_edge_point_spec import TapiOduOduNodeEdgePointSpec # noqa: F401,E501
from tapi_server import util
class TapiOduOwnedNodeEdgePointAugmentation1(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, odu_node_edge_point_spec=None): # noqa: E501
"""TapiOduOwnedNodeEdgePointAugmentation1 - a model defined in OpenAPI
:param odu_node_edge_point_spec: The odu_node_edge_point_spec of this TapiOduOwnedNodeEdgePointAugmentation1. # noqa: E501
:type odu_node_edge_point_spec: TapiOduOduNodeEdgePointSpec
"""
self.openapi_types = {
'odu_node_edge_point_spec': TapiOduOduNodeEdgePointSpec
}
self.attribute_map = {
'odu_node_edge_point_spec': 'odu-node-edge-point-spec'
}
self._odu_node_edge_point_spec = odu_node_edge_point_spec
@classmethod
def from_dict(cls, dikt) -> 'TapiOduOwnedNodeEdgePointAugmentation1':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.odu.OwnedNodeEdgePointAugmentation1 of this TapiOduOwnedNodeEdgePointAugmentation1. # noqa: E501
:rtype: TapiOduOwnedNodeEdgePointAugmentation1
"""
return util.deserialize_model(dikt, cls)
@property
def odu_node_edge_point_spec(self):
"""Gets the odu_node_edge_point_spec of this TapiOduOwnedNodeEdgePointAugmentation1.
:return: The odu_node_edge_point_spec of this TapiOduOwnedNodeEdgePointAugmentation1.
:rtype: TapiOduOduNodeEdgePointSpec
"""
return self._odu_node_edge_point_spec
@odu_node_edge_point_spec.setter
def odu_node_edge_point_spec(self, odu_node_edge_point_spec):
"""Sets the odu_node_edge_point_spec of this TapiOduOwnedNodeEdgePointAugmentation1.
:param odu_node_edge_point_spec: The odu_node_edge_point_spec of this TapiOduOwnedNodeEdgePointAugmentation1.
:type odu_node_edge_point_spec: TapiOduOduNodeEdgePointSpec
"""
self._odu_node_edge_point_spec = odu_node_edge_point_spec
|
hfp/tensorflow-xsmm
|
refs/heads/master
|
tensorflow/contrib/seq2seq/python/ops/helper.py
|
36
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of helpers for use with SamplingDecoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.distributions import bernoulli
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.util import nest
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"SampleEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
"InferenceHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
"""Interface for implementing sampling in seq2seq decoders.
Helper instances are used by `BasicDecoder`.
"""
@abc.abstractproperty
def batch_size(self):
"""Batch size of tensor returned by `sample`.
Returns a scalar int32 tensor.
"""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractproperty
def sample_ids_shape(self):
"""Shape of tensor returned by `sample`, excluding the batch dimension.
Returns a `TensorShape`.
"""
raise NotImplementedError("sample_ids_shape has not been implemented")
@abc.abstractproperty
def sample_ids_dtype(self):
"""DType of tensor returned by `sample`.
Returns a DType.
"""
raise NotImplementedError("sample_ids_dtype has not been implemented")
@abc.abstractmethod
def initialize(self, name=None):
"""Returns `(initial_finished, initial_inputs)`."""
pass
@abc.abstractmethod
def sample(self, time, outputs, state, name=None):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Returns `(finished, next_inputs, next_state)`."""
pass
class CustomHelper(Helper):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
sample_ids_shape=None, sample_ids_dtype=None):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)`
for the first iteration.
sample_fn: callable that takes `(time, outputs, state)`
and emits tensor `sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
`int32`, the shape of each value in the `sample_ids` batch. Defaults to
a scalar.
sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_ids_shape
@property
def sample_ids_dtype(self):
return self._sample_ids_dtype
def initialize(self, name=None):
with ops.name_scope(name, "%sInitialize" % type(self).__name__):
(finished, next_inputs) = self._initialize_fn()
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(
name, "%sSample" % type(self).__name__, (time, outputs, state)):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(
name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
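# A minimal sketch (not part of the original module) of how the three callables
# plug into a CustomHelper; `my_initialize`, `my_sample` and `my_next_inputs`
# are hypothetical functions with the signatures documented above:
#
#   helper = CustomHelper(
#       initialize_fn=my_initialize,    # () -> (finished, first_inputs)
#       sample_fn=my_sample,            # (time, outputs, state) -> sample_ids
#       next_inputs_fn=my_next_inputs)  # -> (finished, next_inputs, next_state)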
class TrainingHelper(Helper):
"""A helper for use during training. Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, inputs, sequence_length, time_major=False, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
name: Name scope for any created operations.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
inputs = ops.convert_to_tensor(inputs, name="inputs")
self._inputs = inputs
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
@property
def inputs(self):
return self._inputs
@property
def sequence_length(self):
return self._sequence_length
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
with ops.name_scope(name, "TrainingHelperInitialize"):
finished = math_ops.equal(0, self._sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, name=None, **unused_kwargs):
with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
"""next_inputs_fn for TrainingHelper."""
with ops.name_scope(name, "TrainingHelperNextInputs",
[time, outputs, state]):
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(read_from_ta, self._input_tas))
return (finished, next_inputs, state)
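# A minimal usage sketch (not part of the original module). A TrainingHelper is
# normally handed to a `BasicDecoder` together with an RNN cell; `cell`,
# `decoder_inputs`, `lengths` and `batch_size` are assumed placeholders:
#
#   helper = TrainingHelper(inputs=decoder_inputs, sequence_length=lengths)
#   my_decoder = basic_decoder.BasicDecoder(
#       cell=cell, helper=helper,
#       initial_state=cell.zero_state(batch_size, dtypes.float32))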
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling.
Returns -1s for sample_ids where no sampling took place; valid sample id
values elsewhere.
"""
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
sampling_probability: A 0D `float32` tensor: the probability of sampling
categorically from the output ids instead of reading directly from the
inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
[embedding, sampling_probability]):
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state]):
# Return -1s where we did not sample, and sample_ids elsewhere
select_sampler = bernoulli.Bernoulli(
probs=self._sampling_probability, dtype=dtypes.bool)
select_sample = select_sampler.sample(
sample_shape=self.batch_size, seed=self._scheduling_seed)
sample_id_sampler = categorical.Categorical(logits=outputs)
return array_ops.where(
select_sample,
sample_id_sampler.sample(seed=self._seed),
gen_array_ops.fill([self.batch_size], -1))
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), dtypes.int32)
sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
inputs_not_sampling = array_ops.gather_nd(
base_next_inputs, where_not_sampling)
sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
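# A minimal usage sketch (not part of the original module): train with a 25%
# chance per step of feeding back the model's own embedded samples instead of
# the ground-truth inputs; `embedding_matrix`, `decoder_inputs` and `lengths`
# are assumed placeholders:
#
#   helper = ScheduledEmbeddingTrainingHelper(
#       inputs=decoder_inputs, sequence_length=lengths,
#       embedding=embedding_matrix, sampling_probability=0.25)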
class ScheduledOutputTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling directly to outputs.
Returns False for sample_ids where no sampling took place; True elsewhere.
"""
def __init__(self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_inputs_fn=None,
auxiliary_inputs=None, name=None):
"""Initializer.
Args:
inputs: A (structure) of input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_inputs_fn: (Optional) callable to apply to the RNN outputs to create
the next input when sampling. If `None` (default), the RNN outputs will
be used as the next inputs.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledOutputTrainingHelper",
[inputs, auxiliary_inputs, sampling_probability]):
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs, name="inputs")
auxiliary_inputs = ops.convert_to_tensor(
auxiliary_inputs, name="auxiliary_inputs")
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
inputs, auxiliary_inputs)
if not time_major:
auxiliary_inputs = nest.map_structure(
_transpose_batch_time, auxiliary_inputs)
self._auxiliary_input_tas = (
nest.map_structure(_unstack_ta, auxiliary_inputs)
if auxiliary_inputs is not None else None)
self._seed = seed
self._next_inputs_fn = next_inputs_fn
super(ScheduledOutputTrainingHelper, self).__init__(
inputs=maybe_concatenated_inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledOutputTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
[time, outputs, state]):
sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
return sampler.sample(sample_shape=self.batch_size, seed=self._seed)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
sample_ids = math_ops.cast(sample_ids, dtypes.bool)
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(
lambda ta: ta.read(next_time), self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self._next_inputs_fn is None:
return array_ops.where(
sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(
array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self._next_inputs_fn(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
next_inputs = control_flow_ops.cond(
math_ops.logical_or(all_finished, no_samples),
lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
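# A minimal usage sketch (not part of the original module): feed the raw RNN
# outputs back as next inputs half of the time. Note the output size must match
# the input size unless a projecting `next_inputs_fn` is supplied:
#
#   helper = ScheduledOutputTrainingHelper(
#       inputs=decoder_inputs, sequence_length=lengths,
#       sampling_probability=0.5)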
class GreedyEmbeddingHelper(Helper):
"""A helper for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
"""sample for GreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
sample_ids = math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state)
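# A minimal usage sketch (not part of the original module) for greedy decoding
# at inference time; `GO_ID` and `EOS_ID` are assumed vocabulary constants:
#
#   helper = GreedyEmbeddingHelper(
#       embedding=embedding_matrix,
#       start_tokens=array_ops.fill([batch_size], GO_ID),
#       end_token=EOS_ID)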
class SampleEmbeddingHelper(GreedyEmbeddingHelper):
"""A helper for use during inference.
Uses sampling (from a distribution) instead of argmax and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token,
softmax_temperature=None, seed=None):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
softmax_temperature: (Optional) `float32` scalar, value to divide the
logits by before computing the softmax. Larger values (above 1.0) result
in more random samples, while smaller values push the sampling
distribution towards the argmax. Must be strictly greater than 0.
Defaults to 1.0.
seed: (Optional) The sampling seed.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
super(SampleEmbeddingHelper, self).__init__(
embedding, start_tokens, end_token)
self._softmax_temperature = softmax_temperature
self._seed = seed
def sample(self, time, outputs, state, name=None):
"""sample for SampleEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, we sample instead of argmax (greedy).
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
if self._softmax_temperature is None:
logits = outputs
else:
logits = outputs / self._softmax_temperature
sample_id_sampler = categorical.Categorical(logits=logits)
sample_ids = sample_id_sampler.sample(seed=self._seed)
return sample_ids
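# A minimal usage sketch (not part of the original module): sampling with a
# lowered temperature (0.5) sharpens the distribution relative to raw logits:
#
#   helper = SampleEmbeddingHelper(
#       embedding=embedding_matrix,
#       start_tokens=array_ops.fill([batch_size], GO_ID),
#       end_token=EOS_ID, softmax_temperature=0.5)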
class InferenceHelper(Helper):
"""A helper to use during inference with a custom sampling function."""
def __init__(self, sample_fn, sample_shape, sample_dtype,
start_inputs, end_fn, next_inputs_fn=None):
"""Initializer.
Args:
sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`.
sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`,
        the shape of each sample in the batch returned by `sample_fn`.
sample_dtype: the dtype of the sample returned by `sample_fn`.
start_inputs: The initial batch of inputs.
end_fn: A callable that takes `sample_ids` and emits a `bool` vector
shaped `[batch_size]` indicating whether each sample is an end token.
next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns
the next batch of inputs. If not provided, `sample_ids` is used as the
next batch of inputs.
"""
self._sample_fn = sample_fn
self._end_fn = end_fn
self._sample_shape = tensor_shape.TensorShape(sample_shape)
self._sample_dtype = sample_dtype
self._next_inputs_fn = next_inputs_fn
self._batch_size = array_ops.shape(start_inputs)[0]
self._start_inputs = ops.convert_to_tensor(
start_inputs, name="start_inputs")
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_shape
@property
def sample_ids_dtype(self):
return self._sample_dtype
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
del time, state # unused by sample
return self._sample_fn(outputs)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
del time, outputs # unused by next_inputs
if self._next_inputs_fn is None:
next_inputs = sample_ids
else:
next_inputs = self._next_inputs_fn(sample_ids)
finished = self._end_fn(sample_ids)
return (finished, next_inputs, state)
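# A minimal usage sketch (not part of the original module): an InferenceHelper
# emitting one sigmoid-activated scalar per step and stopping once it exceeds
# 0.9; `start_frame` is an assumed `[batch_size, 1]` float tensor:
#
#   helper = InferenceHelper(
#       sample_fn=math_ops.sigmoid,
#       sample_shape=[1], sample_dtype=dtypes.float32,
#       start_inputs=start_frame,
#       end_fn=lambda sample_ids: sample_ids[:, 0] > 0.9)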
|
ikoveshnikov/tempesta
|
refs/heads/master
|
tempesta_fw/t/functional/testers/stress.py
|
1
|
from __future__ import print_function
import unittest
from helpers import tf_cfg, control, tempesta, stateful
__author__ = 'Tempesta Technologies, Inc.'
__copyright__ = 'Copyright (C) 2017-2018 Tempesta Technologies, Inc.'
__license__ = 'GPL2'
class StressTest(unittest.TestCase):
""" Test Suite to use HTTP benchmarks as a clients. Can be used for
functional testing of schedulers and stress testing for other components.
"""
pipelined_req = 1
tfw_msg_errors = False
errors_502 = 0
def create_clients(self):
""" Override to set desired list of benchmarks and their options. """
self.wrk = control.Wrk()
self.wrk.set_script("foo", content="")
self.clients = [self.wrk]
def create_tempesta(self):
""" Normally no override is needed.
Create controller for TempestaFW and add all servers to default group.
"""
self.tempesta = control.Tempesta()
def configure_tempesta(self):
""" Add all servers to default server group with default scheduler. """
sg = tempesta.ServerGroup('default')
for s in self.servers:
sg.add_server(s.ip, s.config.port, s.conns_n)
self.tempesta.config.add_sg(sg)
def create_servers(self):
""" Overrirde to create needed amount of upstream servers. """
port = tempesta.upstream_port_start_from()
self.servers = [control.Nginx(listen_port=port)]
def create_servers_helper(self, count, start_port=None):
""" Helper function to spawn `count` servers in default configuration.
See comment in Nginx.get_stats().
"""
if start_port is None:
start_port = tempesta.upstream_port_start_from()
self.servers = []
for i in range(count):
self.servers.append(control.Nginx(listen_port=(start_port + i)))
def setUp(self):
# Init members used in tearDown function.
self.tempesta = None
self.servers = []
tf_cfg.dbg(3) # Step to the next line after name of test case.
tf_cfg.dbg(3, '\tInit test case...')
self.create_clients()
self.create_servers()
self.create_tempesta()
    def force_stop(self):
        """ Forcefully stop all servers. """
        # Call functions only if the variables are not None: there might have
        # been an error before tempesta was created.
        if self.tempesta:
            self.tempesta.force_stop()
        if self.servers:
            control.servers_force_stop(self.servers)
    def tearDown(self):
        """ Carefully stop all servers. An error on stop will make the next test
        fail, so mark the test as failed even if everything else is fine.
        """
        # Call functions only if the variables are not None: there might have
        # been an error before tempesta was created.
        if self.tempesta:
            self.tempesta.stop()
        if self.servers:
            control.servers_stop(self.servers)
        if self.tempesta and self.tempesta.state == stateful.STATE_ERROR:
            raise Exception("Error during stopping tempesta")
        for server in self.servers:
            if server.state == stateful.STATE_ERROR:
                raise Exception("Error during stopping servers")
def show_performance(self):
if tf_cfg.v_level() < 2:
return
if tf_cfg.v_level() == 2:
# Go to new line, don't mess up output.
tf_cfg.dbg(2)
req_total = err_total = rate_total = 0
for c in self.clients:
req, err, rate, _ = c.results()
req_total += req
err_total += err
rate_total += rate
tf_cfg.dbg(3, ('\tClient: errors: %d, requests: %d, rate: %d'
% (err, req, rate)))
tf_cfg.dbg(
2, '\tClients in total: errors: %d, requests: %d, rate: %d' %
(err_total, req_total, rate_total))
def assert_response(self, req, err, statuses):
msg = 'HTTP client detected %i/%i errors. Results: %s' % \
(err, req, str(statuses))
if 502 in statuses.keys():
self.errors_502 += statuses[502]
self.assertEqual(err, 0, msg=msg)
def assert_clients(self):
""" Check benchmark result: no errors happen, no packet loss. """
cl_req_cnt = 0
cl_conn_cnt = 0
self.errors_502 = 0
for c in self.clients:
req, err, _, statuses = c.results()
cl_req_cnt += req
cl_conn_cnt += c.connections * self.pipelined_req
self.assert_response(req, err, statuses)
exp_min = cl_req_cnt
# Positive allowance: this means some responses are missed by the client.
# It is believed (nobody actually checked though...) that wrk does not
# wait for responses to last requests in each connection before closing
# it and does not account for those requests.
# So, [0; concurrent_connections] responses will be missed by the client.
exp_max = cl_req_cnt + cl_conn_cnt
self.assertTrue(
self.tempesta.stats.cl_msg_received >= exp_min and
self.tempesta.stats.cl_msg_received <= exp_max
)
def assert_tempesta(self):
""" Assert that tempesta had no errors during test. """
        msg = 'Tempesta has errors in processing HTTP %s.'
cl_conn_cnt = 0
for c in self.clients:
cl_conn_cnt += c.connections
self.assertEqual(self.tempesta.stats.cl_msg_parsing_errors, 0,
msg=(msg % 'requests'))
self.assertEqual(self.tempesta.stats.srv_msg_parsing_errors, 0,
msg=(msg % 'responses'))
if self.tfw_msg_errors:
return
        # TODO: self.errors_502 should be compared against a dedicated counter
        # for backend connection errors, but no such counter exists yet.
        self.assertEqual(self.tempesta.stats.cl_msg_other_errors,
                         self.errors_502, msg=(msg % 'requests'))
# See comment on "positive allowance" in `assert_clients()`
expected_err = cl_conn_cnt
self.assertTrue(self.tempesta.stats.srv_msg_other_errors <= expected_err,
msg=(msg % 'responses'))
def assert_servers(self):
# Nothing to do for nginx in default configuration.
# Implementers of this method should take into account the deficiency
# of wrk described above.
pass
def servers_get_stats(self):
control.servers_get_stats(self.servers)
def generic_start_test(self, tempesta_defconfig):
# Set defconfig for Tempesta.
self.tempesta.config.set_defconfig(tempesta_defconfig)
self.configure_tempesta()
control.servers_start(self.servers)
self.tempesta.start()
def generic_asserts_test(self):
self.show_performance()
        # Tempesta statistics are needed for the client assertions.
self.tempesta.get_stats()
self.assert_clients()
self.assert_tempesta()
self.assert_servers()
def generic_test_routine(self, tempesta_defconfig):
""" Make necessary updates to configs of servers, create tempesta config
and run the routine in you `test_*()` function.
"""
self.generic_start_test(tempesta_defconfig)
control.clients_run_parallel(self.clients)
self.generic_asserts_test()
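# A minimal sketch (hypothetical, not part of this module) of a concrete test:
# subclasses usually only supply a Tempesta config and call the routine above.
#
#   class ExampleStress(StressTest):
#       def test_proxy(self):
#           self.generic_test_routine('cache 0;\n')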
if __name__ == '__main__':
unittest.main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
Jawbone/UPPlatform_Python_SDK
|
refs/heads/master
|
upapi/scopes.py
|
1
|
"""
OAuth2 scope constants for the UP API
Refer to https://jawbone.com/up/developer/authentication for a definition of these scopes.
"""
BASIC_READ = 'basic_read'
EXTENDED_READ = 'extended_read'
LOCATION_READ = 'location_read'
FRIENDS_READ = 'friends_read'
MOOD_READ = 'mood_read'
MOOD_WRITE = 'mood_write'
MOVE_READ = 'move_read'
MOVE_WRITE = 'move_write'
SLEEP_READ = 'sleep_read'
SLEEP_WRITE = 'sleep_write'
MEAL_READ = 'meal_read'
MEAL_WRITE = 'meal_write'
WEIGHT_READ = 'weight_read'
WEIGHT_WRITE = 'weight_write'
GENERIC_EVENT_READ = 'generic_event_read'
GENERIC_EVENT_WRITE = 'generic_event_write'
HEARTRATE_READ = 'heartrate_read'
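# A minimal sketch (not part of this module): scope constants are typically
# collected into a list when requesting OAuth authorization, e.g.
#
#   requested_scopes = [BASIC_READ, MOVE_READ, SLEEP_READ]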
|
Orav/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_venv.py
|
2
|
"""
Test harness for the venv module.
Copyright (C) 2011-2012 Vinay Sajip.
Licensed to the PSF under a contributor agreement.
"""
import ensurepip
import os
import os.path
import shutil
import struct
import subprocess
import sys
import tempfile
from test.support import (captured_stdout, captured_stderr, run_unittest,
can_symlink, EnvironmentVarGuard)
import textwrap
import unittest
import venv
# pip currently requires ssl support, so we ensure we handle
# it being missing (http://bugs.python.org/issue19744)
try:
import ssl
except ImportError:
ssl = None
skipInVenv = unittest.skipIf(sys.prefix != sys.base_prefix,
'Test not appropriate in a venv')
# os.path.exists('nul') is False: http://bugs.python.org/issue20541
if os.devnull.lower() == 'nul':
failsOnWindows = unittest.expectedFailure
else:
def failsOnWindows(f):
return f
class BaseTest(unittest.TestCase):
"""Base class for venv tests."""
def setUp(self):
self.env_dir = os.path.realpath(tempfile.mkdtemp())
if os.name == 'nt':
self.bindir = 'Scripts'
self.lib = ('Lib',)
self.include = 'Include'
else:
self.bindir = 'bin'
self.lib = ('lib', 'python%s' % sys.version[:3])
self.include = 'include'
if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in os.environ:
executable = os.environ['__PYVENV_LAUNCHER__']
else:
executable = sys.executable
self.exe = os.path.split(executable)[-1]
def tearDown(self):
shutil.rmtree(self.env_dir)
def run_with_capture(self, func, *args, **kwargs):
with captured_stdout() as output:
with captured_stderr() as error:
func(*args, **kwargs)
return output.getvalue(), error.getvalue()
def get_env_file(self, *args):
return os.path.join(self.env_dir, *args)
def get_text_file_contents(self, *args):
with open(self.get_env_file(*args), 'r') as f:
result = f.read()
return result
class BasicTest(BaseTest):
"""Test venv module functionality."""
def isdir(self, *args):
fn = self.get_env_file(*args)
self.assertTrue(os.path.isdir(fn))
def test_defaults(self):
"""
Test the create function with default arguments.
"""
shutil.rmtree(self.env_dir)
self.run_with_capture(venv.create, self.env_dir)
self.isdir(self.bindir)
self.isdir(self.include)
self.isdir(*self.lib)
# Issue 21197
p = self.get_env_file('lib64')
conditions = ((struct.calcsize('P') == 8) and (os.name == 'posix') and
(sys.platform != 'darwin'))
if conditions:
self.assertTrue(os.path.islink(p))
else:
self.assertFalse(os.path.exists(p))
data = self.get_text_file_contents('pyvenv.cfg')
if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
in os.environ):
executable = os.environ['__PYVENV_LAUNCHER__']
else:
executable = sys.executable
path = os.path.dirname(executable)
self.assertIn('home = %s' % path, data)
fn = self.get_env_file(self.bindir, self.exe)
if not os.path.exists(fn): # diagnostics for Windows buildbot failures
bd = self.get_env_file(self.bindir)
print('Contents of %r:' % bd)
print(' %r' % os.listdir(bd))
self.assertTrue(os.path.exists(fn), 'File %r should exist.' % fn)
@skipInVenv
def test_prefixes(self):
"""
Test that the prefix values are as expected.
"""
#check our prefixes
self.assertEqual(sys.base_prefix, sys.prefix)
self.assertEqual(sys.base_exec_prefix, sys.exec_prefix)
# check a venv's prefixes
shutil.rmtree(self.env_dir)
self.run_with_capture(venv.create, self.env_dir)
envpy = os.path.join(self.env_dir, self.bindir, self.exe)
cmd = [envpy, '-c', None]
        for prefix, expected in (
            ('prefix', self.env_dir),
            ('exec_prefix', self.env_dir),
            ('base_prefix', sys.prefix),
            ('base_exec_prefix', sys.exec_prefix)):
cmd[2] = 'import sys; print(sys.%s)' % prefix
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
self.assertEqual(out.strip(), expected.encode())
if sys.platform == 'win32':
ENV_SUBDIRS = (
('Scripts',),
('Include',),
('Lib',),
('Lib', 'site-packages'),
)
else:
ENV_SUBDIRS = (
('bin',),
('include',),
('lib',),
('lib', 'python%d.%d' % sys.version_info[:2]),
('lib', 'python%d.%d' % sys.version_info[:2], 'site-packages'),
)
def create_contents(self, paths, filename):
"""
Create some files in the environment which are unrelated
to the virtual environment.
"""
for subdirs in paths:
d = os.path.join(self.env_dir, *subdirs)
os.mkdir(d)
fn = os.path.join(d, filename)
with open(fn, 'wb') as f:
f.write(b'Still here?')
def test_overwrite_existing(self):
"""
Test creating environment in an existing directory.
"""
self.create_contents(self.ENV_SUBDIRS, 'foo')
venv.create(self.env_dir)
for subdirs in self.ENV_SUBDIRS:
fn = os.path.join(self.env_dir, *(subdirs + ('foo',)))
self.assertTrue(os.path.exists(fn))
with open(fn, 'rb') as f:
self.assertEqual(f.read(), b'Still here?')
builder = venv.EnvBuilder(clear=True)
builder.create(self.env_dir)
for subdirs in self.ENV_SUBDIRS:
fn = os.path.join(self.env_dir, *(subdirs + ('foo',)))
self.assertFalse(os.path.exists(fn))
def clear_directory(self, path):
for fn in os.listdir(path):
fn = os.path.join(path, fn)
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
def test_unoverwritable_fails(self):
#create a file clashing with directories in the env dir
for paths in self.ENV_SUBDIRS[:3]:
fn = os.path.join(self.env_dir, *paths)
with open(fn, 'wb') as f:
f.write(b'')
self.assertRaises((ValueError, OSError), venv.create, self.env_dir)
self.clear_directory(self.env_dir)
def test_upgrade(self):
"""
Test upgrading an existing environment directory.
"""
# See Issue #21643: the loop needs to run twice to ensure
# that everything works on the upgrade (the first run just creates
# the venv).
for upgrade in (False, True):
builder = venv.EnvBuilder(upgrade=upgrade)
self.run_with_capture(builder.create, self.env_dir)
self.isdir(self.bindir)
self.isdir(self.include)
self.isdir(*self.lib)
fn = self.get_env_file(self.bindir, self.exe)
if not os.path.exists(fn):
# diagnostics for Windows buildbot failures
bd = self.get_env_file(self.bindir)
print('Contents of %r:' % bd)
print(' %r' % os.listdir(bd))
self.assertTrue(os.path.exists(fn), 'File %r should exist.' % fn)
def test_isolation(self):
"""
Test isolation from system site-packages
"""
for ssp, s in ((True, 'true'), (False, 'false')):
builder = venv.EnvBuilder(clear=True, system_site_packages=ssp)
builder.create(self.env_dir)
data = self.get_text_file_contents('pyvenv.cfg')
self.assertIn('include-system-site-packages = %s\n' % s, data)
@unittest.skipUnless(can_symlink(), 'Needs symlinks')
def test_symlinking(self):
"""
Test symlinking works as expected
"""
for usl in (False, True):
builder = venv.EnvBuilder(clear=True, symlinks=usl)
builder.create(self.env_dir)
fn = self.get_env_file(self.bindir, self.exe)
# Don't test when False, because e.g. 'python' is always
# symlinked to 'python3.3' in the env, even when symlinking in
# general isn't wanted.
if usl:
self.assertTrue(os.path.islink(fn))
# If a venv is created from a source build and that venv is used to
# run the test, the pyvenv.cfg in the venv created in the test will
# point to the venv being used to run the test, and we lose the link
# to the source build - so Python can't initialise properly.
@skipInVenv
def test_executable(self):
"""
Test that the sys.executable value is as expected.
"""
shutil.rmtree(self.env_dir)
self.run_with_capture(venv.create, self.env_dir)
envpy = os.path.join(os.path.realpath(self.env_dir), self.bindir, self.exe)
cmd = [envpy, '-c', 'import sys; print(sys.executable)']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
self.assertEqual(out.strip(), envpy.encode())
@unittest.skipUnless(can_symlink(), 'Needs symlinks')
def test_executable_symlinks(self):
"""
Test that the sys.executable value is as expected.
"""
shutil.rmtree(self.env_dir)
builder = venv.EnvBuilder(clear=True, symlinks=True)
builder.create(self.env_dir)
envpy = os.path.join(os.path.realpath(self.env_dir), self.bindir, self.exe)
cmd = [envpy, '-c', 'import sys; print(sys.executable)']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
self.assertEqual(out.strip(), envpy.encode())
@skipInVenv
class EnsurePipTest(BaseTest):
"""Test venv module installation of pip."""
def assert_pip_not_installed(self):
envpy = os.path.join(os.path.realpath(self.env_dir),
self.bindir, self.exe)
try_import = 'try:\n import pip\nexcept ImportError:\n print("OK")'
cmd = [envpy, '-c', try_import]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
# We force everything to text, so unittest gives the detailed diff
# if we get unexpected results
err = err.decode("latin-1") # Force to text, prevent decoding errors
self.assertEqual(err, "")
out = out.decode("latin-1") # Force to text, prevent decoding errors
self.assertEqual(out.strip(), "OK")
def test_no_pip_by_default(self):
shutil.rmtree(self.env_dir)
self.run_with_capture(venv.create, self.env_dir)
self.assert_pip_not_installed()
def test_explicit_no_pip(self):
shutil.rmtree(self.env_dir)
self.run_with_capture(venv.create, self.env_dir, with_pip=False)
self.assert_pip_not_installed()
@failsOnWindows
def test_devnull_exists_and_is_empty(self):
# Fix for issue #20053 uses os.devnull to force a config file to
# appear empty. However http://bugs.python.org/issue20541 means
# that doesn't currently work properly on Windows. Once that is
# fixed, the "win_location" part of test_with_pip should be restored
self.assertTrue(os.path.exists(os.devnull))
with open(os.devnull, "rb") as f:
self.assertEqual(f.read(), b"")
# Requesting pip fails without SSL (http://bugs.python.org/issue19744)
@unittest.skipIf(ssl is None, ensurepip._MISSING_SSL_MESSAGE)
def test_with_pip(self):
shutil.rmtree(self.env_dir)
with EnvironmentVarGuard() as envvars:
# pip's cross-version compatibility may trigger deprecation
# warnings in current versions of Python. Ensure related
# environment settings don't cause venv to fail.
envvars["PYTHONWARNINGS"] = "e"
# ensurepip is different enough from a normal pip invocation
# that we want to ensure it ignores the normal pip environment
# variable settings. We set PIP_NO_INSTALL here specifically
# to check that ensurepip (and hence venv) ignores it.
# See http://bugs.python.org/issue19734
envvars["PIP_NO_INSTALL"] = "1"
# Also check that we ignore the pip configuration file
# See http://bugs.python.org/issue20053
with tempfile.TemporaryDirectory() as home_dir:
envvars["HOME"] = home_dir
bad_config = "[global]\nno-install=1"
# Write to both config file names on all platforms to reduce
# cross-platform variation in test code behaviour
win_location = ("pip", "pip.ini")
posix_location = (".pip", "pip.conf")
# Skips win_location due to http://bugs.python.org/issue20541
for dirname, fname in (posix_location,):
dirpath = os.path.join(home_dir, dirname)
os.mkdir(dirpath)
fpath = os.path.join(dirpath, fname)
with open(fpath, 'w') as f:
f.write(bad_config)
# Actually run the create command with all that unhelpful
# config in place to ensure we ignore it
try:
self.run_with_capture(venv.create, self.env_dir,
with_pip=True)
except subprocess.CalledProcessError as exc:
# The output this produces can be a little hard to read,
# but at least it has all the details
details = exc.output.decode(errors="replace")
msg = "{}\n\n**Subprocess Output**\n{}"
self.fail(msg.format(exc, details))
# Ensure pip is available in the virtual environment
envpy = os.path.join(os.path.realpath(self.env_dir), self.bindir, self.exe)
cmd = [envpy, '-Im', 'pip', '--version']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
# We force everything to text, so unittest gives the detailed diff
# if we get unexpected results
err = err.decode("latin-1") # Force to text, prevent decoding errors
self.assertEqual(err, "")
out = out.decode("latin-1") # Force to text, prevent decoding errors
expected_version = "pip {}".format(ensurepip.version())
self.assertEqual(out[:len(expected_version)], expected_version)
env_dir = os.fsencode(self.env_dir).decode("latin-1")
self.assertIn(env_dir, out)
# http://bugs.python.org/issue19728
# Check the private uninstall command provided for the Windows
# installers works (at least in a virtual environment)
cmd = [envpy, '-Im', 'ensurepip._uninstall']
with EnvironmentVarGuard() as envvars:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
# We force everything to text, so unittest gives the detailed diff
# if we get unexpected results
err = err.decode("latin-1") # Force to text, prevent decoding errors
self.assertEqual(err, "")
# Being fairly specific regarding the expected behaviour for the
# initial bundling phase in Python 3.4. If the output changes in
# future pip versions, this test can likely be relaxed further.
out = out.decode("latin-1") # Force to text, prevent decoding errors
self.assertIn("Successfully uninstalled pip", out)
self.assertIn("Successfully uninstalled setuptools", out)
# Check pip is now gone from the virtual environment
self.assert_pip_not_installed()
def test_main():
run_unittest(BasicTest, EnsurePipTest)
if __name__ == "__main__":
test_main()
|
EiNSTeiN-/deluge-gtk3
|
refs/heads/master
|
deluge/ui/gtkui/status_tab.py
|
1
|
# -*- coding: utf-8 -*-
#
# status_tab.py
#
# Copyright (C) 2008 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
from gi.repository import Gtk
from deluge.ui.client import client
import deluge.component as component
import deluge.common
from deluge.ui.gtkui.torrentdetails import Tab
from deluge.log import LOG as log
def fpeer_sized(first, second):
return "%s (%s)" % (deluge.common.fsize(first), deluge.common.fsize(second))
def fpeer_size_second(first, second):
return "%s (%s)" % (first, deluge.common.fsize(second))
def fratio(value):
if value < 0:
return "∞"
return "%.3f" % value
def fpcnt(value):
return "%.2f%%" % value
def fspeed(value, max_value=-1):
if max_value > -1:
return "%s (%s %s)" % (deluge.common.fspeed(value), max_value, _("KiB/s"))
else:
return deluge.common.fspeed(value)
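# Illustrative examples (not part of the original module) of the formatters
# above, assuming deluge.common.fsize(1048576) returns '1.0 MiB':
#
#   fratio(-1)                       -> "∞"
#   fpcnt(42.5)                      -> "42.50%"
#   fpeer_size_second(100, 1048576)  -> "100 (1.0 MiB)"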
class StatusTab(Tab):
def __init__(self):
Tab.__init__(self)
# Get the labels we need to update.
# widgetname, modifier function, status keys
glade = component.get("MainWindow").main_glade
self._name = "Status"
self._child_widget = glade.get_object("status_tab")
self._tab_label = glade.get_object("status_tab_label")
self.label_widgets = [
(glade.get_object("summary_pieces"), fpeer_size_second, ("num_pieces", "piece_length")),
(glade.get_object("summary_availability"), fratio, ("distributed_copies",)),
(glade.get_object("summary_total_downloaded"), fpeer_sized, ("all_time_download", "total_payload_download")),
(glade.get_object("summary_total_uploaded"), fpeer_sized, ("total_uploaded", "total_payload_upload")),
(glade.get_object("summary_download_speed"), fspeed, ("download_payload_rate", "max_download_speed")),
(glade.get_object("summary_upload_speed"), fspeed, ("upload_payload_rate", "max_upload_speed")),
(glade.get_object("summary_seeders"), deluge.common.fpeer, ("num_seeds", "total_seeds")),
(glade.get_object("summary_peers"), deluge.common.fpeer, ("num_peers", "total_peers")),
(glade.get_object("summary_eta"), deluge.common.ftime, ("eta",)),
(glade.get_object("summary_share_ratio"), fratio, ("ratio",)),
(glade.get_object("summary_tracker_status"), None, ("tracker_status",)),
(glade.get_object("summary_next_announce"), deluge.common.ftime, ("next_announce",)),
(glade.get_object("summary_active_time"), deluge.common.ftime, ("active_time",)),
(glade.get_object("summary_seed_time"), deluge.common.ftime, ("seeding_time",)),
(glade.get_object("summary_seed_rank"), str, ("seed_rank",)),
(glade.get_object("summary_auto_managed"), str, ("is_auto_managed",)),
(glade.get_object("progressbar"), fpcnt, ("progress",)),
(glade.get_object("summary_date_added"), deluge.common.fdate, ("time_added",))
]
def update(self):
# Get the first selected torrent
selected = component.get("TorrentView").get_selected_torrents()
# Only use the first torrent in the list or return if None selected
if len(selected) != 0:
selected = selected[0]
else:
# No torrent is selected in the torrentview
return
# Get the torrent status
status_keys = ["progress", "num_pieces", "piece_length",
"distributed_copies", "all_time_download", "total_payload_download",
"total_uploaded", "total_payload_upload", "download_payload_rate",
"upload_payload_rate", "num_peers", "num_seeds", "total_peers",
"total_seeds", "eta", "ratio", "next_announce",
"tracker_status", "max_connections", "max_upload_slots",
"max_upload_speed", "max_download_speed", "active_time",
"seeding_time", "seed_rank", "is_auto_managed", "time_added"]
component.get("SessionProxy").get_torrent_status(
selected, status_keys).addCallback(self._on_get_torrent_status)
def _on_get_torrent_status(self, status):
# Check to see if we got valid data from the core
if status is None:
return
if status["is_auto_managed"]:
status["is_auto_managed"]=_("On")
else:
status["is_auto_managed"]=_("Off")
# Update all the label widgets
for widget in self.label_widgets:
            if widget[1] is not None:
args = []
try:
for key in widget[2]:
args.append(status[key])
except Exception, e:
log.debug("Unable to get status value: %s", e)
continue
txt = widget[1](*args)
else:
txt = status[widget[2][0]]
if widget[0].get_text() != txt:
widget[0].set_text(txt)
# Do the progress bar because it's a special case (not a label)
w = component.get("MainWindow").main_glade.get_object("progressbar")
fraction = status["progress"] / 100
if w.get_fraction() != fraction:
w.set_fraction(fraction)
def clear(self):
for widget in self.label_widgets:
widget[0].set_text("")
component.get("MainWindow").main_glade.get_object("progressbar").set_fraction(0.0)
|
daspecster/google-cloud-python
|
refs/heads/master
|
vision/unit_tests/__init__.py
|
216
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
jtopjian/st2
|
refs/heads/master
|
st2common/st2common/hooks.py
|
2
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import traceback
import webob
from oslo.config import cfg
from pecan.hooks import PecanHook
from six.moves.urllib import parse as urlparse
from webob import exc
from st2common import log as logging
from st2common.exceptions import auth as exceptions
from st2common.util.jsonify import json_encode
from st2common.util.auth import validate_token
from st2common.constants.auth import HEADER_ATTRIBUTE_NAME
from st2common.constants.auth import QUERY_PARAM_ATTRIBUTE_NAME
LOG = logging.getLogger(__name__)
# A list of method names for which we don't want to log the result / response
RESPONSE_LOGGING_METHOD_NAME_BLACKLIST = [
'get_all'
]
# A list of controller classes for which we don't want to log the result / response
RESPONSE_LOGGING_CONTROLLER_NAME_BLACKLIST = [
'ActionExecutionChildrenController', # action executions can be big
'ActionExecutionAttributeController', # result can be big
'ActionExecutionsController' # action executions can be big
]
class CorsHook(PecanHook):
def after(self, state):
headers = state.response.headers
origin = state.request.headers.get('Origin')
origins = set(cfg.CONF.api.allow_origin)
# Build a list of the default allowed origins
public_api_url = cfg.CONF.auth.api_url
# Default gulp development server WebUI URL
origins.add('http://localhost:3000')
# By default WebUI simple http server listens on 8080
origins.add('http://localhost:8080')
origins.add('http://127.0.0.1:8080')
if public_api_url:
# Public API URL
origins.add(public_api_url)
if origin:
if '*' in origins:
origin_allowed = '*'
else:
# See http://www.w3.org/TR/cors/#access-control-allow-origin-response-header
origin_allowed = origin if origin in origins else 'null'
else:
origin_allowed = list(origins)[0]
methods_allowed = ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS']
request_headers_allowed = ['Content-Type', 'Authorization', 'X-Auth-Token']
response_headers_allowed = ['Content-Type', 'X-Limit', 'X-Total-Count']
headers['Access-Control-Allow-Origin'] = origin_allowed
headers['Access-Control-Allow-Methods'] = ','.join(methods_allowed)
headers['Access-Control-Allow-Headers'] = ','.join(request_headers_allowed)
headers['Access-Control-Expose-Headers'] = ','.join(response_headers_allowed)
if not headers.get('Content-Length') \
and not headers.get('Content-type', '').startswith('text/event-stream'):
headers['Content-Length'] = str(len(state.response.body))
def on_error(self, state, e):
if state.request.method == 'OPTIONS':
return webob.Response()
class AuthHook(PecanHook):
def before(self, state):
        # OPTIONS requests don't need to be authenticated
if state.request.method == 'OPTIONS':
return
state.request.context['token'] = self._validate_token(request=state.request)
if QUERY_PARAM_ATTRIBUTE_NAME in state.arguments.keywords:
del state.arguments.keywords[QUERY_PARAM_ATTRIBUTE_NAME]
def on_error(self, state, e):
if isinstance(e, exceptions.TokenNotProvidedError):
LOG.exception('Token is not provided.')
return self._abort_unauthorized()
if isinstance(e, exceptions.TokenNotFoundError):
LOG.exception('Token is not found.')
return self._abort_unauthorized()
if isinstance(e, exceptions.TokenExpiredError):
LOG.exception('Token has expired.')
return self._abort_unauthorized()
@staticmethod
def _abort_unauthorized():
return webob.Response(json_encode({
'faultstring': 'Unauthorized'
}), status=401)
@staticmethod
def _abort_other_errors():
return webob.Response(json_encode({
'faultstring': 'Internal Server Error'
}), status=500)
@staticmethod
def _validate_token(request):
"""
Validate token provided either in headers or query parameters.
"""
headers = request.headers
query_string = request.query_string
query_params = dict(urlparse.parse_qsl(query_string))
token_in_headers = headers.get(HEADER_ATTRIBUTE_NAME, None)
token_in_query_params = query_params.get(QUERY_PARAM_ATTRIBUTE_NAME, None)
return validate_token(token_in_headers=token_in_headers,
token_in_query_params=token_in_query_params)
class JSONErrorResponseHook(PecanHook):
"""
Handle all the errors and respond with JSON.
"""
def on_error(self, state, e):
error_msg = getattr(e, 'comment', str(e))
LOG.debug('API call failed: %s', error_msg)
LOG.debug(traceback.format_exc())
if hasattr(e, 'body') and isinstance(e.body, dict):
body = e.body
else:
body = {}
if isinstance(e, exc.HTTPException):
status_code = state.response.status
message = str(e)
else:
status_code = httplib.INTERNAL_SERVER_ERROR
message = 'Internal Server Error'
body['faultstring'] = message
response_body = json_encode(body)
headers = state.response.headers or {}
headers['Content-Type'] = 'application/json'
headers['Content-Length'] = str(len(response_body))
return webob.Response(response_body, status=status_code, headers=headers)
class LoggingHook(PecanHook):
"""
Logs all incoming requests and outgoing responses
"""
def before(self, state):
# Note: We use getattr since in some places (tests) request is mocked
method = getattr(state.request, 'method', None)
path = getattr(state.request, 'path', None)
remote_addr = getattr(state.request, 'remote_addr', None)
# Log the incoming request
values = {'method': method, 'path': path, 'remote_addr': remote_addr}
values['filters'] = state.arguments.keywords
LOG.info('%(method)s %(path)s with filters=%(filters)s' % values, extra=values)
def after(self, state):
# Note: We use getattr since in some places (tests) request is mocked
method = getattr(state.request, 'method', None)
path = getattr(state.request, 'path', None)
remote_addr = getattr(state.request, 'remote_addr', None)
# Log the outgoing response
values = {'method': method, 'path': path, 'remote_addr': remote_addr}
values['status_code'] = state.response.status
if hasattr(state.controller, 'im_self'):
function_name = state.controller.im_func.__name__
controller_name = state.controller.im_class.__name__
log_result = True
log_result &= function_name not in RESPONSE_LOGGING_METHOD_NAME_BLACKLIST
log_result &= controller_name not in RESPONSE_LOGGING_CONTROLLER_NAME_BLACKLIST
else:
log_result = False
if log_result:
values['result'] = state.response.body
log_msg = '%(method)s %(path)s result=%(result)s' % values
else:
# Note: We don't want to include a result for some
# methods which have a large result
log_msg = '%(method)s %(path)s' % values
LOG.info(log_msg, extra=values)
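# A minimal sketch (not part of this module): Pecan hooks such as the ones
# defined above are registered when the WSGI application is created, e.g.
#
#   app = pecan.make_app(root_controller,
#                        hooks=[JSONErrorResponseHook(), CorsHook(),
#                               AuthHook(), LoggingHook()])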
|
prculley/gramps
|
refs/heads/master
|
gramps/plugins/lib/libcairodoc.py
|
4
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Zsolt Foldvari
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Brian Matherly
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011-2017 Paul Franklin
# Copyright (C) 2012 Craig Anderson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Report output generator based on Cairo.
"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from math import radians
import re
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.plug.docgen import (BaseDoc, TextDoc, DrawDoc, ParagraphStyle,
TableCellStyle, SOLID, FONT_SANS_SERIF, FONT_SERIF,
FONT_MONOSPACE, PARA_ALIGN_CENTER, PARA_ALIGN_LEFT)
from gramps.gen.plug.report import utils
from gramps.gen.errors import PluginError
from gramps.gen.plug.docbackend import CairoBackend
from gramps.gen.utils.image import resize_to_buffer
from gramps.gui.utils import SystemFonts
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".libcairodoc")
#-------------------------------------------------------------------------
#
# Pango modules
#
#-------------------------------------------------------------------------
from gi.repository import Pango, PangoCairo
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
# each element draws some extra information useful for debugging
DEBUG = False
#------------------------------------------------------------------------
#
# Font selection
#
#------------------------------------------------------------------------
_TTF_FREEFONT = {
FONT_SERIF: 'FreeSerif',
FONT_SANS_SERIF: 'FreeSans',
FONT_MONOSPACE: 'FreeMono',
}
_MS_TTFONT = {
FONT_SERIF: 'Times New Roman',
FONT_SANS_SERIF: 'Arial',
FONT_MONOSPACE: 'Courier New',
}
_GNOME_FONT = {
FONT_SERIF: 'Serif',
FONT_SANS_SERIF: 'Sans',
FONT_MONOSPACE: 'Monospace',
}
font_families = _GNOME_FONT
# FIXME debug logging does not work here.
def set_font_families():
"""Set the used font families depending on availability.
"""
global font_families
fonts = SystemFonts()
family_names = fonts.get_system_fonts()
fam = [f for f in _TTF_FREEFONT.values() if f in family_names]
if len(fam) == len(_TTF_FREEFONT):
font_families = _TTF_FREEFONT
log.debug('Using FreeFonts: %s' % font_families)
return
fam = [f for f in _MS_TTFONT.values() if f in family_names]
if len(fam) == len(_MS_TTFONT):
font_families = _MS_TTFONT
log.debug('Using MS TrueType fonts: %s' % font_families)
return
fam = [f for f in _GNOME_FONT.values() if f in family_names]
if len(fam) == len(_GNOME_FONT):
font_families = _GNOME_FONT
log.debug('Using Gnome fonts: %s' % font_families)
return
log.debug('No fonts found.')
set_font_families()
#------------------------------------------------------------------------
#
# Converter functions
#
#------------------------------------------------------------------------
def fontstyle_to_fontdescription(font_style):
"""Convert a FontStyle instance to a Pango.FontDescription one.
Font color and underline are not implemented in Pango.FontDescription,
and have to be set with Pango.Layout.set_attributes(attrlist) method.
"""
if font_style.get_bold():
f_weight = Pango.Weight.BOLD
else:
f_weight = Pango.Weight.NORMAL
if font_style.get_italic():
f_style = Pango.Style.ITALIC
else:
f_style = Pango.Style.NORMAL
font_description = Pango.FontDescription(font_families[font_style.face])
font_description.set_size(int(round(font_style.get_size() * Pango.SCALE)))
font_description.set_weight(f_weight)
font_description.set_style(f_style)
return font_description
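# Illustrative sketch (comment only, not executed): a converted FontStyle is
# applied to a Pango layout exactly as done in the divide()/draw() methods
# below; `style` is assumed to be a ParagraphStyle.
#
#   font_style = style.get_font()
#   layout.set_font_description(fontstyle_to_fontdescription(font_style))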
def tabstops_to_tabarray(tab_stops, dpi):
"""Convert a list of tabs given in cm to a Pango.TabArray.
"""
tab_array = Pango.TabArray.new(initial_size=len(tab_stops),
positions_in_pixels=False)
for index in range(len(tab_stops)):
location = tab_stops[index] * dpi * Pango.SCALE / 2.54
tab_array.set_tab(index, Pango.TabAlign.LEFT, int(location))
return tab_array
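# Illustrative example (values assumed): a tab stop at 1 cm with a 72 dpi
# horizontal resolution lands at 1 * 72 * Pango.SCALE / 2.54, i.e. about
# 29000 Pango units.
#
#   tab_array = tabstops_to_tabarray([1.0, 2.5], dpi=72)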
def raw_length(s):
"""
Return the length of the raw string after all pango markup has been removed.
"""
s = re.sub('<.*?>', '', s)
s = s.replace('&', '&')
s = s.replace('<', '<')
s = s.replace('>', '>')
s = s.replace('"', '"')
s = s.replace(''', "'")
return len(s)
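# Illustrative example (comment only): tags are stripped and entities are
# unescaped before measuring, so
#   raw_length('<b>Hi</b> &amp; bye') == len('Hi & bye') == 8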
###------------------------------------------------------------------------
###
### Table row style
###
###------------------------------------------------------------------------
##class RowStyle(list):
##"""Specifies the format of a table row.
##RowStyle extends the available styles in
##The RowStyle contains the width of each column as a percentage of the
##width of the full row. Note! The width of the row is not known until
##divide() or draw() method is called.
##"""
##def __init__(self):
##self.columns = []
##def set_columns(self, columns):
##"""Set the number of columns.
##@param columns: number of columns that should be used.
##@param type: int
##"""
##self.columns = columns
##def get_columns(self):
##"""Return the number of columns.
##"""
##return self.columns
##def set_column_widths(self, clist):
##"""Set the width of all the columns at once.
##@param clist: list of width of columns in % of the full row.
##@param type: list
##"""
##self.columns = len(clist)
##for i in range(self.columns):
##self.colwid[i] = clist[i]
##def set_column_width(self, index, width):
##"""
##Set the width of a specified column to the specified width.
##@param index: column being set (index starts at 0)
##@param width: percentage of the table width assigned to the column
##"""
##self.colwid[index] = width
##def get_column_width(self, index):
##"""
##Return the column width of the specified column as a percentage of
##the entire table width.
##@param index: column to return (index starts at 0)
##"""
##return self.colwid[index]
class FrameStyle:
"""Define the style properties of a Frame.
- width: Width of the frame in cm.
- height: Height of the frame in cm.
- align: Horizontal position to entire page.
Available values: 'left','center', 'right'.
- spacing: Tuple of spacing around the frame in cm. Order of values:
(left, right, top, bottom).
"""
def __init__(self, width=0, height=0, align='left', spacing=(0, 0, 0, 0)):
self.width = width
self.height = height
self.align = align
self.spacing = spacing
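# Illustrative sketch (values assumed): a 10 x 5 cm centered frame with 1 cm
# of spacing on the left and right.
#
#   frame_style = FrameStyle(width=10, height=5, align='center',
#                            spacing=(1, 1, 0, 0))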
#------------------------------------------------------------------------
#
# Document element classes
#
#------------------------------------------------------------------------
class GtkDocBaseElement:
"""Base of all document elements.
    Supports document element structuring and can render itself onto
a Cairo surface.
There are two categories of methods:
1. hierarchy building methods (add_child, get_children, set_parent,
get_parent);
2. rendering methods (divide, draw).
The hierarchy building methods generally don't have to be overridden in
the subclass, while the rendering methods (divide, draw) must be
implemented in the subclasses.
"""
_type = 'BASE'
_allowed_children = []
def __init__(self, style=None):
self._parent = None
self._children = []
self._style = style
def get_type(self):
"""Get the type of this element.
"""
return self._type
def set_parent(self, parent):
"""Set the parent element of this element.
"""
self._parent = parent
def get_parent(self):
"""Get the parent element of this element.
"""
return self._parent
def add_child(self, element):
"""Add a child element.
Returns False if the child cannot be added (e.g. not an allowed type),
or True otherwise.
"""
# check if it is an allowed child for this type
if element.get_type() not in self._allowed_children:
log.debug("%r is not an allowed child for %r" %
(element.__class__, self.__class__))
return False
# append the child and set its parent
self._children.append(element)
element.set_parent(self)
return True
def get_children(self):
"""Get the list of children of this element.
"""
return self._children
def get_marks(self):
"""Get the list of index marks for this element.
"""
marks = []
for child in self._children:
marks.extend(child.get_marks())
return marks
def divide(self, layout, width, height, dpi_x, dpi_y):
"""Divide the element into two depending on available space.
        @param layout: pango layout to write on
        @type layout: Pango.Layout
        @param width: width of available space for this element
        @type width: device points
        @param height: height of available space for this element
        @type height: device points
        @param dpi_x: the horizontal resolution
        @type dpi_x: dots per inch
        @param dpi_y: the vertical resolution
        @type dpi_y: dots per inch
@return: the divided element, and the height of the first part
@rtype: (GtkDocXXX-1, GtkDocXXX-2), device points
"""
raise NotImplementedError
def draw(self, cairo_context, pango_layout, width, dpi_x, dpi_y):
"""Draw itself onto a cairo surface.
        @param cairo_context: context to draw on
        @type cairo_context: cairo.Context class
        @param pango_layout: pango layout to write on
        @type pango_layout: Pango.Layout class
        @param width: width of available space for this element
        @type width: device points
        @param dpi_x: the horizontal resolution
        @type dpi_x: dots per inch
        @param dpi_y: the vertical resolution
        @type dpi_y: dots per inch
@return: height of the element
@rtype: device points
"""
raise NotImplementedError
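    # A minimal illustrative subclass (sketch only, not part of the document
    # model): a fixed-height spacer that either fits on the current page in
    # full or is pushed to the next one. The 10-point height is an assumption
    # made for the example.
    #
    #   class GtkDocSpacer(GtkDocBaseElement):
    #       _type = 'SPACER'
    #       def divide(self, layout, width, height, dpi_x, dpi_y):
    #           if 10 <= height:
    #               return (self, None), 10
    #           return (None, self), 0
    #       def draw(self, cairo_context, pango_layout, width, dpi_x, dpi_y):
    #           return 10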
class GtkDocDocument(GtkDocBaseElement):
"""The whole document or a page.
"""
_type = 'DOCUMENT'
_allowed_children = ['PARAGRAPH', 'PAGEBREAK', 'TABLE', 'IMAGE', 'FRAME',
'TOC', 'INDEX']
def draw(self, cairo_context, pango_layout, width, dpi_x, dpi_y):
x = y = elem_height = 0
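        # children are drawn one below the other: before each child the
        # origin is shifted down by the height of the previously drawn child
        # (elem_height is 0 for the first child; x stays 0)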
for elem in self._children:
cairo_context.translate(x, elem_height)
elem_height = elem.draw(cairo_context, pango_layout,
width, dpi_x, dpi_y)
y += elem_height
return y
def has_toc(self):
for elem in self._children:
if elem.get_type() == 'TOC':
return True
return False
def has_index(self):
for elem in self._children:
if elem.get_type() == 'INDEX':
return True
return False
class GtkDocPagebreak(GtkDocBaseElement):
"""Implement a page break.
"""
_type = 'PAGEBREAK'
_allowed_children = []
def divide(self, layout, width, height, dpi_x, dpi_y):
return (None, None), 0
class GtkDocTableOfContents(GtkDocBaseElement):
"""Implement a table of contents.
"""
_type = 'TOC'
_allowed_children = []
def divide(self, layout, width, height, dpi_x, dpi_y):
return (self, None), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
return 0
class GtkDocAlphabeticalIndex(GtkDocBaseElement):
"""Implement an alphabetical index.
"""
_type = 'INDEX'
_allowed_children = []
def divide(self, layout, width, height, dpi_x, dpi_y):
return (self, None), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
return 0
class GtkDocParagraph(GtkDocBaseElement):
"""Paragraph.
"""
_type = 'PARAGRAPH'
_allowed_children = []
# line spacing is not defined in ParagraphStyle
spacingfractionfont = 0.2
def __init__(self, style, leader=None):
GtkDocBaseElement.__init__(self, style)
if leader:
self._text = leader + '\t'
# FIXME append new tab to the existing tab list
self._style.set_tabs([-1 * self._style.get_first_indent()])
else:
self._text = ''
self._plaintext = None
self._attrlist = None
self._marklist = []
def add_text(self, text):
if self._plaintext is not None:
raise PluginError('CairoDoc: text is already parsed.'
' You cannot add text anymore')
self._text = self._text + text
def add_mark(self, mark):
"""
Add an index mark to this paragraph
"""
self._marklist.append((mark, raw_length(self._text)))
def get_marks(self):
"""
Return a list of index marks for this paragraph
"""
return [elem[0] for elem in self._marklist]
def __set_marklist(self, marklist):
"""
Internal method to allow for splitting of paragraphs
"""
self._marklist = marklist
def __set_plaintext(self, plaintext):
"""
Internal method to allow for splitting of paragraphs
"""
if not isinstance(plaintext, str):
self._plaintext = plaintext.decode('utf-8')
else:
self._plaintext = plaintext
def __set_attrlist(self, attrlist):
"""
Internal method to allow for splitting of paragraphs
"""
self._attrlist = attrlist
def __parse_text(self):
"""
Parse the markup text. This method will only do this if not
done already
"""
if self._plaintext is None:
parse_ok, self._attrlist, self._plaintext, accel_char= \
Pango.parse_markup(self._text, -1, '\000')
def divide(self, layout, width, height, dpi_x, dpi_y):
self.__parse_text()
l_margin = self._style.get_left_margin() * dpi_x / 2.54
r_margin = self._style.get_right_margin() * dpi_x / 2.54
t_margin = self._style.get_top_margin() * dpi_y / 2.54
b_margin = self._style.get_bottom_margin() * dpi_y / 2.54
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
f_indent = self._style.get_first_indent() * dpi_x / 2.54
# calculate real width available for text
text_width = width - l_margin - 2 * h_padding - r_margin
if f_indent < 0:
text_width -= f_indent
layout.set_width(int(text_width * Pango.SCALE))
# set paragraph properties
layout.set_wrap(Pango.WrapMode.WORD_CHAR)
layout.set_indent(int(f_indent * Pango.SCALE))
layout.set_tabs(tabstops_to_tabarray(self._style.get_tabs(), dpi_x))
#
align = self._style.get_alignment_text()
if align == 'left':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.LEFT)
elif align == 'right':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.RIGHT)
elif align == 'center':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.CENTER)
elif align == 'justify':
#We have a problem, in pango, justify works only on full lines,
# and we need an alignment for the partial lines. We don't know
# for justify what alignment the user wants however. We assume
# here LEFT ...
layout.set_alignment(Pango.Alignment.LEFT)
layout.set_justify(True)
else:
raise ValueError
#
font_style = self._style.get_font()
layout.set_font_description(fontstyle_to_fontdescription(font_style))
#set line spacing based on font:
spacing = font_style.get_size() * self.spacingfractionfont
layout.set_spacing(int(round(spacing * Pango.SCALE)))
text_height = height - t_margin - 2 * v_padding
# calculate where to cut the paragraph
layout.set_text(self._plaintext, -1)
layout.set_attributes(self._attrlist)
layout_width, layout_height = layout.get_pixel_size()
line_count = layout.get_line_count()
spacing = layout.get_spacing() / Pango.SCALE
# if all paragraph fits we don't need to cut
if layout_height - spacing <= text_height:
            paragraph_height = layout_height + spacing + t_margin + (2 * v_padding)
if height - paragraph_height > b_margin:
paragraph_height += b_margin
return (self, None), paragraph_height
# we need to cut paragraph:
        # 1. if the paragraph is part of a cell, we do not divide it when only
        #    a small part of it could be shown; instead move to the next page
if line_count < 4 and self._parent._type == 'CELL':
return (None, self), 0
lineiter = layout.get_iter()
linenr = 0
linerange = lineiter.get_line_yrange()
# 2. if nothing fits, move to next page without split
# there is a spacing above and under the text
if linerange[1] - linerange[0] + 2.*spacing \
> text_height * Pango.SCALE:
return (None, self), 0
# 3. split the paragraph
startheight = linerange[0]
endheight = linerange[1]
splitline = -1
if lineiter.at_last_line():
#only one line of text that does not fit
return (None, self), 0
while not lineiter.at_last_line():
#go to next line, see if all fits, if not split
lineiter.next_line()
linenr += 1
linerange = lineiter.get_line_yrange()
if linerange[1] - startheight + 2.*spacing \
> text_height * Pango.SCALE:
splitline = linenr
break
endheight = linerange[1]
if splitline == -1:
            log.error('CairoDoc: unexpected failure to split the paragraph')
return (None, self), 0
#we split at splitline
# get index of first character which doesn't fit on available height
layout_line = layout.get_line(splitline)
index = layout_line.start_index
# and divide the text, first create the second part
new_style = ParagraphStyle(self._style)
new_style.set_top_margin(0)
#we split a paragraph, text should begin in correct position: no indent
#as if the paragraph just continues from normal text
new_style.set_first_indent(0)
new_paragraph = GtkDocParagraph(new_style)
        #index is a byte offset into the text..
new_paragraph.__set_plaintext(self._plaintext.encode('utf-8')[index:])
        #now recalculate the attribute list:
newattrlist = layout.get_attributes().copy()
newattrlist.filter(self.filterattr, index)
## GTK3 PROBLEM: get_iterator no longer available!!
## REFERENCES:
## http://www.gramps-project.org/bugs/view.php?id=6208
## https://bugzilla.gnome.org/show_bug.cgi?id=646788
## workaround: https://github.com/matasbbb/pitivit/commit/da815339e5ce3631b122a72158ba9ffcc9ee4372
## OLD EASY CODE:
## oldattrlist = newattrlist.get_iterator()
## while oldattrlist.next() :
## vals = oldattrlist.get_attrs()
## #print (vals)
## for attr in vals:
## newattr = attr.copy()
## newattr.start_index -= index if newattr.start_index > index \
## else 0
## newattr.end_index -= index
## newattrlist.insert(newattr)
## ## START OF WORKAROUND
oldtext = self._text
pos = 0
realpos = 0
markstarts = []
        #index is a byte offset into the text.. !!
while pos < index:
if realpos >= len(oldtext):
break
char = oldtext[realpos]
if char == '<' and oldtext[realpos+1] != '/':
# a markup starts
end = realpos + oldtext[realpos:].find('>') + 1
markstarts += [oldtext[realpos:end]]
realpos = end
elif char == '<':
#this is the closing tag, we did not stop yet, so remove tag!
realpos = realpos + oldtext[realpos:].find('>') + 1
markstarts.pop()
else:
pos += len(char.encode('utf-8'))
realpos += 1
#now construct the marked up text to use
newtext = ''.join(markstarts)
newtext += oldtext[realpos:]
#have it parsed
parse_ok, newattrlist, _plaintext, accel_char= \
Pango.parse_markup(newtext, -1, '\000')
## ##END OF WORKAROUND
new_paragraph.__set_attrlist(newattrlist)
# then update the first one
self.__set_plaintext(self._plaintext.encode('utf-8')[:index])
self._style.set_bottom_margin(0)
# split the list of index marks
para1 = []
para2 = []
for mark, position in self._marklist:
if position < index:
para1.append((mark, position))
else:
para2.append((mark, position - index))
self.__set_marklist(para1)
new_paragraph.__set_marklist(para2)
paragraph_height = endheight - startheight + spacing + t_margin + 2 * v_padding
return (self, new_paragraph), paragraph_height
def filterattr(self, attr, index):
"""callback to filter out attributes in the removed piece at beginning
"""
if attr.start_index > index or \
(attr.start_index < index and attr.end_index > index):
return False
return True
def draw(self, cr, layout, width, dpi_x, dpi_y):
self.__parse_text()
l_margin = self._style.get_left_margin() * dpi_x / 2.54
r_margin = self._style.get_right_margin() * dpi_x / 2.54
t_margin = self._style.get_top_margin() * dpi_y / 2.54
b_margin = self._style.get_bottom_margin() * dpi_y / 2.54
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
f_indent = self._style.get_first_indent() * dpi_x / 2.54
# calculate real width available for text
text_width = width - l_margin - 2 * h_padding - r_margin
if f_indent < 0:
text_width -= f_indent
layout.set_width(int(text_width * Pango.SCALE))
# set paragraph properties
layout.set_wrap(Pango.WrapMode.WORD_CHAR)
layout.set_indent(int(f_indent * Pango.SCALE))
layout.set_tabs(tabstops_to_tabarray(self._style.get_tabs(), dpi_x))
#
align = self._style.get_alignment_text()
if align == 'left':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.LEFT)
elif align == 'right':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.RIGHT)
elif align == 'center':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.CENTER)
elif align == 'justify':
#We have a problem, in pango, justify works only on full lines,
# and we need an alignment for the partial lines. We don't know
# for justify what alignment the user wants however. We assume
# here LEFT ...
layout.set_alignment(Pango.Alignment.LEFT)
layout.set_justify(True)
#
font_style = self._style.get_font()
layout.set_font_description(fontstyle_to_fontdescription(font_style))
#set line spacing based on font:
spacing = font_style.get_size() * self.spacingfractionfont
layout.set_spacing(int(round(spacing * Pango.SCALE)))
# layout the text
layout.set_text(self._plaintext, -1)
layout.set_attributes(self._attrlist)
layout_width, layout_height = layout.get_pixel_size()
# render the layout onto the cairo surface
x = l_margin + h_padding
if f_indent < 0:
x += f_indent
# 3/4 of the spacing is added above the text, 1/4 is added below
cr.move_to(x, t_margin + v_padding + spacing * 0.75)
cr.set_source_rgb(*utils.rgb_color(font_style.get_color()))
PangoCairo.show_layout(cr, layout)
# calculate the full paragraph height
height = layout_height + spacing + t_margin + 2*v_padding + b_margin
# draw the borders
if self._style.get_top_border():
cr.move_to(l_margin, t_margin)
cr.rel_line_to(width - l_margin - r_margin, 0)
if self._style.get_right_border():
cr.move_to(width - r_margin, t_margin)
cr.rel_line_to(0, height - t_margin - b_margin)
if self._style.get_bottom_border():
cr.move_to(l_margin, height - b_margin)
cr.rel_line_to(width - l_margin - r_margin, 0)
if self._style.get_left_border():
cr.move_to(l_margin, t_margin)
            cr.rel_line_to(0, height - t_margin - b_margin)
cr.set_line_width(1)
cr.set_source_rgb(0, 0, 0)
cr.stroke()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(0, 0, width, height)
cr.stroke()
cr.set_source_rgb(0, 0, 1.0)
cr.rectangle(l_margin, t_margin,
width-l_margin-r_margin, height-t_margin-b_margin)
cr.stroke()
return height
class GtkDocTable(GtkDocBaseElement):
"""Implement a table.
"""
_type = 'TABLE'
_allowed_children = ['ROW']
def divide(self, layout, width, height, dpi_x, dpi_y):
#calculate real table width
table_width = width * self._style.get_width() / 100
# calculate the height of each row
table_height = 0
row_index = 0
while row_index < len(self._children):
row = self._children[row_index]
(r1, r2), row_height = row.divide(layout, table_width, height,
dpi_x, dpi_y)
if r2 is not None:
#break the table in two parts
break
table_height += row_height
row_index += 1
height -= row_height
# divide the table if any row did not fit
new_table = None
if row_index < len(self._children):
new_table = GtkDocTable(self._style)
#add the split row
new_table.add_child(r2)
list(map(new_table.add_child, self._children[row_index+1:]))
del self._children[row_index+1:]
return (self, new_table), table_height
def draw(self, cr, layout, width, dpi_x, dpi_y):
#calculate real table width
table_width = width * self._style.get_width() / 100
# TODO is a table always left aligned??
table_height = 0
# draw all the rows
for row in self._children:
cr.save()
cr.translate(0, table_height)
row_height = row.draw(cr, layout, table_width, dpi_x, dpi_y)
cr.restore()
table_height += row_height
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(0, 0, table_width, table_height)
cr.stroke()
return table_height
class GtkDocTableRow(GtkDocBaseElement):
"""Implement a row in a table.
"""
_type = 'ROW'
_allowed_children = ['CELL']
def divide(self, layout, width, height, dpi_x, dpi_y):
# the highest cell gives the height of the row
cell_heights = []
dividedrow = False
cell_width_iter = self._style.__iter__()
new_row = GtkDocTableRow(self._style)
for cell in self._children:
cell_width = 0
for i in range(cell.get_span()):
cell_width += next(cell_width_iter)
cell_width = cell_width * width / 100
(c1, c2), cell_height = cell.divide(layout, cell_width, height,
dpi_x, dpi_y)
cell_heights.append(cell_height)
if c2 is None:
emptycell = GtkDocTableCell(c1._style, c1.get_span())
new_row.add_child(emptycell)
else:
dividedrow = True
new_row.add_child(c2)
# save height [inch] of the row to be able to draw exact cell border
row_height = max(cell_heights)
self.height = row_height / dpi_y
# return the new row if dividing was needed
if dividedrow:
if row_height == 0:
for cell in self._children:
cell._style.set_top_border(False)
cell._style.set_left_border(False)
cell._style.set_right_border(False)
return (self, new_row), row_height
else:
return (self, None), row_height
def draw(self, cr, layout, width, dpi_x, dpi_y):
cr.save()
# get the height of this row
row_height = self.height * dpi_y
# draw all the cells in the row
cell_width_iter = self._style.__iter__()
for cell in self._children:
cell_width = 0
for i in range(cell.get_span()):
cell_width += next(cell_width_iter)
cell_width = cell_width * width / 100
cell.draw(cr, layout, cell_width, row_height, dpi_x, dpi_y)
cr.translate(cell_width, 0)
cr.restore()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(0, 0, 1.0)
cr.rectangle(0, 0, width, row_height)
cr.stroke()
return row_height
class GtkDocTableCell(GtkDocBaseElement):
"""Implement a cell in a table row.
"""
_type = 'CELL'
_allowed_children = ['PARAGRAPH', 'IMAGE']
def __init__(self, style, span=1):
GtkDocBaseElement.__init__(self, style)
self._span = span
def get_span(self):
return self._span
def divide(self, layout, width, height, dpi_x, dpi_y):
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
# calculate real available width
width -= 2 * h_padding
available_height = height
# calculate height of each child
cell_height = 0
new_cell = None
e2 = None
childnr = 0
for child in self._children:
if new_cell is None:
(e1, e2), child_height = child.divide(layout, width,
available_height, dpi_x, dpi_y)
cell_height += child_height
available_height -= child_height
if e2 is not None:
#divide the cell
new_style = TableCellStyle(self._style)
if e1 is not None:
new_style.set_top_border(False)
new_cell = GtkDocTableCell(new_style, self._span)
new_cell.add_child(e2)
# then update this cell
self._style.set_bottom_border(False)
if e1 is not None:
childnr += 1
else:
#cell has been divided
new_cell.add_child(child)
self._children = self._children[:childnr]
# calculate real height
if cell_height != 0:
cell_height += 2 * v_padding
# a cell can't be divided, return the height
return (self, new_cell), cell_height
def draw(self, cr, layout, width, cell_height, dpi_x, dpi_y):
"""Draw a cell.
        This draw method is a bit different from the others, as the common
        cell height of all cells in a row is also given as a parameter.
This is needed to be able to draw proper vertical borders around
each cell, i.e. the border should be as long as the highest cell
in the given row.
"""
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
# calculate real available width
i_width = width - 2 * h_padding
# draw children
cr.save()
cr.translate(h_padding, v_padding)
for child in self._children:
child_height = child.draw(cr, layout, i_width, dpi_x, dpi_y)
cr.translate(0, child_height)
cr.restore()
# draw the borders
if self._style.get_top_border():
cr.move_to(0, 0)
cr.rel_line_to(width , 0)
if self._style.get_right_border():
cr.move_to(width, 0)
cr.rel_line_to(0, cell_height)
if self._style.get_bottom_border():
cr.move_to(0, cell_height)
cr.rel_line_to(width, 0)
if self._style.get_left_border():
cr.move_to(0, 0)
cr.line_to(0, cell_height)
cr.set_line_width(1)
cr.set_source_rgb(0, 0, 0)
cr.stroke()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(0, 1.0, 0)
cr.rectangle(0, 0, width, cell_height)
cr.stroke()
return cell_height
class GtkDocPicture(GtkDocBaseElement):
"""Implement an image.
"""
_type = 'IMAGE'
_allowed_children = []
def __init__(self, style, filename, width, height, crop=None):
GtkDocBaseElement.__init__(self, style)
self._filename = filename
self._width = width
self._height = height
self._crop = crop
def divide(self, layout, width, height, dpi_x, dpi_y):
img_width = self._width * dpi_x / 2.54
img_height = self._height * dpi_y / 2.54
# image can't be divided, a new page must begin
# if it can't fit on the current one
if img_height <= height:
return (self, None), img_height
else:
return (None, self), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
from gi.repository import Gtk, Gdk
img_width = self._width * dpi_x / 2.54
img_height = self._height * dpi_y / 2.54
if self._style == 'right':
l_margin = width - img_width
elif self._style == 'center':
l_margin = (width - img_width) / 2.0
else:
l_margin = 0
# load the image and get its extents
pixbuf = resize_to_buffer(self._filename, [img_width, img_height],
self._crop)
pixbuf_width = pixbuf.get_width()
pixbuf_height = pixbuf.get_height()
# calculate the scale to fit image into the set extents
scale = min(img_width / pixbuf_width, img_height / pixbuf_height)
# draw the image
cr.save()
cr.translate(l_margin, 0)
cr.scale(scale, scale)
Gdk.cairo_set_source_pixbuf(cr, pixbuf,
(img_width / scale - pixbuf_width) / 2,
(img_height / scale - pixbuf_height) / 2)
cr.rectangle(0 , 0, img_width / scale, img_height / scale)
##gcr.set_source_pixbuf(pixbuf,
##(img_width - pixbuf_width) / 2,
##(img_height - pixbuf_height) / 2)
##cr.rectangle(0 , 0, img_width, img_height)
##cr.scale(scale, scale)
cr.fill()
cr.restore()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(l_margin, 0, img_width, img_height)
cr.stroke()
return (img_height)
class GtkDocFrame(GtkDocBaseElement):
"""Implement a frame.
"""
_type = 'FRAME'
_allowed_children = ['LINE', 'POLYGON', 'BOX', 'TEXT']
def divide(self, layout, width, height, dpi_x, dpi_y):
frame_width = round(self._style.width * dpi_x / 2.54)
frame_height = round(self._style.height * dpi_y / 2.54)
t_margin = self._style.spacing[2] * dpi_y / 2.54
b_margin = self._style.spacing[3] * dpi_y / 2.54
# frame can't be divided, a new page must begin
# if it can't fit on the current one
if frame_height + t_margin + b_margin <= height:
return (self, None), frame_height + t_margin + b_margin
elif frame_height + t_margin <= height:
return (self, None), height
else:
return (None, self), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
frame_width = self._style.width * dpi_x / 2.54
frame_height = self._style.height * dpi_y / 2.54
l_margin = self._style.spacing[0] * dpi_x / 2.54
r_margin = self._style.spacing[1] * dpi_x / 2.54
t_margin = self._style.spacing[2] * dpi_y / 2.54
b_margin = self._style.spacing[3] * dpi_y / 2.54
if self._style.align == 'left':
x_offset = l_margin
elif self._style.align == 'right':
x_offset = width - r_margin - frame_width
elif self._style.align == 'center':
x_offset = (width - frame_width) / 2.0
else:
raise ValueError
# draw each element in the frame
cr.save()
cr.translate(x_offset, t_margin)
cr.rectangle(0, 0, frame_width, frame_height)
cr.clip()
for elem in self._children:
elem.draw(cr, layout, frame_width, dpi_x, dpi_y)
cr.restore()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(x_offset, t_margin, frame_width, frame_height)
cr.stroke()
return frame_height + t_margin + b_margin
class GtkDocLine(GtkDocBaseElement):
"""Implement a line.
"""
_type = 'LINE'
_allowed_children = []
def __init__(self, style, x1, y1, x2, y2):
GtkDocBaseElement.__init__(self, style)
self._start = (x1, y1)
self._end = (x2, y2)
def draw(self, cr, layout, width, dpi_x, dpi_y):
start = (self._start[0] * dpi_x / 2.54, self._start[1] * dpi_y / 2.54)
end = (self._end[0] * dpi_x / 2.54, self._end[1] * dpi_y / 2.54)
line_color = utils.rgb_color(self._style.get_color())
cr.save()
cr.set_source_rgb(*line_color)
cr.set_line_width(self._style.get_line_width())
# TODO line style
line_style = self._style.get_line_style()
if line_style != SOLID:
cr.set_dash(self._style.get_dash_style(line_style), 0)
cr.move_to(*start)
cr.line_to(*end)
cr.stroke()
cr.restore()
return 0
class GtkDocPolygon(GtkDocBaseElement):
"""Implement a line.
"""
_type = 'POLYGON'
_allowed_children = []
def __init__(self, style, path):
GtkDocBaseElement.__init__(self, style)
self._path = path
def draw(self, cr, layout, width, dpi_x, dpi_y):
path = [(x * dpi_x / 2.54, y * dpi_y / 2.54) for (x, y) in self._path]
path_start = path.pop(0)
path_stroke_color = utils.rgb_color(self._style.get_color())
path_fill_color = utils.rgb_color(self._style.get_fill_color())
cr.save()
cr.move_to(*path_start)
for (x, y) in path:
cr.line_to(x, y)
cr.close_path()
cr.set_source_rgb(*path_fill_color)
cr.fill_preserve()
cr.set_source_rgb(*path_stroke_color)
cr.set_line_width(self._style.get_line_width())
# TODO line style
line_style = self._style.get_line_style()
if line_style != SOLID:
cr.set_dash(self._style.get_dash_style(line_style), 0)
cr.stroke()
cr.restore()
return 0
class GtkDocBox(GtkDocBaseElement):
"""Implement a box with optional shadow around it.
"""
_type = 'BOX'
_allowed_children = []
def __init__(self, style, x, y, width, height):
GtkDocBaseElement.__init__(self, style)
self._x = x
self._y = y
self._width = width
self._height = height
def draw(self, cr, layout, width, dpi_x, dpi_y):
box_x = self._x * dpi_x / 2.54
box_y = self._y * dpi_y / 2.54
box_width = self._width * dpi_x / 2.54
box_height = self._height * dpi_y / 2.54
box_stroke_color = utils.rgb_color((0, 0, 0))
box_fill_color = utils.rgb_color(self._style.get_fill_color())
shadow_color = utils.rgb_color((192, 192, 192))
cr.save()
cr.set_line_width(self._style.get_line_width())
# TODO line style
line_style = self._style.get_line_style()
if line_style != SOLID:
cr.set_dash(self._style.get_dash_style(line_style), 0)
if self._style.get_shadow():
shadow_x = box_x + self._style.get_shadow_space() * dpi_x / 2.54
shadow_y = box_y + self._style.get_shadow_space() * dpi_y / 2.54
cr.set_source_rgb(*shadow_color)
cr.rectangle(shadow_x, shadow_y, box_width, box_height)
cr.fill()
cr.rectangle(box_x, box_y, box_width, box_height)
cr.set_source_rgb(*box_fill_color)
cr.fill_preserve()
cr.set_source_rgb(*box_stroke_color)
cr.stroke()
cr.restore()
return 0
class GtkDocText(GtkDocBaseElement):
"""Implement a text on graphical reports.
"""
_type = 'TEXT'
_allowed_children = []
# line spacing is not defined in ParagraphStyle
spacingfractionfont = 0.2
def __init__(self, style, vertical_alignment, text, x, y,
angle=0, mark=None):
GtkDocBaseElement.__init__(self, style)
self._align_y = vertical_alignment
self._text = text
self._x = x
self._y = y
self._angle = angle
self._marklist = []
if mark:
self._marklist = [mark]
def draw(self, cr, layout, width, dpi_x, dpi_y):
text_x = self._x * dpi_x / 2.54
text_y = self._y * dpi_y / 2.54
# turn off text wrapping
layout.set_width(-1)
# set paragraph properties
align = self._style.get_alignment_text()
if align == 'left':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.LEFT)
elif align == 'right':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.RIGHT)
elif align == 'center':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.CENTER)
elif align == 'justify':
#We have a problem, in pango, justify works only on full lines,
# and we need an alignment for the partial lines. We don't know
# for justify what alignment the user wants however. We assume
# here CENTER ...
layout.set_alignment(Pango.Alignment.CENTER)
layout.set_justify(True)
else:
raise ValueError
#
font_style = self._style.get_font()
layout.set_font_description(fontstyle_to_fontdescription(font_style))
#set line spacing based on font:
spacing = font_style.get_size() * self.spacingfractionfont
layout.set_spacing(int(round(spacing * Pango.SCALE)))
# layout the text
layout.set_markup(self._text)
layout_width, layout_height = layout.get_pixel_size()
# calculate horizontal and vertical alignment shift
if align == 'left':
align_x = 0
elif align == 'right':
align_x = - layout_width
elif align == 'center' or align == 'justify':
align_x = - layout_width / 2
else:
raise ValueError
if self._align_y == 'top':
align_y = 0
elif self._align_y == 'center':
align_y = - layout_height / 2
elif self._align_y == 'bottom':
align_y = - layout_height
else:
raise ValueError
# render the layout onto the cairo surface
cr.save()
cr.translate(text_x, text_y)
cr.rotate(radians(self._angle))
cr.move_to(align_x, align_y)
cr.set_source_rgb(*utils.rgb_color(font_style.get_color()))
PangoCairo.show_layout(cr, layout)
cr.restore()
return layout_height
def get_marks(self):
"""
Return the index mark for this text
"""
return self._marklist
#------------------------------------------------------------------------
#
# CairoDoc class
#
#------------------------------------------------------------------------
class CairoDoc(BaseDoc, TextDoc, DrawDoc):
"""Act as an abstract document that can render onto a cairo context.
Maintains an abstract model of the document. The root of this abstract
    document is self._doc. The model is built via the subclassed BaseDoc, and
the implemented TextDoc, DrawDoc interface methods.
It can render the model onto cairo context pages, according to the received
page style.
"""
# BaseDoc implementation
EXT = 'pdf'
def open(self, filename):
fe = filename.split('.')
if len(fe) == 1:
filename = filename + '.' + self.EXT
elif fe[-1] != self.EXT:
# NOTE: the warning will be bogus
# if the EXT isn't properly overridden by derived class
log.warning(_(
"""Mismatch between selected extension %(ext)s and actual format.
Writing to %(filename)s in format %(impliedext)s.""") %
{'ext' : fe[-1],
'filename' : filename,
'impliedext' : self.EXT} )
self._backend = CairoBackend(filename)
self._doc = GtkDocDocument()
self._active_element = self._doc
self._pages = []
self._elements_to_paginate = []
self._links_error = False
def close(self):
self.run()
# TextDoc implementation
def page_break(self):
self._active_element.add_child(GtkDocPagebreak())
def start_bold(self):
self.__write_text('<b>', markup=True)
def end_bold(self):
self.__write_text('</b>', markup=True)
def start_superscript(self):
self.__write_text('<small><sup>', markup=True)
def end_superscript(self):
self.__write_text('</sup></small>', markup=True)
def start_paragraph(self, style_name, leader=None):
style_sheet = self.get_style_sheet()
style = style_sheet.get_paragraph_style(style_name)
new_paragraph = GtkDocParagraph(style, leader)
self._active_element.add_child(new_paragraph)
self._active_element = new_paragraph
def end_paragraph(self):
self._active_element = self._active_element.get_parent()
def start_table(self, name, style_name):
style_sheet = self.get_style_sheet()
style = style_sheet.get_table_style(style_name)
new_table = GtkDocTable(style)
self._active_element.add_child(new_table)
self._active_element = new_table
# we need to remember the column width list from the table style.
        # this is an ugly hack, but we have no better idea.
self._active_row_style = list(map(style.get_column_width,
list(range(style.get_columns()))))
if self.get_rtl_doc():
self._active_row_style.reverse()
def end_table(self):
self._active_element = self._active_element.get_parent()
def start_row(self):
new_row = GtkDocTableRow(self._active_row_style)
self._active_element.add_child(new_row)
self._active_element = new_row
def end_row(self):
if self.get_rtl_doc():
self._active_element._children.reverse()
self._active_element = self._active_element.get_parent()
def start_cell(self, style_name, span=1):
style_sheet = self.get_style_sheet()
style = style_sheet.get_cell_style(style_name)
new_cell = GtkDocTableCell(style, span)
self._active_element.add_child(new_cell)
self._active_element = new_cell
def end_cell(self):
self._active_element = self._active_element.get_parent()
def write_styled_note(self, styledtext, format, style_name,
contains_html=False, links=False):
"""
Convenience function to write a styledtext to the cairo doc.
        styledtext: a StyledText object to write
        format: 0 = Flowed, 1 = Preformatted
        style_name: name of the style to use for default presentation
        contains_html: bool, whether the text contains html markup. If
            contains_html=True, the textdoc is free to handle that in some
            way, e.g. remove all tags or make sure a link is clickable.
            CairoDoc does nothing different for html notes.
        links: bool, True if URLs should be made clickable
"""
text = str(styledtext)
s_tags = styledtext.get_tags()
#FIXME: following split should be regex to match \n\s*\n instead?
markuptext = self._backend.add_markup_from_styled(text, s_tags,
split='\n\n')
if format == 1:
#preformatted, retain whitespace. Cairo retains \n automatically,
#so use \n\n for paragraph detection
#FIXME: following split should be regex to match \n\s*\n instead?
for line in markuptext.split('\n\n'):
self.start_paragraph(style_name)
self.__write_text(line, markup=True, links=links)
self.end_paragraph()
elif format == 0:
#flowed
#FIXME: following split should be regex to match \n\s*\n instead?
for line in markuptext.split('\n\n'):
self.start_paragraph(style_name)
#flowed, normal whitespace goes away, but we keep linebreak
lines = line.split('\n')
newlines = []
for singleline in lines:
newlines.append(' '.join(singleline.split()))
self.__write_text('\n'.join(newlines), markup=True, links=links)
self.end_paragraph()
def __markup(self, text, markup=None):
if not markup:
            # We need to escape the text here for later Pango.Layout.set_markup
            # calls. This way we preserve the markup created by the report.
            # The markup in the note editor is not in the text, so it is not
            # considered here; it must be added via pango as well.
text = self._backend.ESCAPE_FUNC()(text)
return text
def __write_text(self, text, mark=None, markup=False, links=False):
"""
@param text: text to write.
@param mark: IndexMark to use for indexing
@param markup: True if text already contains markup info.
Then text will no longer be escaped
@param links: True if URLs should be made clickable
"""
if links == True:
import cairo
if cairo.cairo_version() < 11210 and self._links_error == False:
                # Cairo v1.12 is supposed to be the first version
                # that supports clickable links
                print("""
WARNING: This version of cairo (%s) does NOT support clickable links.
The first version that is supposed to support them is v1.12. See the roadmap:
          http://www.cairographics.org/roadmap/
The workaround is to save to another format that supports clickable
links (like ODF) and write the PDF from that format.
""" % cairo.version)
self._links_error = True
text = self.__markup(text, markup)
if mark:
self._active_element.add_mark(mark)
self._active_element.add_text(text)
def write_text(self, text, mark=None, links=False):
"""Write a normal piece of text according to the
present style
@param text: text to write.
@param mark: IndexMark to use for indexing
@param links: True if URLs should be made clickable
"""
self.__write_text(text, mark, links=links)
def write_markup(self, text, s_tags, mark=None):
"""
Writes the text in the current paragraph. Should only be used after a
start_paragraph and before an end_paragraph.
@param text: text to write. The text is assumed to be _not_ escaped
@param s_tags: assumed to be list of styledtexttags to apply to the
text
@param mark: IndexMark to use for indexing
"""
markuptext = self._backend.add_markup_from_styled(text, s_tags)
self.__write_text(markuptext, mark=mark, markup=True)
def add_media(self, name, pos, x_cm, y_cm, alt='',
style_name=None, crop=None):
new_image = GtkDocPicture(pos, name, x_cm, y_cm, crop=crop)
self._active_element.add_child(new_image)
if len(alt):
style_sheet = self.get_style_sheet()
style = style_sheet.get_paragraph_style(style_name)
style.set_alignment(PARA_ALIGN_CENTER)
# Center the caption under the image
if pos == "right":
style.set_left_margin(self.get_usable_width() - new_image._width)
else:
style.set_right_margin(self.get_usable_width() - new_image._width)
new_paragraph = GtkDocParagraph(style)
new_paragraph.add_text('\n'.join(alt))
self._active_element.add_child(new_paragraph)
def insert_toc(self):
"""
Insert a Table of Contents at this point in the document.
"""
self._doc.add_child(GtkDocTableOfContents())
def insert_index(self):
"""
Insert an Alphabetical Index at this point in the document.
"""
self._doc.add_child(GtkDocAlphabeticalIndex())
# DrawDoc implementation
def start_page(self):
# if this is not the first page we need to "close" the previous one
children = self._doc.get_children()
if children and children[-1].get_type() != 'PAGEBREAK':
self._doc.add_child(GtkDocPagebreak())
new_frame_style = FrameStyle(width=self.get_usable_width(),
height=self.get_usable_height())
new_frame = GtkDocFrame(new_frame_style)
self._active_element.add_child(new_frame)
self._active_element = new_frame
def end_page(self):
self._active_element = self._active_element.get_parent()
def draw_line(self, style_name, x1, y1, x2, y2):
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
new_line = GtkDocLine(style, x1, y1, x2, y2)
self._active_element.add_child(new_line)
def draw_path(self, style_name, path):
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
new_polygon = GtkDocPolygon(style, path)
self._active_element.add_child(new_polygon)
def draw_box(self, style_name, text, x, y, w, h, mark=None):
""" @param mark: IndexMark to use for indexing """
# we handle the box and...
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
new_box = GtkDocBox(style, x, y, w, h)
self._active_element.add_child(new_box)
# ...the text separately
paragraph_style_name = style.get_paragraph_style()
if paragraph_style_name:
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_LEFT)
# horizontal position of the text is not included in the style,
# we assume that it is the size of the shadow, or 0.2mm
if style.get_shadow():
x_offset = style.get_shadow_space()
else:
x_offset = 0.2
new_text = GtkDocText(paragraph_style, 'center',
self.__markup(text),
x + x_offset, y + h / 2, angle=0, mark=mark)
self._active_element.add_child(new_text)
def draw_text(self, style_name, text, x, y, mark=None):
""" @param mark: IndexMark to use for indexing """
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
paragraph_style_name = style.get_paragraph_style()
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_LEFT)
new_text = GtkDocText(paragraph_style, 'top',
self.__markup(text), x, y, angle=0, mark=mark)
self._active_element.add_child(new_text)
def center_text(self, style_name, text, x, y, mark=None):
""" @param mark: IndexMark to use for indexing """
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
paragraph_style_name = style.get_paragraph_style()
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_CENTER)
new_text = GtkDocText(paragraph_style, 'top',
self.__markup(text), x, y, angle=0, mark=mark)
self._active_element.add_child(new_text)
def rotate_text(self, style_name, text, x, y, angle, mark=None):
""" @param mark: IndexMark to use for indexing """
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
paragraph_style_name = style.get_paragraph_style()
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_CENTER)
new_text = GtkDocText(paragraph_style, 'center',
self.__markup('\n'.join(text)), x, y, angle, mark)
self._active_element.add_child(new_text)
# paginating and drawing interface
def run(self):
"""Create the physical output from the meta document.
It must be implemented in the subclasses. The idea is that with
different subclass different output could be generated:
e.g. Print, PDF, PS, PNG (which are currently supported by Cairo).
"""
raise NotImplementedError
def paginate_document(self, layout, page_width, page_height, dpi_x, dpi_y):
"""Paginate the entire document.
"""
while not self.paginate(layout, page_width, page_height, dpi_x, dpi_y):
pass
def paginate(self, layout, page_width, page_height, dpi_x, dpi_y):
"""Paginate the meta document in chunks.
Only one document level element is handled at one run.
"""
        # if this is the first run, initialize the variables
if not self._elements_to_paginate:
self._elements_to_paginate = self._doc.get_children()[:]
self._pages.append(GtkDocDocument())
self._available_height = page_height
# try to fit the next element to current page, divide it if needed
if not self._elements_to_paginate:
#this is a self._doc where nothing has been added. Empty page.
return True
elem = self._elements_to_paginate.pop(0)
(e1, e2), e1_h = elem.divide(layout,
page_width,
self._available_height,
dpi_x,
dpi_y)
# if (part of) it fits on current page add it
if e1 is not None:
self._pages[len(self._pages) - 1].add_child(e1)
# if elem was divided remember the second half to be processed
if e2 is not None:
self._elements_to_paginate.insert(0, e2)
# calculate how much space left on current page
self._available_height -= e1_h
# start new page if needed
if (e1 is None) or (e2 is not None):
self._pages.append(GtkDocDocument())
self._available_height = page_height
return len(self._elements_to_paginate) == 0
def draw_page(self, page_nr, cr, layout, width, height, dpi_x, dpi_y):
"""Draw a page on a Cairo context.
"""
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(0, 1.0, 0)
cr.rectangle(0, 0, width, height)
cr.stroke()
self._pages[page_nr].draw(cr, layout, width, dpi_x, dpi_y)
|
muhaochen/MTransE
|
refs/heads/master
|
run/en_fr/train_MMtransE_15k.py
|
1
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../src/MMTransE'))
from MMTransE import MMTransE
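# Train a 75-dimensional multilingual MMTransE model on the WK3l-15k
# English-French graphs, checkpointing every 100 epochs, and save the final
# model next to this script.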
model = MMTransE(dim=75, save_dir=os.path.join(os.path.dirname(__file__), 'model_MMtransE_person_15k.bin'))
model.Train_MT(epochs=400,
               save_every_epochs=100,
               languages=['en', 'fr'],
               graphs=[os.path.join(os.path.dirname(__file__), '../../data/WK3l-15k/en_fr/P_en_v5.csv'),
                       os.path.join(os.path.dirname(__file__), '../../data/WK3l-15k/en_fr/P_fr_v5.csv')],
               intersect_graph=os.path.join(os.path.dirname(__file__), '../../data/WK3l-15k/en_fr/P_en_fr_v5'),
               save_dirs=['model_en.bin', 'model_fr.bin'],
               rate=0.01,
               split_rate=True,
               L1_flag=False)
model.save(os.path.join(os.path.dirname(__file__), 'model_MMtransE_person_15k.bin'))
|
mwaterfall/alfred-kippt-search
|
refs/heads/master
|
workflow/requests/__init__.py
|
56
|
# -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('http://python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print r.text
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2013 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '1.2.0'
__build__ = 0x010200
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Kenneth Reitz'
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
|
akshah/netra
|
refs/heads/master
|
resultWriter/resultWriter.py
|
1
|
import threading
from contextlib import closing
import os.path
import os
import subprocess
import traceback
from customUtilities.helperFunctions import *
from customUtilities.logger import logger
class ResultWriter():
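    # NOTE: the default logger below is evaluated once, at import time, so
    # every ResultWriter that omits the argument shares the same logger
    # instance.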
def __init__(self,resultfilename,logger=logger('detourResultWriter.log')):
self.lock = threading.RLock()
self.resultfilename = resultfilename
#There could be some old garbage result file with same name, remove it
if os.path.exists(self.resultfilename):
os.remove(self.resultfilename)
self.logger=logger
self.peers = []
self.rib_name=None
self.rib_time='NULL'
self.num_entries=0
self.num_def_detours=0
self.num_poss_detours=0
self.ProcessedRibData=[] #List to hold summarized information about result file
self.ProcessedPeerData=[] #List to hold summarized information per peer
self.ProcessedPeerInfo=[] #List to hold peer location info
def resetparams(self):
if os.path.exists(self.resultfilename):
os.remove(self.resultfilename)
self.peers = []
self.rib_name=None
self.rib_time='NULL'
self.num_entries=0
self.num_def_detours=0
self.num_poss_detours=0
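    # `val` is expected to be the string repr of a list of fields; it is
    # eval()ed back into a list and written pipe-separated to the result file.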
def write(self,val):
self.lock.acquire()
try:
#Log file
resultfile = open(self.resultfilename,'a')
valList=eval(val)
strg=''
for field in valList:
#logger.warn(str(field))
strg=strg+str(field)+'|'
print(strg[:-1],file=resultfile)
resultfile.close()
finally:
self.lock.release()
def populateProcessedPeerData(self,processedRibsID):
self.ProcessedPeerData=[]
for iter in range(0,len(self.peers)):
flist=[]
#flist.append('None') # For ID
flist.append(processedRibsID)
flist.append(self.peers[iter].peerIP)
flist.append(self.peers[iter].peer_num_entries)
flist.append(self.peers[iter].peer_num_poss_detours)
flist.append(self.peers[iter].peer_num_def_detours)
self.ProcessedPeerData.append(flist)
def populateProcessedRibData(self,status):
self.ProcessedRibData=[]
flist=[]
flist.append('None') # For ID
flist.append(self.rib_name)
flist.append(self.rib_time)
curr_epoch,_=currentTime()
flist.append(curr_epoch)
flist.append(status)
flist.append(self.num_entries)
flist.append(self.num_poss_detours)
flist.append(self.num_def_detours)
self.ProcessedRibData.append(flist)
def populateProcessedPeerInfo(self):
self.ProcessedPeerInfo=[]
for iter in range(0,len(self.peers)):
flist=[]
flist.append('None') # For ID
flist.append(self.peers[iter].peerAS)
flist.append(self.peers[iter].peerIP)
flist.append(self.peers[iter].peerCountry)
self.ProcessedPeerInfo.append(flist)
def get_ASPath(self,db,as_path):
with closing( db.cursor() ) as cur:
try:
cur.execute("select as_path from UniqueAbnormalPaths where as_path = '{0}'".format(as_path))
retval=cur.fetchone()
except:
logger.error("Invalid Path: "+as_path)
raise Exception('Select ASPath to UniqueAbnormalPaths Failed')
return retval
def push_UniqueAbnormalPaths(self,db,data):
with closing( db.cursor() ) as cur:
try:
#TODO: Check if this path has been pushed previously
cur.executemany("insert into UniqueAbnormalPaths(id,as_path,countries,analysis,detour_origin_asn,detour_origin_countries,detour_return_asn,detour_return_countries,detour_length,detour_destination_asn,detour_destination_countries,detour_countries_affected) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",data)
db.commit()
except:
raise Exception('Multi-Insert to UniqueAbnormalPaths Failed')
def push_AbnormalRibEntries(self,db,data):
with closing( db.cursor() ) as cur:
try:
cur.executemany("insert into AbnormalRibEntries(id,rib_time,peer,prefix,as_path) values (%s,%s,%s,%s,%s)",data)
db.commit()
except:
raise Exception('Multi-Insert to AbnormalRibEntries Failed')
def push_ProcessedRibs(self,db,data):
with closing( db.cursor() ) as cur:
try:
cur.execute("insert into ProcessedRibs(id,rib_name,rib_time,insert_time,read_status,num_entries,num_poss_detours,num_def_detours) values (%s,%s,%s,%s,%s,%s,%s,%s)",data[0])
db.commit()
cur.execute("select id from ProcessedRibs where rib_name= '{0}'".format(data[0][1]))
processedRibsID=cur.fetchone()
return processedRibsID[0]
except:
raise Exception('Multi-Insert to push_ProcessedRibs Failed')
def push_ProcessedPeers(self,db,data):
with closing( db.cursor() ) as cur:
try:
cur.executemany("insert into ProcessedPeers(processedRibsID,peerIP,peer_num_entries,peer_num_poss_detours,peer_num_def_detours) values (%s,%s,%s,%s,%s)",data)
db.commit()
except:
raise Exception('Multi-Insert to ProcessedPeers Failed')
def push_PeerInfo(self,db,data):
with closing( db.cursor() ) as cur:
try:
for datarow in data:
cur.execute("select id from PeerInfo where peerIP = '{0}'".format(datarow[2]))
retval=cur.fetchone()
if not retval:
cur.execute("insert into PeerInfo(id,peerAS,peerIP,peerIP_Country) values (%s,%s,%s,%s)",datarow)
db.commit()
except:
raise Exception('Multi-Insert to push_PeerInfo Failed')
def loadtoDB(self,db):
self.lock.acquire()
toPushUniqueAbnormalPaths=[]
toPushAbnormalRibEntries=[]
try:
#if not os.path.exists(self.resultfilename):
# logger.warn('No result file for: '+str(self.resultfilename))
# self.resetparams()
# return
self.logger.info("Pushing "+self.resultfilename+" to DB.")
seenASPaths=[]
if os.path.exists(self.resultfilename):
f=open(self.resultfilename, 'r')
for line in f:
if line == "":
continue
rl = line.strip()
#TODO: rline should be a dict
rline=rl.split('|')
if rline[6] == 'poss':
self.num_poss_detours+=1
elif rline[6] == 'def':
self.num_def_detours+=1
iter=0
for pObj in self.peers:
if pObj.peerIP == rline[2]:
#print(pObj.peerIP, rline[2])
if rline[6] == 'poss':
self.peers[iter].peer_num_poss_detours+=1
elif rline[6] == 'def':
self.peers[iter].peer_num_def_detours+=1
break
iter+=1
if rline[4] not in seenASPaths:
if self.get_ASPath(db,rline[4]) is None:
finalent=[]
finalent.append(rline[0])
finalent.append(rline[4])
seenASPaths.append(rline[4])
finalent.append(str(rline[5]))
finalent.append(rline[6])
finalent.append(rline[7])
#finalent.append("\'"+str(rline[8])+"\'")
finalent.append(str(rline[8]))
finalent.append(rline[9])
finalent.append(str(rline[10]))
finalent.append(rline[11])
finalent.append(str(rline[12]))
finalent.append(str(rline[13]))
finalent.append(str(rline[14]))
toPushUniqueAbnormalPaths.append(finalent)
if rline[6] == 'def':
fentry=[]
fentry.append(rline[0])
fentry.append(rline[1])
fentry.append(rline[2])
fentry.append(rline[3])
fentry.append(rline[4])
toPushAbnormalRibEntries.append(fentry)
f.close()
#Update ProcessedRibs table
self.populateProcessedRibData('OK')
processedRibsID=self.push_ProcessedRibs(db,self.ProcessedRibData)
#Update ProcessedPeers table
self.populateProcessedPeerData(processedRibsID)
self.push_ProcessedPeers(db,self.ProcessedPeerData)
#Update ProcessedPeers table
self.populateProcessedPeerInfo()
self.push_PeerInfo(db,self.ProcessedPeerInfo)
self.push_UniqueAbnormalPaths(db,toPushUniqueAbnormalPaths)
self.push_AbnormalRibEntries(db,toPushAbnormalRibEntries)
self.logger.info("Pushed "+self.resultfilename+" to DB.")
self.resetparams() #resultfile must be closed before this call
finally:
self.lock.release()
db.commit()
return toPushAbnormalRibEntries
def loadTracestoDB(self,db,normalizeabnormalRibEntries):
for entry in normalizeabnormalRibEntries:
try:
id=entry[0]
ribTime=entry[1]
peer=entry[2]
prefix=entry[3]
net=entry[4]
randomHost=entry[5]
asPath=entry[6]
outfileName=entry[7]
#Read the warts file
lines = subprocess.check_output(["sc_warts2json", outfileName],universal_newlines=True)
data=[]
data.append(id)
data.append(ribTime)
data.append(peer)
data.append(prefix)
data.append(net)
data.append(randomHost)
data.append(asPath)
data.append(lines)
with closing( db.cursor() ) as cur:
try:
cur.execute("insert into Traceroutes(id,rib_time,peer,prefix,net,host_ip,as_path,json_trace) values (%s,%s,%s,%s,%s,%s,%s,%s)",data)
db.commit()
except:
raise Exception('Multi-Insert to UniqueAbnormalPaths Failed')
os.remove(outfileName)
except:
traceback.print_exc()
self.logger.error('Problem in inserting data to Traceroutes table.')
db.commit()
|
malon/presupuesto
|
refs/heads/master
|
budget_app/management/commands/__init__.py
|
12133432
| |
havatv/QGIS
|
refs/heads/master
|
python/plugins/processing/modeler/__init__.py
|
12133432
| |
step21/inkscape-osx-packaging-native
|
refs/heads/master
|
packaging/macosx/Inkscape.app/Contents/Resources/extensions/export_gimp_palette.py
|
3
|
#!/usr/bin/env python
'''
Author: Jos Hirth, kaioa.com
License: GNU General Public License - http://www.gnu.org/licenses/gpl.html
Warranty: see above
'''
DOCNAME='sodipodi:docname'
import sys, simplestyle
try:
from xml.dom.minidom import parse
except:
    sys.exit('The export_gpl.py module requires PyXML. Please download the latest version from http://pyxml.sourceforge.net/.')
colortags=(u'fill',u'stroke',u'stop-color',u'flood-color',u'lighting-color')
colors={}
def walk(node):
checkStyle(node)
if node.hasChildNodes():
childs=node.childNodes
for child in childs:
walk(child)
def checkStyle(node):
if hasattr(node,"hasAttributes") and node.hasAttributes():
sa=node.getAttribute('style')
if sa!='':
styles=simplestyle.parseStyle(sa)
            for tag in colortags:
                if tag in styles:
                    addColor(styles[tag])
def addColor(col):
if simplestyle.isColor(col):
c=simplestyle.parseColor(col)
colors['%3i %3i %3i ' % (c[0],c[1],c[2])]=simplestyle.formatColoria(c).upper()
stream = open(sys.argv[-1], 'r')
dom = parse(stream)
stream.close()
walk(dom)
print 'GIMP Palette\nName: %s\n#' % (dom.getElementsByTagName('svg')[0].getAttribute(DOCNAME).split('.')[0])
for k,v in sorted(colors.items()):
print k+v
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 encoding=utf-8
|
ryanahall/django
|
refs/heads/master
|
django/http/multipartparser.py
|
105
|
"""
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import binascii
import cgi
import sys
from django.conf import settings
from django.core.exceptions import SuspiciousMultipartForm
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
_BASE64_DECODE_ERROR = TypeError if six.PY2 else binascii.Error
class MultiPartParser(object):
"""
    An RFC 2388 multipart/form-data parser.
    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
# Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, six.text_type):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
        # The HTTP spec says that Content-Length >= 0 is valid, so handle
        # content-length == 0 before continuing.
if self._content_length == 0:
return QueryDict('', encoding=self._encoding), MultiValueDict()
# See if any of the handlers take care of the parsing.
# This allows overriding everything if need be.
for handler in handlers:
result = handler.handle_raw_input(self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
# Check to see if it was handled
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_text(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = base64.b64decode(raw_data)
except _BASE64_DECODE_ERROR:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_text(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_text(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type, content_type_extra = meta_data.get('content-type', ('', {}))
content_type = content_type.strip()
charset = content_type_extra.get('charset')
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset, content_type_extra)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
# We should always decode base64 chunks by multiple of 4,
# ignoring whitespace.
stripped_chunk = b"".join(chunk.split())
remaining = len(stripped_chunk) % 4
while remaining != 0:
over_chunk = field_stream.read(4 - remaining)
stripped_chunk += b"".join(over_chunk.split())
remaining = len(stripped_chunk) % 4
try:
chunk = base64.b64decode(stripped_chunk)
except Exception as e:
# Since this is only a chunk, any error is an unfixable error.
msg = "Could not decode base64 data: %r" % e
six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile:
self._close_files()
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload as e:
self._close_files()
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signaling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(
force_text(old_field_name, self._encoding, errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\") + 1:].strip()
def _close_files(self):
# Free up all file handles.
# FIXME: this currently assumes that upload handlers store the file as 'file'
# We should document that... (Maybe add handler.free_file to complement new_file)
for handler in self._upload_handlers:
if hasattr(handler, 'file'):
handler.file.close()
class LazyStream(six.Iterator):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
        A producer is an iterator that yields a bytestring each time it
        is advanced.
"""
self._producer = producer
self._empty = False
self._leftover = b''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = self._remaining if size is None else size
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield b''.join(self)
return
            # otherwise do some bookkeeping to return exactly enough
            # of the stream and stash any extra content we get from
            # the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = next(self)
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = b''.join(parts())
return out
def __next__(self):
"""
Used when the exact number of bytes to read is unimportant.
        This procedure just returns whatever chunk is conveniently returned
from the iterator instead. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = b''
else:
output = next(self._producer)
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = b''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousMultipartForm(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
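# Usage sketch for LazyStream (illustrative, not part of the original
# module): bytes read past what the caller asked for are stashed, and
# unget() rewinds both the buffer and tell().
#
#     stream = LazyStream(iter([b'abcdef']))
#     stream.read(3)        # -> b'abc'; the unread b'def' is stashed
#     stream.unget(b'abc')  # push the bytes back; tell() rewinds to 0
#     stream.read()         # -> b'abcdef'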
class ChunkIter(six.Iterator):
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class InterBoundaryIter(six.Iterator):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def __next__(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(six.Iterator):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
    Future calls to next() after the boundary has been located will raise a
    StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
        # Read one byte and unget it, just to verify that the underlying
        # stream is not already exhausted.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof=False):
"""
Finds a multipart boundary in data.
        If no boundary exists in the data, None is returned. Otherwise a
        tuple containing the indices of the following is returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = data.find(self._boundary)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end - 1)
if data[last:last + 1] == b'\n':
end -= 1
last = max(0, end - 1)
if data[last:last + 1] == b'\r':
end -= 1
return end, next
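# Worked example of _find_boundary (illustrative, not part of the original
# module): with boundary b'--sep', _find_boundary(b'data\r\n--sepEXTRA')
# locates the boundary at index 6, backs over the preceding CRLF, and
# returns (4, 11): slice [:4] is the payload, index 11 is just past '--sep'.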
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
    # 'find' returns the index of the start of these four bytes, so we'll
    # need to munch them later to prevent them from polluting
    # the payload.
header_end = chunk.find(b'\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except ValueError:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split(b'\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except ValueError:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = b'--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
""" Parse the header into a key-value.
Input (line): bytes, output: unicode for key/name, bytes for value which
will be decoded later
"""
plist = _parse_header_params(b';' + line)
key = plist.pop(0).lower().decode('ascii')
pdict = {}
for p in plist:
i = p.find(b'=')
if i >= 0:
has_encoding = False
name = p[:i].strip().lower().decode('ascii')
if name.endswith('*'):
# Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
# http://tools.ietf.org/html/rfc2231#section-4
name = name[:-1]
if p.count(b"'") == 2:
has_encoding = True
value = p[i + 1:].strip()
if has_encoding:
encoding, lang, value = value.split(b"'")
if six.PY3:
value = unquote(value.decode(), encoding=encoding.decode())
else:
value = unquote(value).decode(encoding)
if len(value) >= 2 and value[:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
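# Worked example (a sketch, not part of the original module): the whole
# "name: main-value" pair becomes the lowercased key, and quoted parameter
# values are unquoted but stay bytes:
#
#     parse_header(b'Content-Disposition: form-data; name="f"; filename="a.txt"')
#     # -> ('content-disposition: form-data', {'name': b'f', 'filename': b'a.txt'})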
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
|
gminds/rapidnewsng
|
refs/heads/master
|
django/views/generic/base.py
|
107
|
from __future__ import unicode_literals
import logging
from functools import update_wrapper
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.template.response import TemplateResponse
from django.utils.decorators import classonlymethod
from django.utils import six
logger = logging.getLogger('django.request')
class ContextMixin(object):
"""
A default context mixin that passes the keyword arguments received by
get_context_data as the template context.
"""
def get_context_data(self, **kwargs):
if 'view' not in kwargs:
kwargs['view'] = self
return kwargs
class View(object):
"""
Intentionally simple parent class for all views. Only implements
dispatch-by-method and simple sanity checking.
"""
http_method_names = ['get', 'post', 'put', 'delete', 'head', 'options', 'trace']
def __init__(self, **kwargs):
"""
Constructor. Called in the URLconf; can contain helpful extra
        keyword arguments.
"""
# Go through keyword arguments, and either save their values to our
# instance, or raise an error.
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classonlymethod
def as_view(cls, **initkwargs):
"""
Main entry point for a request-response process.
"""
# sanitize keyword arguments
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r. as_view "
"only accepts arguments that are already "
"attributes of the class." % (cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
self.request = request
self.args = args
self.kwargs = kwargs
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
def http_method_not_allowed(self, request, *args, **kwargs):
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': self.request
}
)
return http.HttpResponseNotAllowed(self._allowed_methods())
def options(self, request, *args, **kwargs):
"""
Handles responding to requests for the OPTIONS HTTP verb.
"""
response = http.HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = '0'
return response
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
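# Usage sketch (illustrative, not part of this module): as_view() returns a
# plain function for the URLconf, and per-request state lives on the fresh
# instance created inside view(), never on the class.
#
#     class GreetingView(View):
#         def get(self, request, *args, **kwargs):
#             return http.HttpResponse('Hello')
#
#     # urls.py: url(r'^greet/$', GreetingView.as_view())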
class TemplateResponseMixin(object):
"""
A mixin that can be used to render a template.
"""
template_name = None
response_class = TemplateResponse
content_type = None
def render_to_response(self, context, **response_kwargs):
"""
Returns a response, using the `response_class` for this
view, with a template rendered with the given context.
If any keyword arguments are provided, they will be
passed to the constructor of the response class.
"""
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request = self.request,
template = self.get_template_names(),
context = context,
**response_kwargs
)
def get_template_names(self):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
if self.template_name is None:
raise ImproperlyConfigured(
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'")
else:
return [self.template_name]
class TemplateView(TemplateResponseMixin, ContextMixin, View):
"""
A view that renders a template. This view will also pass into the context
any keyword arguments passed by the url conf.
"""
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class RedirectView(View):
"""
A view that provides a redirect on any GET request.
"""
permanent = True
url = None
query_string = False
def get_redirect_url(self, **kwargs):
"""
        Return the URL to redirect to. Keyword arguments from the
URL pattern match generating the redirect request
are provided as kwargs to this method.
"""
if self.url:
url = self.url % kwargs
args = self.request.META.get('QUERY_STRING', '')
if args and self.query_string:
url = "%s?%s" % (url, args)
return url
else:
return None
def get(self, request, *args, **kwargs):
url = self.get_redirect_url(**kwargs)
if url:
if self.permanent:
return http.HttpResponsePermanentRedirect(url)
else:
return http.HttpResponseRedirect(url)
else:
logger.warning('Gone: %s', self.request.path,
extra={
'status_code': 410,
'request': self.request
})
return http.HttpResponseGone()
def head(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def options(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
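# Usage sketch (illustrative): get_redirect_url() interpolates self.url with
# the URL kwargs, so a pattern such as
#
#     url(r'^old/(?P<slug>\w+)/$', RedirectView.as_view(url='/new/%(slug)s/'))
#
# answers any of the verbs above with a 301 (permanent defaults to True here)
# pointing at /new/<slug>/.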
|
takeshineshiro/cinder
|
refs/heads/master
|
cinder/cmd/backup.py
|
21
|
#!/usr/bin/env python
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Cinder Volume Backup."""
import sys
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
eventlet.monkey_patch()
from cinder import i18n
i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
from cinder import objects
from cinder import service
from cinder import utils
from cinder import version
CONF = cfg.CONF
def main():
objects.register_all()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
utils.monkey_patch()
gmr.TextGuruMeditation.setup_autorun(version)
server = service.Service.create(binary='cinder-backup')
service.serve(server)
service.wait()
|
crakensio/django_training
|
refs/heads/master
|
lib/python2.7/site-packages/sphinx/writers/text.py
|
3
|
# -*- coding: utf-8 -*-
"""
sphinx.writers.text
~~~~~~~~~~~~~~~~~~~
Custom docutils writer for plain text.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import textwrap
from itertools import groupby
from docutils import nodes, writers
from docutils.utils import column_width
from sphinx import addnodes
from sphinx.locale import admonitionlabels, _
class TextWrapper(textwrap.TextWrapper):
"""Custom subclass that uses a different word separator regex."""
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'(?<=\s)(?::[a-z-]+:)?`\S+|' # interpreted text start
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
The original _wrap_chunks uses len() to calculate width.
This method respects wide/fullwidth characters for width adjustment.
"""
drop_whitespace = getattr(self, 'drop_whitespace', True) #py25 compat
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
chunks.reverse()
while chunks:
cur_line = []
cur_len = 0
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
width = self.width - column_width(indent)
if drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = column_width(chunks[-1])
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
else:
break
if chunks and column_width(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
if drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
def _break_word(self, word, space_left):
"""_break_word(word : string, space_left : int) -> (string, string)
Break line by unicode width instead of len(word).
"""
total = 0
for i,c in enumerate(word):
total += column_width(c)
if total > space_left:
return word[:i-1], word[i-1:]
return word, ''
def _split(self, text):
"""_split(text : string) -> [string]
        Override the original method, which only splits by 'wordsep_re'.
        This '_split' also splits wide characters into single-character chunks.
"""
split = lambda t: textwrap.TextWrapper._split(self, t)
chunks = []
for chunk in split(text):
for w, g in groupby(chunk, column_width):
if w == 1:
chunks.extend(split(''.join(g)))
else:
chunks.extend(list(g))
return chunks
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Override original method for using self._break_word() instead of slice.
"""
space_left = max(width - cur_len, 1)
if self.break_long_words:
l, r = self._break_word(reversed_chunks[-1], space_left)
cur_line.append(l)
reversed_chunks[-1] = r
elif not cur_line:
cur_line.append(reversed_chunks.pop())
MAXWIDTH = 70
STDINDENT = 3
def my_wrap(text, width=MAXWIDTH, **kwargs):
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
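# A minimal sketch of the width handling (column_width comes from
# docutils.utils, so fullwidth CJK characters count as two columns):
#
#     my_wrap(u'アアア', width=4)  # -> [u'アア', u'ア'], two columns per char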
class TextWriter(writers.Writer):
supported = ('text',)
settings_spec = ('No options here.', '', ())
settings_defaults = {}
output = None
def __init__(self, builder):
writers.Writer.__init__(self)
self.builder = builder
def translate(self):
visitor = TextTranslator(self.document, self.builder)
self.document.walkabout(visitor)
self.output = visitor.body
class TextTranslator(nodes.NodeVisitor):
sectionchars = '*=-~"+`'
def __init__(self, document, builder):
nodes.NodeVisitor.__init__(self, document)
self.builder = builder
newlines = builder.config.text_newlines
if newlines == 'windows':
self.nl = '\r\n'
elif newlines == 'native':
self.nl = os.linesep
else:
self.nl = '\n'
self.sectionchars = builder.config.text_sectionchars
self.states = [[]]
self.stateindent = [0]
self.list_counter = []
self.sectionlevel = 0
self.lineblocklevel = 0
self.table = None
def add_text(self, text):
self.states[-1].append((-1, text))
def new_state(self, indent=STDINDENT):
self.states.append([])
self.stateindent.append(indent)
def end_state(self, wrap=True, end=[''], first=None):
content = self.states.pop()
maxindent = sum(self.stateindent)
indent = self.stateindent.pop()
result = []
toformat = []
def do_format():
if not toformat:
return
if wrap:
res = my_wrap(''.join(toformat), width=MAXWIDTH-maxindent)
else:
res = ''.join(toformat).splitlines()
if end:
res += end
result.append((indent, res))
for itemindent, item in content:
if itemindent == -1:
toformat.append(item)
else:
do_format()
result.append((indent + itemindent, item))
toformat = []
do_format()
if first is not None and result:
itemindent, item = result[0]
result_rest, result = result[1:], []
if item:
toformat = [first + ' '.join(item)]
do_format() #re-create `result` from `toformat`
_dummy, new_item = result[0]
result.insert(0, (itemindent - indent, [new_item[0]]))
result[1] = (itemindent, new_item[1:])
result.extend(result_rest)
self.states[-1].extend(result)
def visit_document(self, node):
self.new_state(0)
def depart_document(self, node):
self.end_state()
self.body = self.nl.join(line and (' '*indent + line)
for indent, lines in self.states[0]
for line in lines)
# XXX header/footer?
def visit_highlightlang(self, node):
raise nodes.SkipNode
def visit_section(self, node):
self._title_char = self.sectionchars[self.sectionlevel]
self.sectionlevel += 1
def depart_section(self, node):
self.sectionlevel -= 1
def visit_topic(self, node):
self.new_state(0)
def depart_topic(self, node):
self.end_state()
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_rubric(self, node):
self.new_state(0)
self.add_text('-[ ')
def depart_rubric(self, node):
self.add_text(' ]-')
self.end_state()
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_glossary(self, node):
pass
def depart_glossary(self, node):
pass
def visit_title(self, node):
if isinstance(node.parent, nodes.Admonition):
self.add_text(node.astext()+': ')
raise nodes.SkipNode
self.new_state(0)
def depart_title(self, node):
if isinstance(node.parent, nodes.section):
char = self._title_char
else:
char = '^'
text = ''.join(x[1] for x in self.states.pop() if x[0] == -1)
self.stateindent.pop()
self.states[-1].append(
(0, ['', text, '%s' % (char * column_width(text)), '']))
def visit_subtitle(self, node):
pass
def depart_subtitle(self, node):
pass
def visit_attribution(self, node):
self.add_text('-- ')
def depart_attribution(self, node):
pass
def visit_desc(self, node):
pass
def depart_desc(self, node):
pass
def visit_desc_signature(self, node):
self.new_state(0)
if node.parent['objtype'] in ('class', 'exception'):
self.add_text('%s ' % node.parent['objtype'])
def depart_desc_signature(self, node):
# XXX: wrap signatures in a way that makes sense
self.end_state(wrap=False, end=None)
def visit_desc_name(self, node):
pass
def depart_desc_name(self, node):
pass
def visit_desc_addname(self, node):
pass
def depart_desc_addname(self, node):
pass
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.add_text(' -> ')
def depart_desc_returns(self, node):
pass
def visit_desc_parameterlist(self, node):
self.add_text('(')
self.first_param = 1
def depart_desc_parameterlist(self, node):
self.add_text(')')
def visit_desc_parameter(self, node):
if not self.first_param:
self.add_text(', ')
else:
self.first_param = 0
self.add_text(node.astext())
raise nodes.SkipNode
def visit_desc_optional(self, node):
self.add_text('[')
def depart_desc_optional(self, node):
self.add_text(']')
def visit_desc_annotation(self, node):
pass
def depart_desc_annotation(self, node):
pass
def visit_desc_content(self, node):
self.new_state()
self.add_text(self.nl)
def depart_desc_content(self, node):
self.end_state()
def visit_figure(self, node):
self.new_state()
def depart_figure(self, node):
self.end_state()
def visit_caption(self, node):
pass
def depart_caption(self, node):
pass
def visit_productionlist(self, node):
self.new_state()
names = []
for production in node:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
for production in node:
if production['tokenname']:
self.add_text(production['tokenname'].ljust(maxlen) + ' ::=')
lastname = production['tokenname']
elif lastname is not None:
self.add_text('%s ' % (' '*len(lastname)))
self.add_text(production.astext() + self.nl)
self.end_state(wrap=False)
raise nodes.SkipNode
def visit_footnote(self, node):
self._footnote = node.children[0].astext().strip()
self.new_state(len(self._footnote) + 3)
def depart_footnote(self, node):
self.end_state(first='[%s] ' % self._footnote)
def visit_citation(self, node):
if len(node) and isinstance(node[0], nodes.label):
self._citlabel = node[0].astext()
else:
self._citlabel = ''
self.new_state(len(self._citlabel) + 3)
def depart_citation(self, node):
self.end_state(first='[%s] ' % self._citlabel)
def visit_label(self, node):
raise nodes.SkipNode
def visit_legend(self, node):
pass
def depart_legend(self, node):
pass
# XXX: option list could use some better styling
def visit_option_list(self, node):
pass
def depart_option_list(self, node):
pass
def visit_option_list_item(self, node):
self.new_state(0)
def depart_option_list_item(self, node):
self.end_state()
def visit_option_group(self, node):
self._firstoption = True
def depart_option_group(self, node):
self.add_text(' ')
def visit_option(self, node):
if self._firstoption:
self._firstoption = False
else:
self.add_text(', ')
def depart_option(self, node):
pass
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
self.add_text(node['delimiter'])
def depart_option_argument(self, node):
pass
def visit_description(self, node):
pass
def depart_description(self, node):
pass
def visit_tabular_col_spec(self, node):
raise nodes.SkipNode
def visit_colspec(self, node):
self.table[0].append(node['colwidth'])
raise nodes.SkipNode
def visit_tgroup(self, node):
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
pass
def depart_thead(self, node):
pass
def visit_tbody(self, node):
self.table.append('sep')
def depart_tbody(self, node):
pass
def visit_row(self, node):
self.table.append([])
def depart_row(self, node):
pass
def visit_entry(self, node):
if node.has_key('morerows') or node.has_key('morecols'):
raise NotImplementedError('Column or row spanning cells are '
'not implemented.')
self.new_state(0)
def depart_entry(self, node):
text = self.nl.join(self.nl.join(x[1]) for x in self.states.pop())
self.stateindent.pop()
self.table[-1].append(text)
def visit_table(self, node):
if self.table:
raise NotImplementedError('Nested tables are not supported.')
self.new_state(0)
self.table = [[]]
def depart_table(self, node):
lines = self.table[1:]
fmted_rows = []
colwidths = self.table[0]
realwidths = colwidths[:]
separator = 0
# don't allow paragraphs in table cells for now
for line in lines:
if line == 'sep':
separator = len(fmted_rows)
else:
cells = []
for i, cell in enumerate(line):
par = my_wrap(cell, width=colwidths[i])
if par:
maxwidth = max(map(column_width, par))
else:
maxwidth = 0
realwidths[i] = max(realwidths[i], maxwidth)
cells.append(par)
fmted_rows.append(cells)
def writesep(char='-'):
out = ['+']
for width in realwidths:
out.append(char * (width+2))
out.append('+')
self.add_text(''.join(out) + self.nl)
def writerow(row):
lines = zip(*row)
for line in lines:
out = ['|']
for i, cell in enumerate(line):
if cell:
adjust_len = len(cell) - column_width(cell)
out.append(' ' + cell.ljust(
realwidths[i] + 1 + adjust_len))
else:
out.append(' ' * (realwidths[i] + 2))
out.append('|')
self.add_text(''.join(out) + self.nl)
for i, row in enumerate(fmted_rows):
if separator and i == separator:
writesep('=')
else:
writesep('-')
writerow(row)
writesep('-')
self.table = None
self.end_state(wrap=False)
def visit_acks(self, node):
self.new_state(0)
self.add_text(', '.join(n.astext() for n in node.children[0].children)
+ '.')
self.end_state()
raise nodes.SkipNode
def visit_image(self, node):
if 'alt' in node.attributes:
self.add_text(_('[image: %s]') % node['alt'])
self.add_text(_('[image]'))
raise nodes.SkipNode
def visit_transition(self, node):
indent = sum(self.stateindent)
self.new_state(0)
self.add_text('=' * (MAXWIDTH - indent))
self.end_state()
raise nodes.SkipNode
def visit_bullet_list(self, node):
self.list_counter.append(-1)
def depart_bullet_list(self, node):
self.list_counter.pop()
def visit_enumerated_list(self, node):
self.list_counter.append(node.get('start', 1) - 1)
def depart_enumerated_list(self, node):
self.list_counter.pop()
def visit_definition_list(self, node):
self.list_counter.append(-2)
def depart_definition_list(self, node):
self.list_counter.pop()
def visit_list_item(self, node):
if self.list_counter[-1] == -1:
# bullet list
self.new_state(2)
elif self.list_counter[-1] == -2:
# definition list
pass
else:
# enumerated list
self.list_counter[-1] += 1
self.new_state(len(str(self.list_counter[-1])) + 2)
def depart_list_item(self, node):
if self.list_counter[-1] == -1:
self.end_state(first='* ')
elif self.list_counter[-1] == -2:
pass
else:
self.end_state(first='%s. ' % self.list_counter[-1])
def visit_definition_list_item(self, node):
self._li_has_classifier = len(node) >= 2 and \
isinstance(node[1], nodes.classifier)
def depart_definition_list_item(self, node):
pass
def visit_term(self, node):
self.new_state(0)
def depart_term(self, node):
if not self._li_has_classifier:
self.end_state(end=None)
def visit_termsep(self, node):
self.add_text(', ')
raise nodes.SkipNode
def visit_classifier(self, node):
self.add_text(' : ')
def depart_classifier(self, node):
self.end_state(end=None)
def visit_definition(self, node):
self.new_state()
def depart_definition(self, node):
self.end_state()
def visit_field_list(self, node):
pass
def depart_field_list(self, node):
pass
def visit_field(self, node):
pass
def depart_field(self, node):
pass
def visit_field_name(self, node):
self.new_state(0)
def depart_field_name(self, node):
self.add_text(':')
self.end_state(end=None)
def visit_field_body(self, node):
self.new_state()
def depart_field_body(self, node):
self.end_state()
def visit_centered(self, node):
pass
def depart_centered(self, node):
pass
def visit_hlist(self, node):
pass
def depart_hlist(self, node):
pass
def visit_hlistcol(self, node):
pass
def depart_hlistcol(self, node):
pass
def visit_admonition(self, node):
self.new_state(0)
def depart_admonition(self, node):
self.end_state()
def _visit_admonition(self, node):
self.new_state(2)
def _make_depart_admonition(name):
def depart_admonition(self, node):
self.end_state(first=admonitionlabels[name] + ': ')
return depart_admonition
visit_attention = _visit_admonition
depart_attention = _make_depart_admonition('attention')
visit_caution = _visit_admonition
depart_caution = _make_depart_admonition('caution')
visit_danger = _visit_admonition
depart_danger = _make_depart_admonition('danger')
visit_error = _visit_admonition
depart_error = _make_depart_admonition('error')
visit_hint = _visit_admonition
depart_hint = _make_depart_admonition('hint')
visit_important = _visit_admonition
depart_important = _make_depart_admonition('important')
visit_note = _visit_admonition
depart_note = _make_depart_admonition('note')
visit_tip = _visit_admonition
depart_tip = _make_depart_admonition('tip')
visit_warning = _visit_admonition
depart_warning = _make_depart_admonition('warning')
visit_seealso = _visit_admonition
depart_seealso = _make_depart_admonition('seealso')
def visit_versionmodified(self, node):
self.new_state(0)
def depart_versionmodified(self, node):
self.end_state()
def visit_literal_block(self, node):
self.new_state()
def depart_literal_block(self, node):
self.end_state(wrap=False)
def visit_doctest_block(self, node):
self.new_state(0)
def depart_doctest_block(self, node):
self.end_state(wrap=False)
def visit_line_block(self, node):
self.new_state()
self.lineblocklevel += 1
def depart_line_block(self, node):
self.lineblocklevel -= 1
self.end_state(wrap=False, end=None)
if not self.lineblocklevel:
self.add_text('\n')
def visit_line(self, node):
pass
def depart_line(self, node):
self.add_text('\n')
def visit_block_quote(self, node):
self.new_state()
def depart_block_quote(self, node):
self.end_state()
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_paragraph(self, node):
if not isinstance(node.parent, nodes.Admonition) or \
isinstance(node.parent, addnodes.seealso):
self.new_state(0)
def depart_paragraph(self, node):
if not isinstance(node.parent, nodes.Admonition) or \
isinstance(node.parent, addnodes.seealso):
self.end_state()
def visit_target(self, node):
raise nodes.SkipNode
def visit_index(self, node):
raise nodes.SkipNode
def visit_toctree(self, node):
raise nodes.SkipNode
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_pending_xref(self, node):
pass
def depart_pending_xref(self, node):
pass
def visit_reference(self, node):
pass
def depart_reference(self, node):
pass
def visit_download_reference(self, node):
pass
def depart_download_reference(self, node):
pass
def visit_emphasis(self, node):
self.add_text('*')
def depart_emphasis(self, node):
self.add_text('*')
def visit_literal_emphasis(self, node):
self.add_text('*')
def depart_literal_emphasis(self, node):
self.add_text('*')
def visit_strong(self, node):
self.add_text('**')
def depart_strong(self, node):
self.add_text('**')
def visit_abbreviation(self, node):
self.add_text('')
def depart_abbreviation(self, node):
if node.hasattr('explanation'):
self.add_text(' (%s)' % node['explanation'])
def visit_title_reference(self, node):
self.add_text('*')
def depart_title_reference(self, node):
self.add_text('*')
def visit_literal(self, node):
self.add_text('"')
def depart_literal(self, node):
self.add_text('"')
def visit_subscript(self, node):
self.add_text('_')
def depart_subscript(self, node):
pass
def visit_superscript(self, node):
self.add_text('^')
def depart_superscript(self, node):
pass
def visit_footnote_reference(self, node):
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_citation_reference(self, node):
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_Text(self, node):
self.add_text(node.astext())
def depart_Text(self, node):
pass
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_inline(self, node):
pass
def depart_inline(self, node):
pass
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_problematic(self, node):
self.add_text('>>')
def depart_problematic(self, node):
self.add_text('<<')
def visit_system_message(self, node):
self.new_state(0)
self.add_text('<SYSTEM MESSAGE: %s>' % node.astext())
self.end_state()
raise nodes.SkipNode
def visit_comment(self, node):
raise nodes.SkipNode
def visit_meta(self, node):
# only valid for HTML
raise nodes.SkipNode
def visit_raw(self, node):
if 'text' in node.get('format', '').split():
self.body.append(node.astext())
raise nodes.SkipNode
def visit_math(self, node):
self.builder.warn('using "math" markup without a Sphinx math extension '
'active, please use one of the math extensions '
'described at http://sphinx-doc.org/ext/math.html',
(self.builder.current_docname, node.line))
raise nodes.SkipNode
visit_math_block = visit_math
def unknown_visit(self, node):
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
|
dwkim78/pdtrend
|
refs/heads/master
|
pdtrend/utils/__init__.py
|
7
|
__author__ = 'kim'
|
linsicai/or-tools
|
refs/heads/master
|
examples/tests/test_cp_api.py
|
5
|
# Various calls to CP api from python to verify they work.
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import model_pb2
from ortools.constraint_solver import search_limit_pb2
def test_member():
solver = pywrapcp.Solver('test member')
x = solver.IntVar(1, 10, 'x')
ct = x.Member([1, 2, 3, 5])
print(ct)
def test_sparse_var():
solver = pywrapcp.Solver('test sparse')
x = solver.IntVar([1, 3, 5], 'x')
print(x)
def test_modulo():
solver = pywrapcp.Solver('test modulo')
x = solver.IntVar(0, 10, 'x')
y = solver.IntVar(2, 4, 'y')
print(x % 3)
print(x % y)
def test_limit():
solver = pywrapcp.Solver('test limit')
limit_proto = search_limit_pb2.SearchLimitProto()
limit_proto.time = 10000
limit_proto.branches = 10
print(limit_proto)
limit = solver.Limit(limit_proto)
print(limit)
def test_export():
solver = pywrapcp.Solver('test export')
x = solver.IntVar(1, 10, 'x')
ct = x.Member([1, 2, 3, 5])
solver.Add(ct)
proto = model_pb2.CPModelProto()
proto.model = 'wrong name'
solver.ExportModel(proto)
print(repr(proto))
print(str(proto))
class SearchMonitorTest(pywrapcp.SearchMonitor):
def __init__(self, solver, nexts):
print('Build')
pywrapcp.SearchMonitor.__init__(self, solver)
self._nexts = nexts
def BeginInitialPropagation(self):
print('In BeginInitialPropagation')
print(self._nexts)
def EndInitialPropagation(self):
print('In EndInitialPropagation')
print(self._nexts)
def test_search_monitor():
print('test_search_monitor')
solver = pywrapcp.Solver('test search monitor')
x = solver.IntVar(1, 10, 'x')
ct = (x == 3)
solver.Add(ct)
db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
monitor = SearchMonitorTest(solver, x)
solver.Solve(db, monitor)
class DemonTest(pywrapcp.PyDemon):
def __init__(self, x):
pywrapcp.Demon.__init__(self)
self._x = x
print('Demon built')
def Run(self, solver):
print('in Run(), saw ' + str(self._x))
def test_demon():
print('test_demon')
solver = pywrapcp.Solver('test export')
x = solver.IntVar(1, 10, 'x')
demon = DemonTest(x)
demon.Run(solver)
class ConstraintTest(pywrapcp.PyConstraint):
def __init__(self, solver, x):
pywrapcp.Constraint.__init__(self, solver)
self._x = x
print('Constraint built')
def Post(self):
print('in Post()')
self._demon = DemonTest(self._x)
self._x.WhenBound(self._demon)
print('out of Post()')
def InitialPropagate(self):
print('in InitialPropagate()')
self._x.SetMin(5)
print(self._x)
print('out of InitialPropagate()')
def DebugString(self):
return('ConstraintTest')
def test_constraint():
solver = pywrapcp.Solver('test export')
x = solver.IntVar(1, 10, 'x')
myct = ConstraintTest(solver, x)
solver.Add(myct)
db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
solver.Solve(db)
class InitialPropagateDemon(pywrapcp.PyDemon):
def __init__(self, ct):
pywrapcp.Demon.__init__(self)
self._ct = ct
def Run(self, solver):
self._ct.InitialPropagate()
class DumbGreaterOrEqualToFive(pywrapcp.PyConstraint):
def __init__(self, solver, x):
pywrapcp.Constraint.__init__(self, solver)
self._x = x
def Post(self):
self._demon = InitialPropagateDemon(self)
self._x.WhenBound(self._demon)
def InitialPropagate(self):
if self._x.Bound():
if self._x.Value() < 5:
print('Reject %d' % self._x.Value())
self.solver().Fail()
else:
print('Accept %d' % self._x.Value())
def test_failing_constraint():
solver = pywrapcp.Solver('test export')
x = solver.IntVar(1, 10, 'x')
myct = DumbGreaterOrEqualToFive(solver, x)
solver.Add(myct)
db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
solver.Solve(db)
def test_domain_iterator():
print('test_domain_iterator')
solver = pywrapcp.Solver('test_domain_iterator')
x = solver.IntVar([1, 2, 4, 6], 'x')
for i in x.DomainIterator():
print(i)
class WatchDomain(pywrapcp.PyDemon):
def __init__(self, x):
pywrapcp.Demon.__init__(self)
self._x = x
def Run(self, solver):
for i in self._x.HoleIterator():
print('Removed %d' % i)
class HoleConstraintTest(pywrapcp.PyConstraint):
def __init__(self, solver, x):
pywrapcp.Constraint.__init__(self, solver)
self._x = x
def Post(self):
self._demon = WatchDomain(self._x)
self._x.WhenDomain(self._demon)
def InitialPropagate(self):
self._x.RemoveValue(5)
def test_hole_iterator():
print('test_hole_iterator')
solver = pywrapcp.Solver('test export')
x = solver.IntVar(1, 10, 'x')
myct = HoleConstraintTest(solver, x)
solver.Add(myct)
db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
solver.Solve(db)
class BinarySum(pywrapcp.PyConstraint):
def __init__(self, solver, x, y, z):
pywrapcp.Constraint.__init__(self, solver)
self._x = x
self._y = y
self._z = z
def Post(self):
self._demon = InitialPropagateDemon(self)
self._x.WhenRange(self._demon)
self._y.WhenRange(self._demon)
self._z.WhenRange(self._demon)
def InitialPropagate(self):
self._z.SetRange(self._x.Min() + self._y.Min(), self._x.Max() + self._y.Max())
self._x.SetRange(self._z.Min() - self._y.Max(), self._z.Max() - self._y.Min())
self._y.SetRange(self._z.Min() - self._x.Max(), self._z.Max() - self._x.Min())
def test_sum_constraint():
print('test_sum_constraint')
solver = pywrapcp.Solver('test_sum_constraint')
x = solver.IntVar(1, 5, 'x')
y = solver.IntVar(1, 5, 'y')
z = solver.IntVar(1, 5, 'z')
binary_sum = BinarySum(solver, x, y, z)
solver.Add(binary_sum)
db = solver.Phase([x, y, z], solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db)
while solver.NextSolution():
print('%d + %d == %d' % (x.Value(), y.Value(), z.Value()))
solver.EndSearch()
def test_size_1_var():
solver = pywrapcp.Solver('test_size_1_var')
x = solver.IntVar([0], 'x')
def test_cumulative_api():
solver = pywrapcp.Solver('Problem')
    # Vars
    S = [solver.FixedDurationIntervalVar(0, 10, 5, False, "S_%s" % a)
         for a in range(10)]
C = solver.IntVar(2, 5)
D = [a % 3 + 2 for a in range(10)]
solver.Add(solver.Cumulative(S, D, C, "cumul"))
class CustomDecisionBuilder(pywrapcp.PyDecisionBuilder):
def __init__(self):
pywrapcp.PyDecisionBuilder.__init__(self)
def Next(self, solver):
print("In Next")
return None
def DebugString(self):
return 'CustomDecisionBuilder'
def test_custom_decision_builder():
solver = pywrapcp.Solver('test_custom_decision_builder')
db = CustomDecisionBuilder()
print(str(db))
solver.Solve(db)
class CustomDecision(pywrapcp.PyDecision):
def __init__(self):
pywrapcp.PyDecision.__init__(self)
self._val = 1
print("Set value to", self._val)
def Apply(self, solver):
print('In Apply')
print("Expect value", self._val)
solver.Fail()
def Refute(self, solver):
print('In Refute')
def DebugString(self):
return('CustomDecision')
class CustomDecisionBuilderCustomDecision(pywrapcp.PyDecisionBuilder):
def __init__(self):
pywrapcp.PyDecisionBuilder.__init__(self)
self.__done = False
def Next(self, solver):
if not self.__done:
self.__done = True
self.__decision = CustomDecision()
return self.__decision
return None
def DebugString(self):
return 'CustomDecisionBuilderCustomDecision'
def test_custom_decision():
solver = pywrapcp.Solver('test_custom_decision')
db = CustomDecisionBuilderCustomDecision()
print(str(db))
solver.Solve(db)
def main():
test_member()
test_sparse_var()
test_modulo()
# test_limit()
# test_export()
test_search_monitor()
test_demon()
test_failing_constraint()
test_constraint()
test_domain_iterator()
test_hole_iterator()
test_sum_constraint()
test_size_1_var()
test_cumulative_api()
test_custom_decision_builder()
test_custom_decision()
if __name__ == '__main__':
main()
|
jk1/intellij-community
|
refs/heads/master
|
python/testData/mover/indent.py
|
83
|
class A:
def foo(self, a, b, c, d):
if a:
if b:
if c:
if d:
self.bar()
c<caret> = 3
if c:
a = 2
def bar(self):
pass
|
danlrobertson/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/html5lib/html5lib/treeadapters/__init__.py
|
77
|
"""Tree adapters let you convert from one tree structure to another
Example:
.. code-block:: python
import html5lib
from html5lib.treeadapters import genshi
doc = '<html><body>Hi!</body></html>'
treebuilder = html5lib.getTreeBuilder('etree')
parser = html5lib.HTMLParser(tree=treebuilder)
tree = parser.parse(doc)
TreeWalker = html5lib.getTreeWalker('etree')
genshi_tree = genshi.to_genshi(TreeWalker(tree))
"""
from __future__ import absolute_import, division, unicode_literals
from . import sax
__all__ = ["sax"]
try:
from . import genshi # noqa
except ImportError:
pass
else:
__all__.append("genshi")
|
sadanandb/pmt
|
refs/heads/master
|
src/client/examples/query_shot.py
|
10
|
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import sys
from tactic_client_lib import TacticServerStub
SEARCH_TYPE = "prod/shot"
def main(args):
# USAGE: query_shot.py <shot_code>
shot_code = args[0]
server = TacticServerStub()
search_key = server.build_search_type(SEARCH_TYPE)
# do the actual work
server.start("Queried shot [%s]" % shot_code)
try:
filters = [
('code', shot_code)
]
print server.query(search_key, filters)
except:
server.abort()
raise
else:
server.finish()
if __name__ == '__main__':
executable = sys.argv[0]
args = sys.argv[1:]
main(args)
|
fastinetserver/portage-idfetch
|
refs/heads/master
|
pym/portage/_legacy_globals.py
|
1
|
# Copyright 2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
from portage import os
from portage.const import CACHE_PATH, PROFILE_PATH
def _get_legacy_global(name):
constructed = portage._legacy_globals_constructed
if name in constructed:
return getattr(portage, name)
if name == 'portdb':
portage.portdb = portage.db[portage.root]["porttree"].dbapi
constructed.add(name)
return getattr(portage, name)
elif name in ('mtimedb', 'mtimedbfile'):
portage.mtimedbfile = os.path.join(portage.root,
CACHE_PATH, "mtimedb")
constructed.add('mtimedbfile')
portage.mtimedb = portage.MtimeDB(portage.mtimedbfile)
constructed.add('mtimedb')
return getattr(portage, name)
# Portage needs to ensure a sane umask for the files it creates.
os.umask(0o22)
kwargs = {}
for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
kwargs[k] = os.environ.get(envvar, "/")
portage._initializing_globals = True
portage.db = portage.create_trees(**kwargs)
constructed.add('db')
del portage._initializing_globals
settings = portage.db["/"]["vartree"].settings
for root in portage.db:
if root != "/":
settings = portage.db[root]["vartree"].settings
break
portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
portage.settings = settings
constructed.add('settings')
portage.root = root
constructed.add('root')
# COMPATIBILITY
# These attributes should not be used within
# Portage under any circumstances.
portage.archlist = settings.archlist()
constructed.add('archlist')
portage.features = settings.features
constructed.add('features')
portage.groups = settings["ACCEPT_KEYWORDS"].split()
constructed.add('groups')
portage.pkglines = settings.packages
constructed.add('pkglines')
portage.selinux_enabled = settings.selinux_enabled()
constructed.add('selinux_enabled')
portage.thirdpartymirrors = settings.thirdpartymirrors()
constructed.add('thirdpartymirrors')
portage.usedefaults = settings.use_defs
constructed.add('usedefaults')
profiledir = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
if not os.path.isdir(profiledir):
profiledir = None
portage.profiledir = profiledir
constructed.add('profiledir')
return getattr(portage, name)
|
downingstreet/Google-Code-Jam-2016
|
refs/heads/master
|
Round-1A/LastWord.py
|
1
|
def solve(S):
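    # Greedily build the lexicographically last word: prepend each letter
    # that is >= the current first letter, otherwise append it.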
words = []
words.append(S[0])
for i in S[1:]:
if i >= words[0]:
words.insert(0,i)
else:
words.append(i)
return ''.join(words)
for t in xrange(1, input()+1):
S = raw_input()
print "Case #{0}: {1}".format(t, solve(S))
|
jonfoster/pyxb1
|
refs/heads/master
|
pyxb/bundles/wssplat/wsdlx.py
|
6
|
from pyxb.bundles.wssplat.raw.wsdlx import *
|
repotvsupertuga/tvsupertuga.repository
|
refs/heads/master
|
script.module.openscrapers/lib/openscrapers/sources_openscrapers/en_Torrent/111ys.py
|
1
|
# -*- coding: UTF-8 -*-
# ..#######.########.#######.##....#..######..######.########....###...########.#######.########..######.
# .##.....#.##.....#.##......###...#.##....#.##....#.##.....#...##.##..##.....#.##......##.....#.##....##
# .##.....#.##.....#.##......####..#.##......##......##.....#..##...##.##.....#.##......##.....#.##......
# .##.....#.########.######..##.##.#..######.##......########.##.....#.########.######..########..######.
# .##.....#.##.......##......##..###.......#.##......##...##..########.##.......##......##...##........##
# .##.....#.##.......##......##...##.##....#.##....#.##....##.##.....#.##.......##......##....##.##....##
# ..#######.##.......#######.##....#..######..######.##.....#.##.....#.##.......#######.##.....#..######.
"""
Created by Tempest
"""
import re
import urllib
import urlparse
from openscrapers.modules import cfscrape
from openscrapers.modules import cleantitle
from openscrapers.modules import client
from openscrapers.modules import debrid
from openscrapers.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['111-ys.com']
self.base_link = 'http://111-ys.com'
self.search_link = '/browse-movies/%s/all/all/0/latest'
self.scraper = cfscrape.create_scraper()
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except Exception:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url is None: return sources
if debrid.status() is False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
query = '%s %s' % (data['title'], data['year'])
url = self.search_link % urllib.quote(query)
url = urlparse.urljoin(self.base_link, url).replace('%20', '+')
html = self.scraper.get(url).content
items = re.findall('class="browse-movie-bottom">(.+?)</div>\s</div>', html, re.DOTALL)
            if not items:  # re.findall returns a list, never None
                return sources
for entry in items:
try:
try:
link, name = \
re.findall('<a href="(.+?)" class="browse-movie-title">(.+?)</a>', entry, re.DOTALL)[0]
name = client.replaceHTMLCodes(name)
if not cleantitle.get(name) == cleantitle.get(data['title']):
continue
                    except Exception:
continue
y = entry[-4:]
if not y == data['year']:
continue
response = self.scraper.get(link).content
try:
entries = client.parseDOM(response, 'div', attrs={'class': 'modal-torrent'})
for torrent in entries:
link, name = re.findall(
'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
torrent, re.DOTALL)[0]
link = 'magnet:%s' % link
link = str(client.replaceHTMLCodes(link).split('&tr')[0])
quality, info = source_utils.get_release_quality(name, name)
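                            # Normalise the advertised size to GB so entries
                            # are comparable; MB/MiB values divide by 1024.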
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
size = '%.2f GB' % size
info.append(size)
except Exception:
pass
info = ' | '.join(info)
sources.append(
{'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link, 'info': info,
'direct': False, 'debridonly': True})
except Exception:
continue
                except Exception:
continue
return sources
        except Exception:
return sources
def resolve(self, url):
return url
|
AVSystem/avs_commons
|
refs/heads/master
|
tools/check_extern_c.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2021 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import sys
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def read_full_file(filename):
with open(filename) as f:
return f.read()
def is_purely_preprocessor_header(content):
content = re.sub(r'/\*.*?\*/', '', content, flags=re.MULTILINE | re.DOTALL)
content = re.sub(r'//.*$', '', content, flags=re.MULTILINE)
for line in content.split('\n'):
trimmed = line.strip()
if trimmed != '' and not trimmed.startswith('#'):
return False
return True
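# Headers whose non-comment content is only preprocessor directives declare no
# symbols, so they are exempt from the extern "C" requirement checked below.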
def has_proper_extern_c_clause(filename):
content = read_full_file(filename)
return ('extern "C"' in content) or is_purely_preprocessor_header(content)
def _main():
parser = argparse.ArgumentParser(
description='Check if all public headers contain an extern "C" clause')
parser.add_argument('-p', '--path', help='Project root path to start checking in',
default=PROJECT_ROOT)
parser.add_argument('-r', '--regex',
help='Regular expression that matches all files that need to be checked',
default=r'/include_public/.*\.h$')
args = parser.parse_args()
regex = re.compile(args.regex)
failure = False
for root, dirs, files in os.walk(args.path):
if '.git' in files:
# ".git" file that is not a subdirectory means that most likely
# we're in a submodule directory - do not iterate further
dirs.clear()
continue
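        # In a regular checkout ".git" is a directory; prune it from the walk.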
try:
dirs.remove('.git')
except ValueError:
pass
for filename in files:
full_filename = os.path.join(root, filename)
if regex.search(full_filename) is not None:
if not has_proper_extern_c_clause(full_filename):
failure = True
sys.stderr.write('Missing extern "C" conditional in %s\n' % (full_filename,))
if failure:
return 1
if __name__ == '__main__':
sys.exit(_main())
|
Fillll/reddit2telegram
|
refs/heads/master
|
reddit2telegram/channels/canallixo/__init__.py
|
12133432
| |
priya-pp/Tacker
|
refs/heads/master
|
tacker/agent/__init__.py
|
12133432
| |
daxxi13/CouchPotatoServer
|
refs/heads/develop
|
libs/requests/packages/charade/jisfreq.py
|
3130
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials, including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in descending order of frequency.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
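# (Sanity check of the figures above: 0.92635 / (1 - 0.92635) ~= 12.58, and
# 25% of that is ~3.15, applied here rounded to 3.0.)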
# Char to FreqOrder table
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
# Everything below is of no interest for detection purposes
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
|
onaio/dkobo
|
refs/heads/master
|
dkobo/koboform/tests/test_kobo_to_xlsform.py
|
2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.test import TestCase
from dkobo.koboform.kobo_to_xlsform import convert_any_kobo_features_to_xlsform_survey_structure
from dkobo.koboform.kobo_to_xlsform import _sluggify_valid_xml
def convert_survey(surv, choices=None, sheets=None):
    # Use None sentinels instead of mutable default arguments: a shared
    # default dict would be mutated by update() and leak state across calls.
    if choices is None:
        choices = []
    if sheets is None:
        sheets = {}
    sheets.update({
        'survey': surv
    })
    if len(choices) > 0:
        sheets.update({
            'choices': choices
        })
    return convert_any_kobo_features_to_xlsform_survey_structure(sheets)
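# Example: convert_survey(rank_s, items) below expands a kobo "rank" block
# into a plain XLSForm group of select_one questions (see the Converter tests).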
COLS = {
'rank-cmessage': 'kobo--rank-constraint-message',
'rank-items': 'kobo--rank-items',
'score-choices': 'kobo--score-choices',
}
rank_s = [
{
'type': 'begin rank',
'name': 'x',
COLS['rank-cmessage']: 'Rank Message',
COLS['rank-items']: 'items',
'relevant': 'abcdef',
},
{'type': 'rank__level', 'name': 'rl1'},
{'type': 'rank__level', 'name': 'rl2', 'appearance': 'overridden'},
{'type': 'end rank'},
]
score_s = [
{
'type': 'begin score',
'name': 'x',
COLS['score-choices']: 'items',
'relevant': 'ghijkl',
},
{'type': 'score__row', 'name': 'rl1'},
{'type': 'score__row', 'name': 'rl2', 'appearance': 'overridden'},
{'type': 'end score'},
]
items = [
{'list name': 'items', 'name': 'a', 'label': 'A a a'},
{'list name': 'items', 'name': 'b', 'label': 'B b b'},
{'list name': 'items', 'name': 'c', 'label': 'C c c'},
]
class K2XSubModules(TestCase):
def test_sluggify_valid_xml(self):
'''
corresponding to tests from the cs model.utils -> sluggifyLabel
'''
self.cases = [
[["asdf jkl"], "asdf_jkl"],
[["2. asdf"], "_2_asdf"],
[[" hello "], "hello"],
[["asdf#123"], "asdf_123"],
# [["asdf", ["asdf"]], "asdf_001"],
# [["2. asdf", ["_2_asdf"]], "_2_asdf_001"],
]
for case in self.cases:
[inp, expected] = case
self.assertEqual(_sluggify_valid_xml(inp[0]), expected)
# def test_increment(self):
# self.cases = [
# [["asdf", ["asdf"]], "asdf_001"],
# [["2. asdf", ["_2_asdf"]], "_2_asdf_001"],
# ]
# for case in self.cases:
# [inp, expected] = case
# self.assertEqual(_sluggify_valid_xml(inp[0], names=inp[1]), expected)
class Converter(TestCase):
def test_rank_conversion(self):
result = convert_survey(rank_s, items)
surv = result['survey']
self.assertEqual(len(surv), 5)
self.assertEqual(surv[0]['appearance'], 'field-list')
self.assertEqual(surv[0]['type'], 'begin group')
self.assertEqual(surv[0]['relevant'], 'abcdef')
self.assertEqual(surv[1]['type'], 'note')
self.assertEqual(surv[1].get('relevant', None), None)
self.assertEqual(surv[2]['required'], 'true')
self.assertEqual(surv[2]['type'], 'select_one items')
self.assertTrue('constraint' not in surv[2].keys())
self.assertEqual(surv[2].get('constraint_message'), 'Rank Message')
self.assertEqual(surv[3]['appearance'], 'overridden')
self.assertEqual(surv[4]['type'], 'end group')
self.assertEqual(len(surv[4].keys()), 1)
def test_score_conversion(self):
result = convert_survey(score_s, items)
surv = result['survey']
self.assertEqual(len(surv), 5)
self.assertEqual(surv[0]['appearance'], 'field-list')
self.assertEqual(surv[0]['type'], 'begin group')
self.assertEqual(surv[0]['relevant'], 'ghijkl')
self.assertEqual(surv[1]['type'], 'select_one items')
self.assertEqual(surv[1]['appearance'], 'label')
self.assertEqual(surv[1].get('relevant', None), None)
self.assertEqual(surv[2]['appearance'], 'list-nolabel')
self.assertEqual(surv[2]['type'], 'select_one items')
self.assertEqual(surv[3]['appearance'], 'overridden')
self.assertEqual(surv[4]['type'], 'end group')
self.assertEqual(len(surv[4].keys()), 1)
|
santoshsahoo/filesync-server
|
refs/heads/master
|
src/server/tests/test_oops.py
|
6
|
# Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""OOPS testing."""
import os
import sys
import time
from twisted.internet import reactor, defer
from oops_datedir_repo import serializer
from ubuntuone.storage.server import server
from ubuntuone.storage.server.testing.testcase import (
TestWithDatabase,
FactoryHelper)
from ubuntuone.storageprotocol import protocol_pb2
class TestOops(TestWithDatabase):
"""Test the the oops system stuff."""
createOOPSFiles = True
def setUp(self):
self._max_oops_line = server.MAX_OOPS_LINE
return super(TestOops, self).setUp()
def tearDown(self):
server.MAX_OOPS_LINE = self._max_oops_line
return super(TestOops, self).tearDown()
def get_oops_data(self):
"""Read oops data for first oops."""
oopses = list(self.get_oops())
self.assert_(len(oopses) > 0)
with open(oopses[0], 'r') as f:
oops_data = serializer.read(f)
return oops_data
def test_ping_error(self):
"""Fail on ping."""
d = defer.Deferred()
@defer.inlineCallbacks
def login(client):
try:
pd = self.service.factory.protocols[0].wait_for_poison()
self.service.factory.protocols[0].poison("ping")
message = protocol_pb2.Message()
message.id = 5
message.type = protocol_pb2.Message.PING
client.sendMessage(message)
yield pd
oops_data = self.get_oops_data()
self.assertEqual("Service was poisoned with: ping",
oops_data["value"])
self.assertEqual(None, oops_data["username"])
d.callback(True)
except Exception, e:
d.errback(e)
client.transport.loseConnection()
factory.timeout.cancel()
factory = FactoryHelper(login)
reactor.connectTCP('localhost', self.port, factory)
return d
def make_auth_fail(poison):
"build test case"
def inner(self):
"""Fail on ping."""
d = defer.Deferred()
@defer.inlineCallbacks
def login(client):
pd = self.service.factory.protocols[0].wait_for_poison()
self.service.factory.protocols[0].poison(poison)
message = protocol_pb2.Message()
message.id = 5
message.type = protocol_pb2.Message.AUTH_REQUEST
client.handle_ERROR = lambda *args: True
client.sendMessage(message)
yield pd
try:
oops_data = self.get_oops_data()
self.assertEqual("Service was poisoned with: " + poison,
oops_data["value"])
self.assertEqual(None, oops_data["username"])
d.callback(True)
except Exception, e:
d.errback(e)
client.transport.loseConnection()
factory.timeout.cancel()
factory = FactoryHelper(login)
reactor.connectTCP('localhost', self.port, factory)
return d
return inner
test_request_start = make_auth_fail("request_start")
test_request_schedule = make_auth_fail("request_schedule")
test_authenticate_start = make_auth_fail("authenticate_start")
test_authenticate_cont = make_auth_fail("authenticate_continue")
def test_user_extra_data(self):
"""Test that the user id and username is included in the extra data"""
@defer.inlineCallbacks
def poisoned_ping(client):
try:
pd = self.service.factory.protocols[0].wait_for_poison()
self.service.factory.protocols[0].poison("ping")
message = protocol_pb2.Message()
message.id = 5
message.type = protocol_pb2.Message.PING
client.sendMessage(message)
yield pd
oops_data = self.get_oops_data()
self.assertEqual("Service was poisoned with: ping",
oops_data["value"])
self.assertEqual("0,0,usr0", oops_data["username"])
except Exception, e:
raise e
def auth(client):
d = client.dummy_authenticate("open sesame")
d.addCallback(lambda _: poisoned_ping(client))
d.addCallbacks(client.test_done, client.test_fail)
return self.callback_test(auth)
class TestOopsToStderr(TestOops):
"""Test writing oops to stderr."""
createOOPSFiles = False
def setUp(self):
self.former_stderr = sys.stderr
self.tmpdir = self.mktemp()
if not os.path.exists(self.tmpdir):
os.makedirs(self.tmpdir)
self.stderr_filename = os.path.join(self.tmpdir,
'%s.stderr' % (time.time(),))
sys.stderr = open(self.stderr_filename, 'w')
return super(TestOopsToStderr, self).setUp()
def tearDown(self):
sys.stderr.close()
sys.stderr = self.former_stderr
return super(TestOopsToStderr, self).tearDown()
def get_oops_data(self):
"""Read oops data for first oops from stderr."""
sys.stderr.flush()
with open(self.stderr_filename) as f:
oops_data = serializer.read(f)
return oops_data
|
coreyfarrell/testsuite
|
refs/heads/master
|
lib/python/asterisk/astdicts.py
|
2
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# copied from http://code.activestate.com/recipes/576693/
try:
# Use builtin OrderedDict() from Python2.7.
from collections import OrderedDict
except ImportError:
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
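# Illustrative sketch (added; not part of the original recipe): after
#     od = OrderedDict(); od['a'] = 1; od['b'] = 2
# the structure is
#     root   = [link_b, link_a, None]   # sentinel: PREV=newest, NEXT=oldest
#     link_a = [root, link_b, 'a']
#     link_b = [link_a, root, 'b']
# so iteration follows NEXT pointers from root and yields 'a' then 'b'.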
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
###############################################################################
### MultiOrderedDict
###############################################################################
class MultiOrderedDict(OrderedDict):
def __init__(self, *args, **kwds):
OrderedDict.__init__(self, *args, **kwds)
def __setitem__(self, key, val, i=None):
if key not in self:
# print "__setitem__ key = ", key, " val = ", val
OrderedDict.__setitem__(
self, key, val if isinstance(val, list) else [val])
return
# print "inserting key = ", key, " val = ", val
vals = self[key]
if i is None:
i = len(vals)
if not isinstance(val, list):
if val not in vals:
vals.insert(i, val)
else:
for j in reversed(val):
if j not in vals:
vals.insert(i, j)
def insert(self, i, key, val):
self.__setitem__(key, val, i)
def copy(self):
# TODO - find out why for some reason copies
# the [] as an [[]], so do manually
c = MultiOrderedDict() #self.__class__(self)
for key, val in self.items():
for v in val:
c[key] = v
return c
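# Minimal usage sketch (added for illustration; not part of the original
# module). MultiOrderedDict keeps every value for a key in an ordered list,
# the way repeated options accumulate in an Asterisk-style config file:
if __name__ == '__main__':
d = MultiOrderedDict()
d['allow'] = 'ulaw'
d['allow'] = 'alaw' # same key again: appended, not overwritten
assert d['allow'] == ['ulaw', 'alaw']
d.insert(0, 'allow', 'g722') # insert at the front of the value list
assert d['allow'] == ['g722', 'ulaw', 'alaw']
assert d.copy()['allow'] == d['allow']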
|
icomms/wqmanager
|
refs/heads/master
|
reportlab/graphics/charts/legends.py
|
4
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/legends.py
__version__=''' $Id: legends.py 3604 2009-11-27 16:35:29Z meitham $ '''
__doc__="""This will be a collection of legends to be used with charts."""
import copy, operator
from reportlab.lib import colors
from reportlab.lib.validators import isNumber, OneOf, isString, isColorOrNone,\
isNumberOrNone, isListOfNumbersOrNone, isStringOrNone, isBoolean,\
EitherOr, NoneOr, AutoOr, isAuto, Auto, isBoxAnchor, SequenceOf, isInstanceOf
from reportlab.lib.attrmap import *
from reportlab.pdfbase.pdfmetrics import stringWidth, getFont
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.shapes import Drawing, Group, String, Rect, Line, STATE_DEFAULTS
from reportlab.graphics.charts.areas import PlotArea
from reportlab.graphics.widgets.markers import uSymbol2Symbol, isSymbol
from reportlab.lib.utils import isSeqType, find_locals
def _transMax(n,A):
X = n*[0]
m = 0
for a in A:
m = max(m,len(a))
for i,x in enumerate(a):
X[i] = max(X[i],x)
X = [0] + X[:m]
for i in xrange(m):
X[i+1] += X[i]
return X
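# Worked example (added): _transMax(3, [[1, 5], [2, 1, 4]]) takes the
# column-wise maxima [2, 5, 4], prefixes 0 and accumulates, returning
# [0, 2, 7, 11] -- running x-offsets sized to the widest sub-columns.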
def _objStr(s):
if isinstance(s,basestring):
return s
else:
return str(s)
def _getStr(s):
if isSeqType(s):
return map(_getStr,s)
else:
return _objStr(s)
def _getLines(s):
if isSeqType(s):
return tuple([(x or '').split('\n') for x in s])
else:
return (s or '').split('\n')
def _getLineCount(s):
T = _getLines(s)
if isSeqType(s):
return max([len(x) for x in T])
else:
return len(T)
def _getWidths(i,s, fontName, fontSize, subCols):
S = []
aS = S.append
if isSeqType(s):
for j,t in enumerate(s):
sc = subCols[j,i]
fN = getattr(sc,'fontName',fontName)
fS = getattr(sc,'fontSize',fontSize)
m = [stringWidth(x, fN, fS) for x in t.split('\n')]
m = max(sc.minWidth,m and max(m) or 0)
aS(m)
aS(sc.rpad)
del S[-1]
else:
sc = subCols[0,i]
fN = getattr(sc,'fontName',fontName)
fS = getattr(sc,'fontSize',fontSize)
m = [stringWidth(x, fN, fS) for x in s.split('\n')]
aS(max(sc.minWidth,m and max(m) or 0))
return S
class SubColProperty(PropHolder):
dividerLines = 0
_attrMap = AttrMap(
minWidth = AttrMapValue(isNumber,desc="minimum width for this subcol"),
rpad = AttrMapValue(isNumber,desc="right padding for this subcol"),
align = AttrMapValue(OneOf('left','right','center','centre','numeric'),desc='alignment in subCol'),
fontName = AttrMapValue(isString, desc="Font name of the strings"),
fontSize = AttrMapValue(isNumber, desc="Font size of the strings"),
leading = AttrMapValue(isNumber, desc="leading for the strings"),
fillColor = AttrMapValue(isColorOrNone, desc="fontColor"),
underlines = AttrMapValue(EitherOr((NoneOr(isInstanceOf(Line)),SequenceOf(isInstanceOf(Line),emptyOK=0,lo=0,hi=0x7fffffff))), desc="underline definitions"),
overlines = AttrMapValue(EitherOr((NoneOr(isInstanceOf(Line)),SequenceOf(isInstanceOf(Line),emptyOK=0,lo=0,hi=0x7fffffff))), desc="overline definitions"),
)
class LegendCallout:
def _legendValues(legend,*args):
'''return a tuple of values from the first function up the stack with isinstance(self,legend)'''
L = find_locals(lambda L: L.get('self',None) is legend and L or None)
return tuple([L[a] for a in args])
_legendValues = staticmethod(_legendValues)
def _selfOrLegendValues(self,legend,*args):
L = find_locals(lambda L: L.get('self',None) is legend and L or None)
return tuple([getattr(self,a,L[a]) for a in args])
def __call__(self,legend,g,thisx,y,(col,name)):
pass
class LegendSwatchCallout(LegendCallout):
def __call__(self,legend,g,thisx,y,i,(col,name),swatch):
pass
class LegendColEndCallout(LegendCallout):
def __call__(self,legend, g, x, xt, y, width, lWidth):
pass
class Legend(Widget):
"""A simple legend containing rectangular swatches and strings.
The swatches are filled rectangles whenever the respective
color object in 'colorNamePairs' is a subclass of Color in
reportlab.lib.colors. Otherwise the object passed instead is
assumed to have 'x', 'y', 'width' and 'height' attributes.
A legend then tries to set them or catches any error. This
lets you plug-in any widget you like as a replacement for
the default rectangular swatches.
Strings can be nicely aligned left or right to the swatches.
"""
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc="x-coordinate of upper-left reference point"),
y = AttrMapValue(isNumber, desc="y-coordinate of upper-left reference point"),
deltax = AttrMapValue(isNumberOrNone, desc="x-distance between neighbouring swatches"),
deltay = AttrMapValue(isNumberOrNone, desc="y-distance between neighbouring swatches"),
dxTextSpace = AttrMapValue(isNumber, desc="Distance between swatch rectangle and text"),
autoXPadding = AttrMapValue(isNumber, desc="x Padding between columns if deltax=None",advancedUsage=1),
autoYPadding = AttrMapValue(isNumber, desc="y Padding between rows if deltay=None",advancedUsage=1),
yGap = AttrMapValue(isNumber, desc="Additional gap between rows",advancedUsage=1),
dx = AttrMapValue(isNumber, desc="Width of swatch rectangle"),
dy = AttrMapValue(isNumber, desc="Height of swatch rectangle"),
columnMaximum = AttrMapValue(isNumber, desc="Max. number of items per column"),
alignment = AttrMapValue(OneOf("left", "right"), desc="Alignment of text with respect to swatches"),
colorNamePairs = AttrMapValue(None, desc="List of color/name tuples (color can also be widget)"),
fontName = AttrMapValue(isString, desc="Font name of the strings"),
fontSize = AttrMapValue(isNumber, desc="Font size of the strings"),
fillColor = AttrMapValue(isColorOrNone, desc="swatches filling color"),
strokeColor = AttrMapValue(isColorOrNone, desc="Border color of the swatches"),
strokeWidth = AttrMapValue(isNumber, desc="Width of the border color of the swatches"),
swatchMarker = AttrMapValue(NoneOr(AutoOr(isSymbol)), desc="None, Auto() or makeMarker('Diamond') ...",advancedUsage=1),
callout = AttrMapValue(None, desc="a user callout(self,g,x,y,(color,text))",advancedUsage=1),
boxAnchor = AttrMapValue(isBoxAnchor,'Anchor point for the legend area'),
variColumn = AttrMapValue(isBoolean,'If true column widths may vary (default is false)',advancedUsage=1),
dividerLines = AttrMapValue(OneOf(0,1,2,3,4,5,6,7),'If 1 we have dividers between the rows | 2 for extra top | 4 for bottom',advancedUsage=1),
dividerWidth = AttrMapValue(isNumber, desc="dividerLines width",advancedUsage=1),
dividerColor = AttrMapValue(isColorOrNone, desc="dividerLines color",advancedUsage=1),
dividerDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array for dividerLines.',advancedUsage=1),
dividerOffsX = AttrMapValue(SequenceOf(isNumber,emptyOK=0,lo=2,hi=2), desc='divider lines X offsets',advancedUsage=1),
dividerOffsY = AttrMapValue(isNumber, desc="dividerLines Y offset",advancedUsage=1),
colEndCallout = AttrMapValue(None, desc="a user callout(self,g, x, xt, y,width, lWidth)",advancedUsage=1),
subCols = AttrMapValue(None,desc="subColumn properties"),
swatchCallout = AttrMapValue(None, desc="a user swatch callout(self,g,x,y,i,(col,name),swatch)",advancedUsage=1),
)
def __init__(self):
# Upper-left reference point.
self.x = 0
self.y = 0
# Alignment of text with respect to swatches.
self.alignment = "left"
# x- and y-distances between neighbouring swatches.
self.deltax = 75
self.deltay = 20
self.autoXPadding = 5
self.autoYPadding = 2
# Size of swatch rectangle.
self.dx = 10
self.dy = 10
# Distance between swatch rectangle and text.
self.dxTextSpace = 10
# Max. number of items per column.
self.columnMaximum = 3
# Color/name pairs.
self.colorNamePairs = [ (colors.red, "red"),
(colors.blue, "blue"),
(colors.green, "green"),
(colors.pink, "pink"),
(colors.yellow, "yellow") ]
# Font name and size of the labels.
self.fontName = STATE_DEFAULTS['fontName']
self.fontSize = STATE_DEFAULTS['fontSize']
self.fillColor = STATE_DEFAULTS['fillColor']
self.strokeColor = STATE_DEFAULTS['strokeColor']
self.strokeWidth = STATE_DEFAULTS['strokeWidth']
self.swatchMarker = None
self.boxAnchor = 'nw'
self.yGap = 0
self.variColumn = 0
self.dividerLines = 0
self.dividerWidth = 0.5
self.dividerDashArray = None
self.dividerColor = colors.black
self.dividerOffsX = (0,0)
self.dividerOffsY = 0
self.colEndCallout = None
self._init_subCols()
def _init_subCols(self):
sc = self.subCols = TypedPropertyCollection(SubColProperty)
sc.rpad = 1
sc.minWidth = 0
sc.align = 'right'
sc[0].align = 'left'
def _getChartStyleName(self,chart):
for a in 'lines', 'bars', 'slices', 'strands':
if hasattr(chart,a): return a
return None
def _getChartStyle(self,chart):
return getattr(chart,self._getChartStyleName(chart),None)
def _getTexts(self,colorNamePairs):
if not isAuto(colorNamePairs):
texts = [_getStr(p[1]) for p in colorNamePairs]
else:
chart = getattr(colorNamePairs,'chart',getattr(colorNamePairs,'obj',None))
texts = [chart.getSeriesName(i,'series %d' % i) for i in xrange(chart._seriesCount)]
return texts
def _calculateMaxBoundaries(self, colorNamePairs):
"Calculate the maximum width of some given strings."
fontName = self.fontName
fontSize = self.fontSize
subCols = self.subCols
M = [_getWidths(i, m, fontName, fontSize, subCols) for i,m in enumerate(self._getTexts(colorNamePairs))]
if not M:
return [0,0]
n = max([len(m) for m in M])
if self.variColumn:
columnMaximum = self.columnMaximum
return [_transMax(n,M[r:r+columnMaximum]) for r in xrange(0,len(M),self.columnMaximum)]
else:
return _transMax(n,M)
def _calcHeight(self):
dy = self.dy
yGap = self.yGap
thisy = upperlefty = self.y - dy
fontSize = self.fontSize
ascent=getFont(self.fontName).face.ascent/1000.
if ascent==0: ascent=0.718 # default (from helvetica)
ascent *= fontSize
leading = fontSize*1.2
deltay = self.deltay
if not deltay: deltay = max(dy,leading)+self.autoYPadding
columnCount = 0
count = 0
lowy = upperlefty
lim = self.columnMaximum - 1
for name in self._getTexts(self.colorNamePairs):
y0 = thisy+(dy-ascent)*0.5
y = y0 - _getLineCount(name)*leading
leadingMove = 2*y0-y-thisy
newy = thisy-max(deltay,leadingMove)-yGap
lowy = min(y,newy,lowy)
if count==lim:
count = 0
thisy = upperlefty
columnCount = columnCount + 1
else:
thisy = newy
count = count+1
return upperlefty - lowy
def _defaultSwatch(self,x,thisy,dx,dy,fillColor,strokeWidth,strokeColor):
return Rect(x, thisy, dx, dy,
fillColor = fillColor,
strokeColor = strokeColor,
strokeWidth = strokeWidth,
)
def draw(self):
colorNamePairs = self.colorNamePairs
autoCP = isAuto(colorNamePairs)
if autoCP:
chart = getattr(colorNamePairs,'chart',getattr(colorNamePairs,'obj',None))
swatchMarker = None
autoCP = Auto(obj=chart)
n = chart._seriesCount
chartTexts = self._getTexts(colorNamePairs)
else:
swatchMarker = getattr(self,'swatchMarker',None)
if isAuto(swatchMarker):
chart = getattr(swatchMarker,'chart',getattr(swatchMarker,'obj',None))
swatchMarker = Auto(obj=chart)
n = len(colorNamePairs)
dx = self.dx
dy = self.dy
alignment = self.alignment
columnMaximum = self.columnMaximum
deltax = self.deltax
deltay = self.deltay
dxTextSpace = self.dxTextSpace
fontName = self.fontName
fontSize = self.fontSize
fillColor = self.fillColor
strokeWidth = self.strokeWidth
strokeColor = self.strokeColor
subCols = self.subCols
leading = fontSize*1.2
yGap = self.yGap
if not deltay:
deltay = max(dy,leading)+self.autoYPadding
ba = self.boxAnchor
maxWidth = self._calculateMaxBoundaries(colorNamePairs)
nCols = int((n+columnMaximum-1)/columnMaximum)
xW = dx+dxTextSpace+self.autoXPadding
variColumn = self.variColumn
if variColumn:
width = reduce(operator.add,[m[-1] for m in maxWidth],0)+xW*nCols
else:
deltax = max(maxWidth[-1]+xW,deltax)
width = maxWidth[-1]+nCols*deltax
maxWidth = nCols*[maxWidth]
thisx = self.x
thisy = self.y - self.dy
if ba not in ('ne','n','nw','autoy'):
height = self._calcHeight()
if ba in ('e','c','w'):
thisy += height/2.
else:
thisy += height
if ba not in ('nw','w','sw','autox'):
if ba in ('n','c','s'):
thisx -= width/2
else:
thisx -= width
upperlefty = thisy
g = Group()
ascent=getFont(fontName).face.ascent/1000.
if ascent==0: ascent=0.718 # default (from helvetica)
ascent *= fontSize # normalize
lim = columnMaximum - 1
callout = getattr(self,'callout',None)
scallout = getattr(self,'swatchCallout',None)
dividerLines = self.dividerLines
if dividerLines:
dividerWidth = self.dividerWidth
dividerColor = self.dividerColor
dividerDashArray = self.dividerDashArray
dividerOffsX = self.dividerOffsX
dividerOffsY = self.dividerOffsY
for i in xrange(n):
if autoCP:
col = autoCP
col.index = i
name = chartTexts[i]
else:
col, name = colorNamePairs[i]
if isAuto(swatchMarker):
col = swatchMarker
col.index = i
if isAuto(name):
name = getattr(swatchMarker,'chart',getattr(swatchMarker,'obj',None)).getSeriesName(i,'series %d' % i)
T = _getLines(name)
S = []
aS = S.append
j = int(i/columnMaximum)
jOffs = maxWidth[j]
# thisy+dy/2 = y+leading/2
y = y0 = thisy+(dy-ascent)*0.5
if callout: callout(self,g,thisx,y,(col,name))
if alignment == "left":
x = thisx
xn = thisx+jOffs[-1]+dxTextSpace
elif alignment == "right":
x = thisx+dx+dxTextSpace
xn = thisx
else:
raise ValueError, "bad alignment"
if not isSeqType(name):
T = [T]
yd = y
for k,lines in enumerate(T):
y = y0
kk = k*2
x1 = x+jOffs[kk]
x2 = x+jOffs[kk+1]
sc = subCols[k,i]
anchor = sc.align
fN = getattr(sc,'fontName',fontName)
fS = getattr(sc,'fontSize',fontSize)
fC = getattr(sc,'fillColor',fillColor)
fL = getattr(sc,'leading',1.2*fontSize)
if fN==fontName:
fA = (ascent*fS)/fontSize
else:
fA = getFont(fontName).face.ascent/1000.
if fA==0: fA=0.718
fA *= fS
if anchor=='left':
anchor = 'start'
xoffs = x1
elif anchor=='right':
anchor = 'end'
xoffs = x2
elif anchor=='numeric':
xoffs = x2
else:
anchor = 'middle'
xoffs = 0.5*(x1+x2)
for t in lines:
aS(String(xoffs,y,t,fontName=fN,fontSize=fS,fillColor=fC, textAnchor = anchor))
y -= fL
yd = min(yd,y)
y += fL
for iy, a in ((y-max(fL-fA,0),'underlines'),(y+fA,'overlines')):
il = getattr(sc,a,None)
if il:
if not isinstance(il,(tuple,list)): il = (il,)
for l in il:
l = copy.copy(l)
l.y1 += iy
l.y2 += iy
l.x1 += x1
l.x2 += x2
aS(l)
x = xn
y = yd
leadingMove = 2*y0-y-thisy
if dividerLines:
xd = thisx+dx+dxTextSpace+jOffs[-1]+dividerOffsX[1]
yd = thisy+dy*0.5+dividerOffsY
if ((dividerLines&1) and i%columnMaximum) or ((dividerLines&2) and not i%columnMaximum):
g.add(Line(thisx+dividerOffsX[0],yd,xd,yd,
strokeColor=dividerColor, strokeWidth=dividerWidth, strokeDashArray=dividerDashArray))
if (dividerLines&4) and (i%columnMaximum==lim or i==(n-1)):
yd -= max(deltay,leadingMove)+yGap
g.add(Line(thisx+dividerOffsX[0],yd,xd,yd,
strokeColor=dividerColor, strokeWidth=dividerWidth, strokeDashArray=dividerDashArray))
# Make a 'normal' color swatch...
if isAuto(col):
chart = getattr(col,'chart',getattr(col,'obj',None))
c = chart.makeSwatchSample(getattr(col,'index',i),x,thisy,dx,dy)
elif isinstance(col, colors.Color):
if isSymbol(swatchMarker):
c = uSymbol2Symbol(swatchMarker,x+dx/2.,thisy+dy/2.,col)
else:
c = self._defaultSwatch(x,thisy,dx,dy,fillColor=col,strokeWidth=strokeWidth,strokeColor=strokeColor)
elif col is not None:
try:
c = copy.deepcopy(col)
c.x = x
c.y = thisy
c.width = dx
c.height = dy
except:
c = None
else:
c = None
if c:
g.add(c)
if scallout: scallout(self,g,thisx,y0,i,(col,name),c)
map(g.add,S)
if self.colEndCallout and (i%columnMaximum==lim or i==(n-1)):
if alignment == "left":
xt = thisx
else:
xt = thisx+dx+dxTextSpace
yd = thisy+dy*0.5+dividerOffsY - (max(deltay,leadingMove)+yGap)
self.colEndCallout(self, g, thisx, xt, yd, jOffs[-1], jOffs[-1]+dx+dxTextSpace)
if i%columnMaximum==lim:
if variColumn:
thisx += jOffs[-1]+xW
else:
thisx = thisx+deltax
thisy = upperlefty
else:
thisy = thisy-max(deltay,leadingMove)-yGap
return g
def demo(self):
"Make sample legend."
d = Drawing(200, 100)
legend = Legend()
legend.alignment = 'left'
legend.x = 0
legend.y = 100
legend.dxTextSpace = 5
items = 'red green blue yellow pink black white'.split()
items = map(lambda i:(getattr(colors, i), i), items)
legend.colorNamePairs = items
d.add(legend, 'legend')
return d
class TotalAnnotator(LegendColEndCallout):
def __init__(self, lText='Total', rText='0.0', fontName='Times-Roman', fontSize=10,
fillColor=colors.black, strokeWidth=0.5, strokeColor=colors.black, strokeDashArray=None,
dx=0, dy=0, dly=0, dlx=(0,0)):
self.lText = lText
self.rText = rText
self.fontName = fontName
self.fontSize = fontSize
self.fillColor = fillColor
self.dy = dy
self.dx = dx
self.dly = dly
self.dlx = dlx
self.strokeWidth = strokeWidth
self.strokeColor = strokeColor
self.strokeDashArray = strokeDashArray
def __call__(self,legend, g, x, xt, y, width, lWidth):
from reportlab.graphics.shapes import String, Line
fontSize = self.fontSize
fontName = self.fontName
fillColor = self.fillColor
strokeColor = self.strokeColor
strokeWidth = self.strokeWidth
ascent=getFont(fontName).face.ascent/1000.
if ascent==0: ascent=0.718 # default (from helvetica)
ascent *= fontSize
leading = fontSize*1.2
yt = y+self.dy-ascent*1.3
if self.lText and fillColor:
g.add(String(xt,yt,self.lText,
fontName=fontName,
fontSize=fontSize,
fillColor=fillColor,
textAnchor = "start"))
if self.rText:
g.add(String(xt+width,yt,self.rText,
fontName=fontName,
fontSize=fontSize,
fillColor=fillColor,
textAnchor = "end"))
if strokeWidth and strokeColor:
yL = y+self.dly-leading
g.add(Line(x+self.dlx[0],yL,x+self.dlx[1]+lWidth,yL,
strokeColor=strokeColor, strokeWidth=strokeWidth,
strokeDashArray=self.strokeDashArray))
class LineSwatch(Widget):
"""basically a Line with properties added so it can be used in a LineLegend"""
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc="x-coordinate for swatch line start point"),
y = AttrMapValue(isNumber, desc="y-coordinate for swatch line start point"),
width = AttrMapValue(isNumber, desc="length of swatch line"),
height = AttrMapValue(isNumber, desc="used for line strokeWidth"),
strokeColor = AttrMapValue(isColorOrNone, desc="color of swatch line"),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc="dash array for swatch line"),
)
def __init__(self):
from reportlab.lib.colors import red
from reportlab.graphics.shapes import Line
self.x = 0
self.y = 0
self.width = 20
self.height = 1
self.strokeColor = red
self.strokeDashArray = None
def draw(self):
l = Line(self.x,self.y,self.x+self.width,self.y)
l.strokeColor = self.strokeColor
l.strokeDashArray = self.strokeDashArray
l.strokeWidth = self.height
return l
class LineLegend(Legend):
"""A subclass of Legend for drawing legends with lines as the
swatches rather than rectangles. Useful for lineCharts and
linePlots. Should be similar in all other ways to the standard
Legend class.
"""
def __init__(self):
Legend.__init__(self)
# Size of swatch rectangle.
self.dx = 10
self.dy = 2
def _defaultSwatch(self,x,thisy,dx,dy,fillColor,strokeWidth,strokeColor):
l = LineSwatch()
l.x = x
l.y = thisy
l.width = dx
l.height = dy
l.strokeColor = fillColor
return l
def sample1c():
"Make sample legend."
d = Drawing(200, 100)
legend = Legend()
legend.alignment = 'right'
legend.x = 0
legend.y = 100
legend.dxTextSpace = 5
items = 'red green blue yellow pink black white'.split()
items = map(lambda i:(getattr(colors, i), i), items)
legend.colorNamePairs = items
d.add(legend, 'legend')
return d
def sample2c():
"Make sample legend."
d = Drawing(200, 100)
legend = Legend()
legend.alignment = 'right'
legend.x = 20
legend.y = 90
legend.deltax = 60
legend.dxTextSpace = 10
legend.columnMaximum = 4
items = 'red green blue yellow pink black white'.split()
items = map(lambda i:(getattr(colors, i), i), items)
legend.colorNamePairs = items
d.add(legend, 'legend')
return d
def sample3():
"Make sample legend with line swatches."
d = Drawing(200, 100)
legend = LineLegend()
legend.alignment = 'right'
legend.x = 20
legend.y = 90
legend.deltax = 60
legend.dxTextSpace = 10
legend.columnMaximum = 4
items = 'red green blue yellow pink black white'.split()
items = map(lambda i:(getattr(colors, i), i), items)
legend.colorNamePairs = items
d.add(legend, 'legend')
return d
def sample3a():
"Make sample legend with line swatches and dasharrays on the lines."
d = Drawing(200, 100)
legend = LineLegend()
legend.alignment = 'right'
legend.x = 20
legend.y = 90
legend.deltax = 60
legend.dxTextSpace = 10
legend.columnMaximum = 4
items = 'red green blue yellow pink black white'.split()
darrays = ([2,1], [2,5], [2,2,5,5], [1,2,3,4], [4,2,3,4], [1,2,3,4,5,6], [1])
cnp = []
for i in range(0, len(items)):
l = LineSwatch()
l.strokeColor = getattr(colors, items[i])
l.strokeDashArray = darrays[i]
cnp.append((l, items[i]))
legend.colorNamePairs = cnp
d.add(legend, 'legend')
return d
|
tmpgit/intellij-community
|
refs/heads/master
|
python/lib/Lib/wsgiref/simple_server.py
|
104
|
"""BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21)
This is both an example of how WSGI can be implemented, and a basis for running
simple web applications on a local machine, such as might be done when testing
or debugging an application. It has not been reviewed for security issues,
however, and we strongly recommend that you use a "real" web server for
production use.
For example usage, see the 'if __name__=="__main__"' block at the end of the
module. See also the BaseHTTPServer module docs for other API information.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urllib, sys
from wsgiref.handlers import SimpleHandler
__version__ = "0.1"
__all__ = ['WSGIServer', 'WSGIRequestHandler', 'demo_app', 'make_server']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class ServerHandler(SimpleHandler):
server_software = software_version
def close(self):
try:
self.request_handler.log_request(
self.status.split(' ',1)[0], self.bytes_sent
)
finally:
SimpleHandler.close(self)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
HTTPServer.server_bind(self)
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type,etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def demo_app(environ,start_response):
from StringIO import StringIO
stdout = StringIO()
print >>stdout, "Hello world!"
print >>stdout
h = environ.items(); h.sort()
for k,v in h:
print >>stdout, k,'=',`v`
start_response("200 OK", [('Content-Type','text/plain')])
return [stdout.getvalue()]
def make_server(
host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
):
"""Create a new WSGI server listening on `host` and `port` for `app`"""
server = server_class((host, port), handler_class)
server.set_app(app)
return server
if __name__ == '__main__':
httpd = make_server('', 8000, demo_app)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
import webbrowser
webbrowser.open('http://localhost:8000/xyz?abc')
httpd.handle_request() # serve one request, then exit
#
|
rhertzog/django
|
refs/heads/master
|
tests/file_uploads/urls.py
|
452
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^upload/$', views.file_upload_view),
url(r'^verify/$', views.file_upload_view_verify),
url(r'^unicode_name/$', views.file_upload_unicode_name),
url(r'^echo/$', views.file_upload_echo),
url(r'^echo_content_type_extra/$', views.file_upload_content_type_extra),
url(r'^echo_content/$', views.file_upload_echo_content),
url(r'^quota/$', views.file_upload_quota),
url(r'^quota/broken/$', views.file_upload_quota_broken),
url(r'^getlist_count/$', views.file_upload_getlist_count),
url(r'^upload_errors/$', views.file_upload_errors),
url(r'^filename_case/$', views.file_upload_filename_case_view),
url(r'^fd_closing/(?P<access>t|f)/$', views.file_upload_fd_closing),
]
|
Krolov18/Languages
|
refs/heads/master
|
Projet_media/Collecteur_texte.py
|
2
|
__author__ = 'krolev'
import os
import shlex
import argparse
import codecs
import subprocess
import re
import string
import urllib.request
import urllib.error
from bs4 import BeautifulSoup
from pyinotify import ProcessEvent, Notifier, ALL_EVENTS, WatchManager
import sqlite3
caractère_special = (' : ', ';')
net="""
class Nettoyage():
def __init__(self, objet):
self.objet = objet
def supprimer_elements(self,*args):
for element in args:
if element in self.objet:
self.objet.remove(element)
return self.objet
def supprimer_caracteres(self,table,sep=' '):
return "".join([element for element in self.objet.translate(table).split(sep) if element != ''])
"""
###### Substitution translation tables
changer_PointEspace2Tiret6 = str.maketrans('. ', '--')
changer_ponctuation2tiret6 = str.maketrans(string.punctuation + ' ', '-' * (len(string.punctuation) + 1))
changer_ponctuation2tiret8 = str.maketrans(string.punctuation + ' ', '_' * (len(string.punctuation) + 1))
changer_ponctuation2space = str.maketrans(string.punctuation, ' ' * (len(string.punctuation)))
#######
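# Example of these tables in use (added for illustration):
# "Mon. Film 2".translate(changer_ponctuation2tiret6) -> "Mon--Film-2"
# (every punctuation character and the space are mapped to '-').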
def nommer_lien_wiki_fre(langue, string):
return 'http://{langue}.wikipedia.org/wiki/{string}'.format(langue=langue, string=string.lower())
#def nommer_lien_wiki_eng(string):
#nvString = 'http://en.wikipedia.org/wiki/{0}'.format(string).lower()
#return nvString
def selectionner_donnees_wikipedia(liste):
tempList = [liste[liste.index(element) + 1] for element in liste if
("Titre " in element or "Titre Original " in element) or
("Réalisateur " in element) or
("Société de production " in element or "Sociétés de production " in element) or
("Musique " in element)]
return tempList
def recolter_donnees_wikipedia(liste, recherche):
"""
Retrieve specific data from Wikipedia.
"""
ouvertureLien = urllib.request.urlopen(recherche)
parseur = BeautifulSoup(ouvertureLien)
print(parseur.prettify())
print()
rep = input("Are you satisfied with these data? ")
oui = ["1","o","y","yes","oui"]
non = ["0","n","no","non"]
while rep not in oui and rep not in non:
rep = input("I did not understand your choice.\nPlease type (y or n) again. Thank you!")
if rep.lower() in non:
nvOuv = input("Please type the exact link to the media's site: ")
ouvertureLien = urllib.request.urlopen(nvOuv)
parseur = BeautifulSoup(ouvertureLien)
elif rep.lower() in oui:
return list(recherche)
def selectionner_mediaInfo(liste):
"""
MediaInfo gives us information that we will put into a more usable format.
Eventually this could become a dictionary of dictionaries:
"""
if isinstance(liste, list):
temp = [re.sub(' *: ', ';', element) for element in liste]
return dict([(x.split(';')) for x in temp])
#return [element.split(";")[1]
#for element in temp if
#(element.split(";")[0] == "Track name") or
#(element.split(";")[0] == "Track name/Position") or
#(element.split(";")[0] == "Album") or
#(element.split(";")[0] == "Performer") or
#(element.split(";")[0] == "Recorded date") or
#(element.split(";")[0] == "Sampling rate")]
else:
print("Ce doit être une liste pas un(e) {0}".format(type(liste)))
def formatter_mediaInfo(liste):
return [re.sub(' *: ', ';', element) for element in liste]
def nettoyer_lignes_parsees(liste):
"""
This function parses HTML code. It simply selects a region
by its opening tag (arg2) and that tag's attributes.
The "liste" argument may turn out to be a BeautifulSoup object (e.g. list(parseur.find(balise, attributs)))
This function cleans "<>" tags and their markup out of a list.
ex: "<salutation>Bonjour!</salutation>" --> "Bonjour!"
"""
return [y for y in [re.sub('<[^<]+?>', '', str(elt)).strip() for elt in liste] if y != ""]
def subprocess_command(commande, stdout=False):
"""
:param commande: string
:param stdout: False (default)
:return: nothing if stdout is False, otherwise a list.
"""
if not isinstance(commande, str):
print("Erreur: ",type(commande))
else:
appel = subprocess.Popen(shlex.split(commande), stdout=subprocess.PIPE, universal_newlines=True)
if stdout == True:
return [y for y in [element.strip() for element in appel.stdout.readlines()] if y != '']
fermeture = appel.communicate()
return
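# Example (added for illustration): subprocess_command("ls -1 /tmp", stdout=True)
# returns the non-empty, stripped output lines of `ls -1 /tmp` as a list.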
def renommer_fichier(parent):
"""
Function that takes a path to a directory, looks at its components,
and if they are directories builds "enfants"; then, using
subprocess_command, renames with the command mv "oldName newName".
The following line performs the same action but on a single line.
[[print("mv '{0}' '{1}'".format(parent+"/"+element+"/"+fichier,parent+"/"+element+os.path.splitext(fichier)[1])) for fichier in os.listdir(parent+"/"+element) if (len(os.listdir(parent+"/"+element)) == 1)] for element in enfants]
"""
enfants = [enfant for enfant in os.listdir(parent) if os.path.isdir(os.path.join(parent, enfant))]
for element in enfants:
nvPath=parent+"/"+element
for fichier in os.listdir(parent+"/"+element):
if len(os.listdir(nvPath)) == 1:
print("mv '{0}' '{1}'".format(nvPath+"/"+fichier,parent+"/"+element+os.path.splitext(fichier)[1]))
subprocess_command("mv '{0}' '{1}'".format(nvPath+"/"+fichier,nvPath+"/"+element+os.path.splitext(fichier)[1]))
return
def Ouverture_lien(string):
try:
htmlreq = urllib.request.urlopen(string)
except urllib.error.HTTPError as err:
print(err.code)
return False
return htmlreq
def db_creation(databaseName,donnees):
with sqlite3.connect(databaseName) as database:
curseur = database.cursor()
curseur.execute('CREATE TABLE IF NOT EXISTS Musiques ()')
curseur.execute('INSERT INTO Musiques VALUES (?,?,?,?,?,?,?)', donnees)
database.commit()
def Recuperer_base(databaseName):
with sqlite3.connect(databaseName) as database:
curseur = database.cursor()
curseur.execute('SELECT orthographe, phonétique FROM lexique')
liste = curseur.fetchall()
return liste
|
johndoe31415/flightpanel
|
refs/heads/master
|
travis-deploy/BuildManager.py
|
1
|
#!/usr/bin/python3
#
# flightpanel - A Cortex-M4 based USB flight panel for flight simulators.
# Copyright (C) 2017-2017 Johannes Bauer
#
# This file is part of flightpanel.
#
# flightpanel is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; this program is ONLY licensed under
# version 3 of the License, later versions are explicitly excluded.
#
# flightpanel is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with flightpanel; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Johannes Bauer <JohannesBauer@gmx.de>
import os
import re
import logging
import subprocess
from .TimeLogger import TimeLogger
from .Exceptions import BuildImplementationError, BuildPrerequisitesNotMetException
class BuildManager(object):
__SUBSTITUTION_RE = re.compile(r"\${(?P<varname>[a-zA-Z_]+)}")
_SUPPORTED_ACTIONS = [ ]
_REQUIRED_ENVIRONMENT = { }
_PATHS = { }
_FILES = { }
def __init__(self, args):
self._args = args
for filename in self._FILES.values():
if filename.endswith("/"):
raise BuildImplementationError("Declared file '%s' ends with '/'." % (filename))
self._log = logging.getLogger("bm")
def _execute(self, cmd, add_env = None):
environment = dict(os.environ)
if add_env is not None:
environment.update(add_env)
subprocess.check_call(cmd, env = environment)
def _add_to_path(self, dirname):
path = os.environ.get("PATH")
if path is None:
os.environ["PATH"] = dirname
else:
os.environ["PATH"] = dirname + ":" + path
def _substitute(self, text):
def sub_fnc(match):
varname = match.groupdict()["varname"]
return self._get_dir(varname, must_exist = False, create = False)
text = self.__SUBSTITUTION_RE.sub(sub_fnc, text)
return text
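# Illustration (added; the _PATHS entry is hypothetical): with
#     _PATHS = { "BUILD": "${HOME}/fp-build" }
# _get_dir("BUILD") runs the value through _substitute, expanding ${HOME}
# via _get_dir("HOME"), and returns e.g. "/home/user//fp-build/" -- every
# resolved directory gains a trailing slash, so a harmless '//' can appear.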
def _get_dir(self, dirhandle, must_exist = False, create = False):
if dirhandle == "HOME":
directory = os.getenv("HOME")
else:
if dirhandle not in self._PATHS:
raise BuildImplementationError("Requested directory '%s' not declared." % (dirhandle))
directory = self._PATHS[dirhandle]
directory = self._substitute(directory)
if not directory.endswith("/"):
directory += "/"
if create and not os.path.exists(directory):
os.makedirs(directory)
if must_exist:
if not os.path.exists(directory):
raise BuildPrerequisitesNotMetException("Requested directory '%s' should exist, but does not." % (directory))
if os.path.exists(directory) and (not os.path.isdir(directory)):
raise BuildPrerequisitesNotMetException("Requested directory '%s' exists, but is not a directory." % (directory))
return directory
def _get_file(self, filehandle, must_exist = False):
if filehandle not in self._FILES:
raise BuildImplementationError("Requested filename '%s' not declared." % (filehandle))
filename = self._FILES[filehandle]
filename = self._substitute(filename)
if must_exist:
if not os.path.exists(filename):
raise BuildPrerequisitesNotMetException("Requested file '%s' should exist, but does not." % (filename))
if os.path.exists(filename) and (not os.path.isfile(filename)):
raise BuildPrerequisitesNotMetException("Requested file '%s' exists, but is not a regular file." % (filename))
return filename
def execute_actions(self, actions):
for action in actions:
if action not in self._SUPPORTED_ACTIONS:
raise BuildImplementationError("Action '%s' requested, but not contained within SUPPORTED_ACTIONS." % (action))
required_environment = self._REQUIRED_ENVIRONMENT.get(action, [ ])
for required_variable in required_environment:
if os.getenv(required_variable) is None:
raise BuildPrerequisitesNotMetException("Build action '%s' requires environment variable '%s' to be set, but this wasn't the case." % (action, required_variable))
for action in self._SUPPORTED_ACTIONS:
handler_name = "do_" + action
handler = getattr(self, handler_name)
if handler is None:
raise BuildImplementationError("Action '%s' declared in SUPPORTED_ACTIONS, but no handler called '%s' implemented." % (action, handler_name))
if len(actions) == 0:
return False
action_order = { action: index for (index, action) in enumerate(self._SUPPORTED_ACTIONS) }
requested_action_ids = sorted(action_order[action] for action in actions)
ordered_actions = [ self._SUPPORTED_ACTIONS[action_id] for action_id in requested_action_ids ]
with TimeLogger(self._log, "Running %d build actions (%s)" % (len(ordered_actions), ", ".join(ordered_actions))):
for action in ordered_actions:
handler_name = "do_" + action
handler = getattr(self, handler_name)
with TimeLogger(self._log, "Performing \"%s\"" % (action)):
handler()
return True
def execute(self):
actions = [ ]
for name in vars(self._args):
value = getattr(self._args, name)
if name.startswith("do_") and value:
action = name[3:]
actions.append(action)
return self.execute_actions(actions)
def execute_all(self):
return self.execute_actions(self._SUPPORTED_ACTIONS)
|
KennethPierce/pylearnk
|
refs/heads/fixNogil/master
|
pylearn2/distributions/__init__.py
|
147
|
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
|
eddyb/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/XMLHttpRequest/resources/access-control-origin-header.py
|
20
|
#!/usr/bin/env python
def main(request, response):
response.headers.set("Content-Type", "text/plain");
response.headers.set("Cache-Control", "no-cache, no-store");
response.headers.set("Access-Control-Allow-External", "true");
response.headers.set("Access-Control-Allow-Origin", "*");
response.content = "PASS: Cross-domain access allowed.\n"
response.content += "HTTP_ORIGIN: " + request.headers.get("origin");
|
radianbaskoro/mathdoku-solver
|
refs/heads/master
|
mathdokusolver/__init__.py
|
12133432
| |
bohlian/erpnext
|
refs/heads/develop
|
erpnext/docs/assets/img/videos/__init__.py
|
12133432
| |
netsuileo/sfu-cluster-dashboard
|
refs/heads/master
|
dashboard/app/api/monitoring/__init__.py
|
12133432
| |
IEEE-NITK/DeepNLP
|
refs/heads/master
|
Project-Code/classifier_model/classifier_model.py
|
2
|
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding, TimeDistributed
from keras.layers import LSTM, Input, RepeatVector
from keras.preprocessing.sequence import pad_sequences
from scipy import spatial
import numpy as np
import nltk
import re
import sys
import pickle
from os.path import exists as file_exists
from functools import reduce
model_save_file = 'classifier1.h5'
input_qa_file = 'GOT_QA.pkl'
classifier_data_file = 'classifier_data.pkl'
pad_token = '<pad>'
loss_function = 'categorical_crossentropy'
optimizer = 'rmsprop'
metrics = ['accuracy']
maxlen = 10
num_epoch = 5
batch_size = 16
num_feat_word_vec = 128
num_feat_sent_vec = 128
vocab = {}
vocab["word2id"] = {}
vocab["id2word"] = {}
is_word = re.compile(r'^[a-zA-Z]*$')
val_qas = pickle.load(open(input_qa_file, 'rb+'))
questions = list(map(lambda x: x[0], val_qas))
answers = list(map(lambda x: x[1], val_qas))
book_sents = pickle.load(open(classifier_data_file, 'rb+'))
def clean_input(questions):
questions = list(map(lambda x: x.strip(), questions))
lower_sent = lambda sent: ' '.join([word.lower() for word in nltk.word_tokenize(sent)])
questions = list(map(lambda x: lower_sent(x), questions))
return questions
questions = clean_input(questions)
book_sents = clean_input(book_sents)
def getID(word, create=True):
if word != pad_token and not is_word.match(word):
return -2
wid = vocab["word2id"].get(word, -1)
if wid == -1:
if create:
wid = len(vocab["word2id"])
vocab["word2id"][word] = wid
else:
wid = vocab["word2id"].get("<unknown>")
return wid
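# Example (added): getID("winter") assigns and returns a fresh id. A token
# with a digit such as "winter1" fails the is_word regex and returns -2.
# getID("dragon", create=False) on an unseen word falls back to
# vocab["word2id"].get("<unknown>"), which is None unless "<unknown>" was
# ever registered.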
getID(pad_token)
for ques in questions:
for word in nltk.word_tokenize(ques):
getID(word)
for sent in book_sents:
for word in nltk.word_tokenize(sent):
getID(word)
vocab_length = len(vocab["word2id"])
vocab["id2word"] = { v: k for k, v in vocab["word2id"].items() }
id_mat = np.identity(2, dtype='int32')
print('Vocabulary created.')
print("Created vocabulary of " + str(vocab_length) + " words.")
def sen2enco(sentence):
return [getID(word, create=False) for word in nltk.word_tokenize(sentence)]
print('Creating training samples...')
onehot_quotes = [sen2enco(ques) for ques in questions]
book_onehotq = [sen2enco(ques) for ques in book_sents]
sequences_ques = pad_sequences(onehot_quotes, maxlen=maxlen, dtype='int32',
padding='pre', truncating='pre', value=0.)
sequences_book_qs = pad_sequences(book_onehotq, maxlen=maxlen, dtype='int32',
padding='pre', truncating='pre', value=0.)
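# Example of the padding step (added): with maxlen=10 an encoded question
# such as [4, 17, 9] becomes [0, 0, 0, 0, 0, 0, 0, 4, 17, 9]; padding='pre'
# keeps the words at the end, and truncating='pre' drops the front of any
# question longer than maxlen.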
labels = []
X_train = []
for x in sequences_ques:
X_train.append(x)
labels.append(id_mat[0])
for x in sequences_book_qs:
X_train.append(x)
labels.append(id_mat[1])
#labels = [list(map(lambda x: id_mat[x], y)) for y in sequences_ques]
labels = np.array(labels, dtype='int32')
X_train = np.array(X_train, dtype='int32')
input_dim = num_feat_word_vec
output_dim = 2
def create_seqauto_model():
inputs = Input(shape=(maxlen,))
embed = Embedding(vocab_length, input_dim,
input_length=maxlen,
mask_zero=True)(inputs)
encoded = LSTM(num_feat_sent_vec)(embed)
dense_out = Dense(output_dim, activation='softmax')(encoded)
sequence_autoencoder = Model(inputs, dense_out)
return sequence_autoencoder
if file_exists(model_save_file):
sequence_autoencoder = load_model(model_save_file)
else:
sequence_autoencoder = create_seqauto_model()
sequence_autoencoder.compile(loss=loss_function,
optimizer=optimizer,
metrics=metrics)
def format_model_i(ques):
ques = clean_input([ques])[0]
id_s = sen2enco(ques)
padded_ids = pad_sequences([id_s], maxlen=maxlen, dtype='int32',
padding='pre', truncating='pre', value=0.)
enc_ques = padded_ids
return enc_ques
def format_model_o(pred):
pred_l = np.argmax(pred, axis=1)[0]
return pred_l
def predict(model, ques):
enc_ques = format_model_i(ques)
pred = model.predict(enc_ques)
return format_model_o(pred)
PREDICT_FLAG = False
TRAIN_FLAG = False
if sys.argv[1] == '--predict':
PREDICT_FLAG = True
elif sys.argv[1] == '--train':
TRAIN_FLAG = True
while PREDICT_FLAG:
input_q = input("enter query: ")
print(predict(sequence_autoencoder, input_q))
q = 'y'
while TRAIN_FLAG and q != 'n':
sequence_autoencoder.fit(X_train, labels, batch_size=batch_size, nb_epoch=num_epoch)
sequence_autoencoder.save(model_save_file)
q = input("More? y/n: ")
|
hale36/SRTV
|
refs/heads/master
|
lib/hachoir_core/event_handler.py
|
188
|
class EventHandler(object):
"""
Class to connect events to event handlers.
"""
def __init__(self):
self.handlers = {}
def connect(self, event_name, handler):
"""
Connect an event handler to an event. Append it to handlers list.
"""
try:
self.handlers[event_name].append(handler)
except KeyError:
self.handlers[event_name] = [handler]
def raiseEvent(self, event_name, *args):
"""
Raise an event: call each handler for this event_name.
"""
if event_name not in self.handlers:
return
for handler in self.handlers[event_name]:
handler(*args)
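# Minimal usage sketch (added for illustration; not part of the original
# module). Handlers for one event name run in the order they were connected:
#
# def on_progress(pct):
#     print "progress: %d%%" % pct
# events = EventHandler()
# events.connect("progress", on_progress)
# events.raiseEvent("progress", 42)   # calls on_progress(42)
# events.raiseEvent("unknown")        # no handlers registered: a no-op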
|
stephen144/odoo
|
refs/heads/9.0
|
openerp/addons/base/tests/test_func.py
|
30
|
# -*- coding: utf-8 -*-
import functools
import unittest
from openerp.tools.func import compose
from openerp.tools import frozendict
class TestCompose(unittest.TestCase):
def test_basic(self):
str_add = compose(str, lambda a, b: a + b)
self.assertEqual(
str_add(1, 2),
"3")
def test_decorator(self):
""" ensure compose() can be partially applied as a decorator
"""
@functools.partial(compose, unicode)
def mul(a, b):
return a * b
self.assertEqual(mul(5, 42), u"210")
class TestFrozendict(unittest.TestCase):
def test_frozendict_immutable(self):
""" Ensure that a frozendict is immutable. """
vals = {'name': 'Joe', 'age': 42}
frozen_vals = frozendict(vals)
# check __setitem__, __delitem__
with self.assertRaises(Exception):
frozen_vals['surname'] = 'Jack'
with self.assertRaises(Exception):
frozen_vals['name'] = 'Jack'
with self.assertRaises(Exception):
del frozen_vals['name']
# check update, setdefault, pop, popitem, clear
with self.assertRaises(Exception):
frozen_vals.update({'surname': 'Jack'})
with self.assertRaises(Exception):
frozen_vals.update({'name': 'Jack'})
with self.assertRaises(Exception):
frozen_vals.setdefault('surname', 'Jack')
with self.assertRaises(Exception):
frozen_vals.pop('surname', 'Jack')
with self.assertRaises(Exception):
frozen_vals.pop('name', 'Jack')
with self.assertRaises(Exception):
frozen_vals.popitem()
with self.assertRaises(Exception):
frozen_vals.clear()
def test_frozendict_hash(self):
""" Ensure that a frozendict is hashable. """
# dict with simple values
hash(frozendict({'name': 'Joe', 'age': 42}))
# dict with tuples, lists, and embedded dicts
hash(frozendict({
'user_id': (42, 'Joe'),
'line_ids': [(0, 0, {'values': [42]})],
}))
|
rockyzhang/zhangyanhit-python-for-android-mips
|
refs/heads/master
|
python-build/python-libs/gdata/build/lib/gdata/alt/__init__.py
|
271
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package's modules adapt the gdata library to run in other environments
The first example is the appengine module which contains functions and
classes which modify a GDataService object to run on Google App Engine.
"""
|
andris210296/andris-projeto
|
refs/heads/master
|
backend/venv/lib/python2.7/site-packages/gaecookie/security.py
|
9
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
from webapp2_extras import securecookie
from gaecookie.manager import FindOrCreateSecrets
class SignCmd(FindOrCreateSecrets):
def __init__(self, name, dct):
self.name = name
self.dct = dct
self._find_secret = ()
super(SignCmd, self).__init__()
def do_business(self, stop_on_error=False):
super(SignCmd, self).do_business(stop_on_error)
secret = self.result
if secret:
value = json.dumps(self.dct)
serializer = securecookie.SecureCookieSerializer(str(secret[0]))
self.result = serializer.serialize(self.name, value)
else:
self.result = None
class RetrieveCmd(FindOrCreateSecrets):
def __init__(self, name, signed, max_age):
self.max_age = max_age
self.name = name
self.signed = signed
super(RetrieveCmd, self).__init__()
def do_business(self, stop_on_error=False):
super(RetrieveCmd, self).do_business(stop_on_error)
secrets = self.result
if secrets:
for s in secrets:
serializer = securecookie.SecureCookieSerializer(str(s))
data = serializer.deserialize(self.name, self.signed, self.max_age)
if data:
self.result = json.loads(data)
return
self.result = None
|
ASCrookes/django
|
refs/heads/master
|
django/contrib/gis/gdal/geomtype.py
|
297
|
from django.contrib.gis.gdal.error import GDALException
from django.utils import six
class OGRGeomType(object):
"Encapulates OGR Geometry Types."
wkb25bit = -2147483648
    # Dictionary of acceptable OGRwkbGeometryType values and their string names.
_types = {0: 'Unknown',
1: 'Point',
2: 'LineString',
3: 'Polygon',
4: 'MultiPoint',
5: 'MultiLineString',
6: 'MultiPolygon',
7: 'GeometryCollection',
100: 'None',
101: 'LinearRing',
1 + wkb25bit: 'Point25D',
2 + wkb25bit: 'LineString25D',
3 + wkb25bit: 'Polygon25D',
4 + wkb25bit: 'MultiPoint25D',
5 + wkb25bit: 'MultiLineString25D',
6 + wkb25bit: 'MultiPolygon25D',
7 + wkb25bit: 'GeometryCollection25D',
}
# Reverse type dictionary, keyed by lower-case of the name.
_str_types = {v.lower(): k for k, v in _types.items()}
def __init__(self, type_input):
"Figures out the correct OGR Type based upon the input."
if isinstance(type_input, OGRGeomType):
num = type_input.num
elif isinstance(type_input, six.string_types):
type_input = type_input.lower()
if type_input == 'geometry':
type_input = 'unknown'
num = self._str_types.get(type_input)
if num is None:
raise GDALException('Invalid OGR String Type "%s"' % type_input)
elif isinstance(type_input, int):
if type_input not in self._types:
raise GDALException('Invalid OGR Integer Type: %d' % type_input)
num = type_input
else:
raise TypeError('Invalid OGR input type given.')
# Setting the OGR geometry type number.
self.num = num
def __str__(self):
"Returns the value of the name property."
return self.name
def __eq__(self, other):
"""
Does an equivalence test on the OGR type with the given
other OGRGeomType, the short-hand string, or the integer.
"""
if isinstance(other, OGRGeomType):
return self.num == other.num
elif isinstance(other, six.string_types):
return self.name.lower() == other.lower()
elif isinstance(other, int):
return self.num == other
else:
return False
def __ne__(self, other):
return not (self == other)
@property
def name(self):
"Returns a short-hand string form of the OGR Geometry type."
return self._types[self.num]
@property
def django(self):
"Returns the Django GeometryField for this OGR Type."
s = self.name.replace('25D', '')
if s in ('LinearRing', 'None'):
return None
elif s == 'Unknown':
s = 'Geometry'
return s + 'Field'
def to_multi(self):
"""
Transform Point, LineString, Polygon, and their 25D equivalents
to their Multi... counterpart.
"""
if self.name.startswith(('Point', 'LineString', 'Polygon')):
self.num += 3
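# Minimal usage sketch (illustrative, not part of Django): OGRGeomType accepts
# an integer code, a type name, or another OGRGeomType, and compares equal to
# any of those forms.
if __name__ == "__main__":
    gt = OGRGeomType("Point")
    assert gt == 1 and gt == "point" and gt == OGRGeomType(1)
    assert gt.django == "PointField"
    gt.to_multi()
    assert gt.name == "MultiPoint"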
|
Ozerich/ajenti
|
refs/heads/master
|
ajenti/ui/api.py
|
2
|
from ajenti.com import Interface
class IXSLTFunctionProvider(Interface):
def get_funcs(self):
pass
|
clobrano/personfinder
|
refs/heads/master
|
app/pytz/zoneinfo/America/Port_of_Spain.py
|
9
|
'''tzinfo timezone information for America/Port_of_Spain.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Port_of_Spain(DstTzInfo):
'''America/Port_of_Spain timezone definition. See datetime.tzinfo for details'''
zone = 'America/Port_of_Spain'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1912,3,2,4,6,4),
]
_transition_info = [
i(-14760,0,'LMT'),
i(-14400,0,'AST'),
]
Port_of_Spain = Port_of_Spain()
|
cloudera/hue
|
refs/heads/master
|
desktop/core/ext-py/ply-3.11/example/BASIC/basic.py
|
10
|
# An implementation of Dartmouth BASIC (1964)
#
import sys
sys.path.insert(0, "../..")
if sys.version_info[0] >= 3:
raw_input = input
import basiclex
import basparse
import basinterp
# If a filename has been specified, we try to run it.
# If a runtime error occurs, we bail out and enter
# interactive mode below
if len(sys.argv) == 2:
data = open(sys.argv[1]).read()
prog = basparse.parse(data)
if not prog:
raise SystemExit
b = basinterp.BasicInterpreter(prog)
try:
b.run()
raise SystemExit
except RuntimeError:
pass
else:
b = basinterp.BasicInterpreter({})
# Interactive mode. This incrementally adds/deletes statements
# from the program stored in the BasicInterpreter object. In
# addition, special commands 'NEW','LIST',and 'RUN' are added.
# Specifying a line number with no code deletes that line from
# the program.
while 1:
try:
line = raw_input("[BASIC] ")
except EOFError:
raise SystemExit
if not line:
continue
line += "\n"
prog = basparse.parse(line)
if not prog:
continue
keys = list(prog)
if keys[0] > 0:
b.add_statements(prog)
else:
stat = prog[keys[0]]
if stat[0] == 'RUN':
try:
b.run()
except RuntimeError:
pass
elif stat[0] == 'LIST':
b.list()
elif stat[0] == 'BLANK':
b.del_line(stat[1])
elif stat[0] == 'NEW':
b.new()
|
Kromey/fbxnano
|
refs/heads/master
|
forum/forms.py
|
2
|
from django.forms import ModelForm,Textarea,TextInput
from .models import Post
class PostForm(ModelForm):
class Meta:
model = Post
fields = ('subject','body')
widgets = {
'subject': TextInput(attrs={'autofocus':'autofocus'}),
'body': Textarea(
attrs={
'data-provide':'markdown',
'data-hidden-buttons':'cmdHeading',
'data-iconlibrary':'octicons',
'data-resize': 'vertical',
}),
}
|
CAAD-RWTH/ClockworkForDynamo
|
refs/heads/master
|
nodes/0.8.x/python/View.SetPhase.py
|
16
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
views = UnwrapElement(IN[0])
phase = UnwrapElement(IN[1])
booleans = list()
TransactionManager.Instance.EnsureInTransaction(doc)
for view in views:
try:
view.get_Parameter(BuiltInParameter.VIEW_PHASE).Set(phase.Id)
booleans.append(True)
except:
booleans.append(False)
TransactionManager.Instance.TransactionTaskDone()
OUT = (views,booleans)
|
dtschan/weblate
|
refs/heads/master
|
weblate/trans/tests/test_suggestions.py
|
2
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for suggestion views.
"""
from django.core.urlresolvers import reverse
from weblate.trans.models.unitdata import Suggestion
from weblate.trans.tests.test_views import ViewTestCase
class SuggestionsTest(ViewTestCase):
def add_suggestion_1(self):
return self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n',
suggest='yes'
)
def add_suggestion_2(self):
return self.edit_unit(
'Hello, world!\n',
'Ahoj svete!\n',
suggest='yes'
)
def test_add(self):
translate_url = reverse('translate', kwargs=self.kw_translation)
# Try empty suggestion (should not be added)
response = self.edit_unit(
'Hello, world!\n',
'',
suggest='yes'
)
# We should stay on same message
self.assertRedirectsOffset(response, translate_url, 0)
# Add first suggestion
response = self.add_suggestion_1()
# We should get to second message
self.assertRedirectsOffset(response, translate_url, 1)
# Add second suggestion
response = self.add_suggestion_2()
# We should get to second message
self.assertRedirectsOffset(response, translate_url, 1)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 1)
self.assertBackend(0)
# Unit should not be translated
self.assertEqual(len(unit.checks()), 0)
self.assertFalse(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(len(self.get_unit().suggestions()), 2)
def test_delete(self):
translate_url = reverse('translate', kwargs=self.kw_translation)
# Create two suggestions
self.add_suggestion_1()
self.add_suggestion_2()
# Get ids of created suggestions
suggestions = [sug.pk for sug in self.get_unit().suggestions()]
self.assertEqual(len(suggestions), 2)
# Delete one of suggestions
response = self.edit_unit(
'Hello, world!\n',
'',
delete=suggestions[0],
)
self.assertRedirectsOffset(response, translate_url, 0)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 1)
self.assertBackend(0)
# Unit should not be translated
self.assertEqual(len(unit.checks()), 0)
self.assertFalse(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(len(self.get_unit().suggestions()), 1)
def test_accept_edit(self):
translate_url = reverse('translate', kwargs=self.kw_translation)
# Create suggestion
self.add_suggestion_1()
# Get ids of created suggestions
suggestion = self.get_unit().suggestions()[0].pk
# Accept one of suggestions
response = self.edit_unit(
'Hello, world!\n',
'',
accept_edit=suggestion,
)
self.assertRedirectsOffset(response, translate_url, 0)
def test_accept(self):
translate_url = reverse('translate', kwargs=self.kw_translation)
# Create two suggestions
self.add_suggestion_1()
self.add_suggestion_2()
# Get ids of created suggestions
suggestions = [sug.pk for sug in self.get_unit().suggestions()]
self.assertEqual(len(suggestions), 2)
# Accept one of suggestions
response = self.edit_unit(
'Hello, world!\n',
'',
accept=suggestions[1],
)
self.assertRedirectsOffset(response, translate_url, 1)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 1)
# Unit should be translated
self.assertEqual(len(unit.checks()), 0)
self.assertTrue(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(unit.target, 'Ahoj svete!\n')
self.assertBackend(1)
self.assertEqual(len(self.get_unit().suggestions()), 1)
def test_accept_anonymous(self):
translate_url = reverse('translate', kwargs=self.kw_translation)
self.client.logout()
# Create suggestions
self.add_suggestion_1()
self.client.login(username='testuser', password='testpassword')
# Get ids of created suggestion
suggestions = list(self.get_unit().suggestions())
self.assertEqual(len(suggestions), 1)
self.assertIsNone(suggestions[0].user)
# Accept one of suggestions
response = self.edit_unit(
'Hello, world!\n',
'',
accept=suggestions[0].pk,
)
self.assertRedirectsOffset(response, translate_url, 1)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 0)
# Unit should be translated
self.assertEqual(unit.target, 'Nazdar svete!\n')
def test_vote(self):
translate_url = reverse('translate', kwargs=self.kw_translation)
self.subproject.suggestion_voting = True
self.subproject.suggestion_autoaccept = 0
self.subproject.save()
self.add_suggestion_1()
suggestion_id = self.get_unit().suggestions()[0].pk
response = self.edit_unit(
'Hello, world!\n',
'',
upvote=suggestion_id,
)
self.assertRedirectsOffset(response, translate_url, 0)
suggestion = Suggestion.objects.get(pk=suggestion_id)
self.assertEqual(
suggestion.get_num_votes(),
1
)
response = self.edit_unit(
'Hello, world!\n',
'',
downvote=suggestion_id,
)
self.assertRedirectsOffset(response, translate_url, 0)
suggestion = Suggestion.objects.get(pk=suggestion_id)
self.assertEqual(
suggestion.get_num_votes(),
-1
)
def test_vote_autoaccept(self):
self.add_suggestion_1()
translate_url = reverse('translate', kwargs=self.kw_translation)
self.subproject.suggestion_voting = True
self.subproject.suggestion_autoaccept = 1
self.subproject.save()
suggestion_id = self.get_unit().suggestions()[0].pk
response = self.edit_unit(
'Hello, world!\n',
'',
upvote=suggestion_id,
)
self.assertRedirectsOffset(response, translate_url, 0)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 0)
# Unit should be translated
self.assertEqual(len(unit.checks()), 0)
self.assertTrue(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertBackend(1)
|
reachalpineswift/frappe-bench
|
refs/heads/master
|
frappe/custom/doctype/customize_form/customize_form.py
|
3
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Customize Form is a Single DocType used to mask the Property Setter
Thus providing a better UI from the user's perspective
"""
import frappe, json
from frappe import _
from frappe.utils import cint
from frappe.model.document import Document
from frappe.model import no_value_fields
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
class CustomizeForm(Document):
doctype_properties = {
'search_fields': 'Data',
'sort_field': 'Data',
'sort_order': 'Data',
'default_print_format': 'Data',
'read_only_onload': 'Check',
'allow_copy': 'Check',
'max_attachments': 'Int'
}
docfield_properties = {
'idx': 'Int',
'label': 'Data',
'fieldtype': 'Select',
'options': 'Text',
'permlevel': 'Int',
'width': 'Data',
'print_width': 'Data',
'reqd': 'Check',
'unique': 'Check',
'ignore_user_permissions': 'Check',
'in_filter': 'Check',
'in_list_view': 'Check',
'hidden': 'Check',
'print_hide': 'Check',
'report_hide': 'Check',
'allow_on_submit': 'Check',
'depends_on': 'Data',
'description': 'Text',
'default': 'Text',
'precision': 'Select'
}
allowed_fieldtype_change = (('Currency', 'Float', 'Percent'), ('Small Text', 'Data'),
('Text', 'Text Editor', 'Code'), ('Data', 'Select'), ('Text', 'Small Text'))
def on_update(self):
frappe.db.sql("delete from tabSingles where doctype='Customize Form'")
frappe.db.sql("delete from `tabCustomize Form Field`")
def fetch_to_customize(self):
self.clear_existing_doc()
if not self.doc_type:
return
meta = frappe.get_meta(self.doc_type)
# doctype properties
for property in self.doctype_properties:
self.set(property, meta.get(property))
for d in meta.get("fields"):
new_d = {"fieldname": d.fieldname, "is_custom_field": d.get("is_custom_field"), "name": d.name}
for property in self.docfield_properties:
new_d[property] = d.get(property)
self.append("fields", new_d)
# NOTE doc is sent to clientside by run_method
def clear_existing_doc(self):
doc_type = self.doc_type
for fieldname in self.meta.get_valid_columns():
self.set(fieldname, None)
for df in self.meta.get_table_fields():
self.set(df.fieldname, [])
self.doc_type = doc_type
self.name = "Customize Form"
def save_customization(self):
if not self.doc_type:
return
self.set_property_setters()
self.update_custom_fields()
self.set_idx_property_setter()
validate_fields_for_doctype(self.doc_type)
frappe.msgprint(_("{0} updated").format(_(self.doc_type)))
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
def set_property_setters(self):
meta = frappe.get_meta(self.doc_type)
# doctype property setters
for property in self.doctype_properties:
if self.get(property) != meta.get(property):
self.make_property_setter(property=property, value=self.get(property),
property_type=self.doctype_properties[property])
update_db = False
for df in self.get("fields"):
if df.get("__islocal"):
continue
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not meta_df or meta_df[0].get("is_custom_field"):
continue
for property in self.docfield_properties:
if property != "idx" and df.get(property) != meta_df[0].get(property):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
elif property == "allow_on_submit" and df.get(property):
frappe.msgprint(_("Row {0}: Not allowed to enable Allow on Submit for standard fields")\
.format(df.idx))
continue
elif property == "in_list_view" and df.get(property) \
and df.fieldtype!="Image" and df.fieldtype in no_value_fields:
frappe.msgprint(_("'In List View' not allowed for type {0} in row {1}")
.format(df.fieldtype, df.idx))
continue
elif property == "precision" and cint(df.get("precision")) > 6 \
and cint(df.get("precision")) > cint(meta_df[0].get("precision")):
update_db = True
elif property == "unique":
update_db = True
self.make_property_setter(property=property, value=df.get(property),
property_type=self.docfield_properties[property], fieldname=df.fieldname)
if update_db:
from frappe.model.db_schema import updatedb
updatedb(self.doc_type)
def update_custom_fields(self):
for df in self.get("fields"):
if df.get("__islocal"):
self.add_custom_field(df)
else:
self.update_in_custom_field(df)
self.delete_custom_fields()
def add_custom_field(self, df):
d = frappe.new_doc("Custom Field")
d.dt = self.doc_type
for property in self.docfield_properties:
d.set(property, df.get(property))
d.insert()
df.fieldname = d.fieldname
def update_in_custom_field(self, df):
meta = frappe.get_meta(self.doc_type)
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not (meta_df and meta_df[0].get("is_custom_field")):
return
custom_field = frappe.get_doc("Custom Field", meta_df[0].name)
changed = False
for property in self.docfield_properties:
if df.get(property) != custom_field.get(property):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
custom_field.set(property, df.get(property))
changed = True
if changed:
custom_field.flags.ignore_validate = True
custom_field.save()
def delete_custom_fields(self):
meta = frappe.get_meta(self.doc_type)
fields_to_remove = (set([df.fieldname for df in meta.get("fields")])
- set(df.fieldname for df in self.get("fields")))
for fieldname in fields_to_remove:
df = meta.get("fields", {"fieldname": fieldname})[0]
if df.get("is_custom_field"):
frappe.delete_doc("Custom Field", df.name)
def set_idx_property_setter(self):
meta = frappe.get_meta(self.doc_type)
field_order_has_changed = [df.fieldname for df in meta.get("fields")] != \
[d.fieldname for d in self.get("fields")]
if field_order_has_changed:
_idx = []
for df in sorted(self.get("fields"), key=lambda x: x.idx):
_idx.append(df.fieldname)
self.make_property_setter(property="_idx", value=json.dumps(_idx), property_type="Text")
def make_property_setter(self, property, value, property_type, fieldname=None):
self.delete_existing_property_setter(property, fieldname)
property_value = self.get_existing_property_value(property, fieldname)
if property_value==value:
return
# create a new property setter
        # ignore validation because it will be done at the end
frappe.make_property_setter({
"doctype": self.doc_type,
"doctype_or_field": "DocField" if fieldname else "DocType",
"fieldname": fieldname,
"property": property,
"value": value,
"property_type": property_type
}, ignore_validate=True)
def delete_existing_property_setter(self, property, fieldname=None):
# first delete existing property setter
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.doc_type,
"property": property, "field_name['']": fieldname or ''})
if existing_property_setter:
frappe.delete_doc("Property Setter", existing_property_setter)
def get_existing_property_value(self, property_name, fieldname=None):
# check if there is any need to make property setter!
if fieldname:
property_value = frappe.db.get_value("DocField", {"parent": self.doc_type,
"fieldname": fieldname}, property_name)
else:
try:
property_value = frappe.db.get_value("DocType", self.doc_type, property_name)
except Exception, e:
if e.args[0]==1054:
property_value = None
else:
raise
return property_value
def validate_fieldtype_change(self, df, old_value, new_value):
allowed = False
for allowed_changes in self.allowed_fieldtype_change:
if (old_value in allowed_changes and new_value in allowed_changes):
allowed = True
if not allowed:
frappe.throw(_("Fieldtype cannot be changed from {0} to {1} in row {2}").format(old_value, new_value, df.idx))
def reset_to_defaults(self):
if not self.doc_type:
return
frappe.db.sql("""delete from `tabProperty Setter` where doc_type=%s
and ifnull(field_name, '')!='naming_series'""", self.doc_type)
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
|
liweitianux/chandra-acis-analysis
|
refs/heads/master
|
acispy/spectrum.py
|
1
|
# Copyright (c) 2017 Weitian LI <liweitianux@live.com>
# MIT license
"""
Chandra ACIS spectrum.
"""
from astropy.io import fits
from .acis import ACIS
class Spectrum:
"""
Chandra ACIS spectrum
"""
def __init__(self, filepath):
self.filepath = filepath
self.fitsobj = fits.open(filepath)
ext_spec = self.fitsobj["SPECTRUM"]
self.header = ext_spec.header
# spectral data
self.channel = ext_spec.data.columns["CHANNEL"].array
self.counts = ext_spec.data.columns["COUNTS"].array
# spectral keywords
self.EXPOSURE = self.header.get("EXPOSURE")
self.BACKSCAL = self.header.get("BACKSCAL")
def calc_flux(self, elow, ehigh, verbose=False):
"""
Calculate the flux:
            flux = counts / EXPOSURE / BACKSCAL
Parameters
----------
        elow, ehigh : float
            Lower and upper energy limits (eV) within which to calculate the flux.
"""
chlow = ACIS.energy2channel(elow)
chhigh = ACIS.energy2channel(ehigh)
counts = self.counts[(chlow-1):chhigh].sum()
if verbose:
print("counts / exposure / backscale :: %d / %.1f / %.5g" %
(counts, self.EXPOSURE, self.BACKSCAL))
flux = counts / self.EXPOSURE / self.BACKSCAL
return flux
def calc_pb_flux(self, elow=9500, ehigh=12000, verbose=False):
"""
Calculate the particle background (default: 9.5-12 keV) flux.
"""
return self.calc_flux(elow=elow, ehigh=ehigh, verbose=verbose)
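# Minimal usage sketch (the spectrum file below is hypothetical; requires a
# PHA FITS file with a "SPECTRUM" extension):
if __name__ == "__main__":
    spec = Spectrum("acis_spec.pi")  # hypothetical path
    # 9.5-12 keV particle background flux: counts / EXPOSURE / BACKSCAL
    print(spec.calc_pb_flux(verbose=True))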
|
Ryati/satchmo
|
refs/heads/master
|
satchmo/apps/satchmo_store/contact/signals.py
|
14
|
import django.dispatch
"""
Signals for Contacts
"""
#: Sent after a user changes their location in their profile.
#:
#: :param sender: The form which was responsible for the location change.
#: :type sender: ``satchmo_store.contact.forms.ContactInfoForm``
#:
#: :param contact: The contact which was updated with a new location.
#: :type contact: ``satchmo_store.contact.models.Contact``
satchmo_contact_location_changed = django.dispatch.Signal()
#: Sent when contact information is viewed or updated before a template is
#: rendered. Allows you to override the contact information and context passed
#: to the templates used.
#:
#: :param sender: The contact representing the contact information being viewed,
#: or None if the information cannot be found.
#: :type sender: ``satchmo_store.contact.models.Contact``
#:
#: :param contact: The contact representing the contact information being
#: viewed, or None if the information cannot be found.
#: :type contact: ``satchmo_store.contact.models.Contact``
#:
#: :param contact_dict: A dictionary containing the initial data for the
#: instance of ``satchmo_store.contact.forms.ExtendedContactInfoForm``
#: instance that will be rendered to the user.
#:
#: .. Note:: *contact* is the same as *sender*.
satchmo_contact_view = django.dispatch.Signal()
#: Sent when a form that contains postal codes (shipping and billing forms)
#: needs to validate. This signal can be used to custom-validate postal
#: codes. Any listener should return the validated postal code or raise an
#: exception for an invalid postal code.
#:
#: :param sender: The form which is validating its postal codes.
#: :type sender: ``satchmo_store.contact.forms.ContactInfoForm``
#:
#: :param postcode: The postal code as a string being validated.
#:
#: :param country: The country that was selected in the form (or specified in
#: the configuration if local sales are only allowed).
#: :type country: ``l10n.models.Country``
validate_postcode = django.dispatch.Signal()
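# A minimal receiver sketch (illustrative only; the five-digit rule is a
# made-up example, not a Satchmo convention):
#
#     def require_five_digits(sender, postcode=None, country=None, **kwargs):
#         if not (postcode and len(postcode) == 5 and postcode.isdigit()):
#             raise ValueError("Postal code must be 5 digits.")
#         return postcode
#
#     validate_postcode.connect(require_five_digits)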
|
matthiasdiener/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/py-xopen/package.py
|
5
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyXopen(PythonPackage):
"""This small Python module provides a xopen function that works like the
built-in open function, but can also deal with compressed files. Supported
compression formats are gzip, bzip2 and xz. They are automatically
recognized by their file extensions .gz, .bz2 or .xz."""
homepage = "https://github.com/marcelm/xopen"
url = "https://pypi.io/packages/source/x/xopen/xopen-0.1.1.tar.gz"
version('0.1.1', '4e0e955546ee6bee4ea736b54623a671')
depends_on('py-setuptools', type='build')
depends_on('python@2.6:', type=('build', 'run'))
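# Usage sketch of the installed module (illustrative, not part of this spack
# recipe): compression is picked from the file extension, so reading a .gz,
# .bz2 or .xz file looks exactly like reading plain text:
#
#     from xopen import xopen
#     with xopen("reads.fastq.gz") as f:  # hypothetical file
#         for line in f:
#             process(line)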
|
SwankSwashbucklers/bottle-builder
|
refs/heads/master
|
bottle-builder.py
|
1
|
"""
"""
################################################################################
##### Command Line Interface ###################################################
################################################################################
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from tempfile import gettempdir
import os
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
description=__doc__ )
parser.add_argument("-p", "--path",
type=str,
help="the path to the desired location of the generated site")
parser.add_argument("-d", "--deploy",
action="store_true",
help="package site for movement to deployment server. Default path is the"
"current working directory, but the path flag will override that value" )
parser.add_argument("-r", "--reuse",
action="store_true",
help="if an already built website exists at the targeted path, attempt to"
"reuse already present resources (i.e. images, favicon elements and other"
"static resources)" )
args = parser.parse_args()
if args.path is None:
args.path = os.getcwd()
# if args.deploy:
# args.path = os.getcwd()
# else:
# args.path = gettempdir()
################################################################################
##### Overrides ################################################################
################################################################################
from string import Template
from re import compile
class TemplateWrapper():
def __init__(self, cls):
PYTHON_LL = 80
HTML_LL = 112
self.cls = cls
self.headers = [
( # Primary python file header template
compile(r'\$ph{(.*?)}'),
lambda x: "\n\n{1}\n##### {0} {2}\n{1}\n".format(
x.upper(), '#'*PYTHON_LL, '#'*(PYTHON_LL-len(x)-7) )
),
( # Secondary python file header template
compile(r'\$sh{(.*?)}'),
lambda x: "\n### {0} {1}".format(
x, '#'*(PYTHON_LL-len(x)-5) )
),
( # HTML file header template
compile(r'\$wh{(.*?)}'),
lambda x: "<!-- ***** {0} {1} -->".format(
x, '*'*(HTML_LL-len(x)-16) )
)
]
def __call__(self, template):
for header in self.headers:
ptn, tpl = header
for match in ptn.finditer(template):
replacements = ( match.group(0), tpl(match.group(1)) )
template = template.replace(*replacements)
template_obj = self.cls(template)
template_obj.populate = self.populate
return template_obj
@staticmethod
def populate(template, filepath, **kwargs):
for key, value in kwargs.items():
if isinstance(value, list):
kwargs[key] = "\n".join(
[ t[0].safe_substitute(**t[1]) for t in value ]
)
try:
with open(filepath, 'w') as f:
f.write(template.safe_substitute(**kwargs))
except Exception as exception:
raise exception
Template = TemplateWrapper(Template)
from subprocess import Popen, call, DEVNULL, STDOUT, PIPE
from sys import executable
def sPopen(*args):
command, shell = list(args), True
if command[0] == 'python':
command[0] = executable
shell = False
if os.name == 'nt':
from subprocess import CREATE_NEW_CONSOLE
return Popen( command, shell=shell, creationflags=CREATE_NEW_CONSOLE )
else:
return Popen( command, shell=shell )
def sCall(*args):
command, shell = list(args), True
if command[0] == 'python':
command[0] = executable
shell = False
if os.name != 'nt':
shell = False
call( command, shell=shell, stdout=DEVNULL, stderr=STDOUT )
################################################################################
##### Templates ################################################################
################################################################################
APP_PY_TEMPLATE = Template("""\
\"""
${doc_string}
\"""
from bottle import run, route, get, post, error
from bottle import static_file, template, request
from bottle import HTTPError
$ph{Command Line Interface}
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from inspect import getframeinfo, currentframe
from os.path import dirname, abspath
import os
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
description=__doc__ )
parser.add_argument('-d', '--deploy',
action='store_true',
help='Run server for deployment' )
parser.add_argument('-i', '--ip',
type=str,
default="127.0.0.1",
help='ip to run the server against, default localhost' )
parser.add_argument('-p', '--port',
type=str,
default="8080",
help='port to run server on' )
args = parser.parse_args()
# change working directory to script directory
os.chdir(dirname(abspath(getframeinfo(currentframe()).filename)))
$ph{Main Site Routes}
${main_routes}
$ph{API and Additional Site Routes}
${api_routes}
$ph{Static Routes}
${static_routes}
$sh{Favicon Routes}
${favicon_routes}
$sh{Image Routes}
${image_routes}
$sh{Font Routes}
${font_routes}
$sh{Stylesheet Routes}
${css_routes}
$sh{Javascript Routes}
${js_routes}
$ph{Error Routes}
@error(404)
def error404(error):
return 'nothing to see here'
$ph{Run Server}
if args.deploy:
run(host=args.ip, port=args.port, server='cherrypy') #deployment
else:
run(host=args.ip, port=args.port, debug=True, reloader=True) #development
""" )
MAIN_ROUTE_TEMPLATE = Template("""\
@route('/${path}')
def ${method_name}():
return template('${template}', request=request, template='${template}')
""" )
STATIC_ROUTE_TEMPLATE = Template("""\
@get('/${path}')
def load_resource():
return static_file('${file}', root='${root}')
""" )
WATCH_SASS_SCRIPT = Template("""\
from sys import argv, exit
from signal import signal, SIGTERM, SIGINT
from shutil import rmtree
from subprocess import Popen
from inspect import getframeinfo, currentframe
from os.path import dirname, abspath, isdir, isfile
from os import chdir, remove
def signal_term_handler(signal, frame):
    if p is not None: p.kill()
if isfile("_all.scss"): remove("_all.scss")
if isdir(".sass-cache"): rmtree(".sass-cache")
print(argv[0])
remove("watch.py") # argv[0] contains full path
exit(0)
p = None
signal(SIGTERM, signal_term_handler)
signal(SIGINT, signal_term_handler)
chdir(dirname(abspath(getframeinfo(currentframe()).filename)))
command = "sass --watch"
for x in range(1, len(argv)):
command += " {0}.scss:../../www/static/css/{0}.css".format(argv[x])
p = Popen(command, shell=True)
p.wait()
""" )
################################################################################
##### Script Body ##############################################################
################################################################################
from os.path import relpath, abspath, normpath, join, isfile, isdir, splitext
from shutil import copy, copyfileobj, rmtree
from urllib.request import urlopen
from time import sleep
from re import match, search
from sys import exit
SCRIPT_DIR = os.getcwd()
PROJECT_NAME = relpath(SCRIPT_DIR, "..")
STATIC_ROUTE = lambda p, f, r: \
( STATIC_ROUTE_TEMPLATE, { "path": p, "file": f, "root": r } )
MAIN_ROUTE = lambda p, m, t: \
( MAIN_ROUTE_TEMPLATE, { "path": p, "method_name": m, "template": t } )
def migrate_files(directory, destination):
src_path = join(SCRIPT_DIR, directory)
if not isdir(destination): os.makedirs(destination)
for root, dirs, files in os.walk(src_path):
for dirname in dirs:
            if dirname.startswith('!') or dirname in ['.DS_Store']:
dirs.remove(dirname)
for filename in files:
if not filename.startswith('!') and filename not in ['.DS_Store']:
                if not isfile(join(destination, filename)):  # added for the reuse flag
copy(join(root, filename), join(destination, filename))
if not filename.startswith('~'):
yield normpath(join(relpath(root, src_path),
filename) ).replace('\\', '/')
def migrate_views():
routes = [ MAIN_ROUTE("", "load_root", "index") ]
for route in migrate_files("dev/views", "views"):
tpl_name = splitext(route.split("/")[-1])[0]
if tpl_name == "index":
continue
routes.append(MAIN_ROUTE(
splitext(route)[0],
"load_" + tpl_name.replace("-","_"),
tpl_name
))
return routes
def get_api_routes():
with open( join(SCRIPT_DIR, "dev/py", "routes.py"), 'r') as f:
return f.read()
def migrate_static_files(source, destination):
return [ STATIC_ROUTE(r, r.split("/")[-1], destination)
for r in migrate_files(source, destination) ]
def generate_favicon_resources():
fav_tpl = lambda r: "favicon-{0}x{0}.png".format(r)
and_tpl = lambda r: "touch-icon-{0}x{0}.png".format(r)
app_tpl = lambda r: "apple-touch-icon-{0}x{0}.png".format(r)
pra_tpl = lambda r: "apple-touch-icon-{0}x{0}-precomposed.png".format(r)
fav_path = lambda p: normpath(join("static/favicon", p))
favicon_tpl = normpath(join(SCRIPT_DIR, "res/favicon.svg"))
ico_res = [ "16", "24", "32", "48", "64", "128", "256" ]
fav_res = [ "16", "32", "96", "160", "196", "300" ]
android_res = [ "192" ]
apple_res = [ "57", "76", "120", "152", "180" ] # add to head backwards
if not isdir("static/favicon"): os.makedirs("static/favicon")
# generate favicon resources
for res in (list(set(ico_res) | set(fav_res)) + android_res + apple_res):
if res in android_res: path = abspath( fav_path(and_tpl(res)) )
elif res in apple_res: path = abspath( fav_path(app_tpl(res)) )
else: path = abspath( fav_path(fav_tpl(res)) )
sCall("inkscape", "-z", "-e", path, "-w", res, "-h", res, favicon_tpl)
sCall( *(["convert"] + [fav_path(fav_tpl(r)) for r in ico_res] +
[fav_path("favicon.ico")]) )
for res in [ r for r in ico_res if r not in fav_res ]:
os.remove(fav_path(fav_tpl(res)))
# return routes for generated favicon resources
fav_route = lambda f: STATIC_ROUTE(f, f, "static/favicon")
app_route = lambda p,t: STATIC_ROUTE(p, t("57"), "static/favicon")
return ([ fav_route("favicon.ico") ] +
[ fav_route(fav_tpl(r)) for r in fav_res ] +
[ fav_route(and_tpl(r)) for r in android_res ] +
[ fav_route(app_tpl(r)) for r in apple_res if r!="57" ] +
[ fav_route(pra_tpl(r)) for r in apple_res if r!="57" ] +
[ app_route("apple-touch-icon.png", app_tpl),
app_route("apple-touch-icon-precomposed.png", pra_tpl) ])
def generate_stylesheets():
dev_path = join( SCRIPT_DIR, "dev/sass" )
is_sass = lambda f: splitext(f)[-1].lower() in ['.scss', '.sass']
is_mixin = lambda f: match(r'.*mixins?$', splitext(f)[0].lower())
get_import = lambda p: [ join( relpath(r, dev_path), f )
for r, d, fs in os.walk( join(dev_path, p) )
for f in fs if is_sass(f) ]
if not isdir("static/css"): os.makedirs("static/css")
# generate _all.scss file from existing sass resources
with open( join( dev_path, '_all.scss' ), 'w') as f:
f.write('\n'.join( # probably not the most efficient way
[ '@import "{}";'.format(path.replace('\\', '/')) for path in
( # mixins and global variables must be imported first
# modules
[ f for f in get_import('modules') ]
# vendor mixins
+ [ f for f in get_import('vendor') if is_mixin(f) ]
# all other vendor files
+ [ f for f in get_import('vendor') if not is_mixin(f) ]
                # partials (comment out this line for manual selection)
+ [ f for f in get_import('partials') ]
)
] )
)
# use sass command line tool to generate stylesheets
stylesheets = [ splitext(f)[0] for f in os.listdir(dev_path)
if is_sass(f) and not f.startswith('_') ]
sass_path = relpath(dev_path, os.getcwd()).replace('\\', '/')
if args.deploy:
for s in stylesheets:
sCall("sass", sass_path+"/"+s+".scss", "static/css/"+s+".min.css",
"-t", "compressed", "--sourcemap=none", "-C")
os.remove( join(dev_path, "_all.scss") )
else:
Template.populate(WATCH_SASS_SCRIPT, '../dev/sass/watch.py')
command = "sass --watch"
for s in stylesheets:
command += " ../dev/sass/{0}.scss:./static/css/{0}.css".format(s)
p = Popen(command, shell=True)
#p = sPopen( 'python', '../dev/sass/watch.py', *stylesheets )
sleep(3) # delay so the stylesheets have time to be created
p.kill() # note: kill sends SIGKILL
# return css routes from generated stylesheets
return [ STATIC_ROUTE(f, f, "static/css") for f in os.listdir("static/css")]
def generate_javascript():
return migrate_static_files("dev/js", "static/js")
def get_favicon_head():
link_tpl = lambda c: ' <link {0}>\n'.format(c)
all_favs = os.listdir('static/favicon')
favicons = [ x for x in all_favs if x.startswith('favicon') ]
apple_favs = [ x for x in all_favs if x.startswith('apple') ]
android_favs = [ x for x in all_favs if not x in favicons + apple_favs ]
fav_head = link_tpl('rel="shortcut icon" href="favicon.ico"')
favicons.remove('favicon.ico')
def gen_head(fav_tpl, fav_set):
dic = {}
for fav in fav_set:
res = int(search(r'([0-9]+)x', fav).group(1))
dic[res] = fav
keys = list(dic.keys())
keys.sort()
keys.reverse()
for key in keys:
yield link_tpl( fav_tpl.format(key, dic[key]) )
for fav_set in [
('rel="icon" sizes="{0}x{0}" href="/{1}"', android_favs),
('rel="apple-touch-icon" sizes="{0}x{0}" href="/{1}"', apple_favs),
('rel="icon" type="image/png" sizes="{0}x{0}" href="/{1}"', favicons) ]:
fav_head += "".join( gen_head(*fav_set) )
return fav_head
def get_opengraph_head():
og_head_string = """\
% url = request.environ['HTTP_HOST']
<meta property="og:url" content="http://{{url}}/">
<meta property="og:type" content="website">
<meta property="og:title" content="{{title}}">
<meta property="open_graph_image">
<meta property="og:description" content="{{description}}">"""
og_image_string = """<meta property="og:image:type" content="image/png">
<meta property="og:image:width" content="300">
<meta property="og:image:height" content="300">
<meta property="og:image:url" content="http://{{url}}/favicon-300x300.png">
<meta property="og:image" content="http://{{url}}/favicon-300x300.png">"""
if isfile("static/favicon/favicon-300x300.png"):
og_head_string = og_head_string.replace(
'<meta property="open_graph_image">',
og_image_string
)
return og_head_string
def get_stylesheet_head():
styles_tpl = ' <link rel="stylesheet" type="text/css" href="/{0}">\n'
stylesheets = os.listdir('static/css')
styles_head = ''
for style in stylesheets:
if style.split('.')[0] == 'styles':
styles_head += styles_tpl.format(style)
stylesheets.remove(style)
break
stylesheets = [ s.split('.')[0] for s in stylesheets ]
styles_head += " % if template in {}:\n".format(stylesheets)
tpl_style = '{{template}}.min.css' if args.deploy else '{{template}}.css'
styles_head += styles_tpl.format(tpl_style)
styles_head += " % end"
return styles_head
os.chdir(args.path)
if isdir("www"): rmtree("www")
os.makedirs("www")
os.chdir("www")
### Import Bottle Framework ####################################################
from urllib.error import URLError
bottle_url = "https://raw.githubusercontent.com/bottlepy/bottle/master/bottle.py"
try:
with urlopen(bottle_url) as response, open('bottle.py', 'wb') as f:
copyfileobj(response, f)
except URLError as e:
print(e)
### Generate App.py ############################################################
Template.populate(APP_PY_TEMPLATE, 'app.py',
doc_string="",
main_routes=migrate_views(),
api_routes=get_api_routes(),
static_routes=migrate_static_files("res/static", "static"),
favicon_routes=generate_favicon_resources(),
image_routes=migrate_static_files("res/img", "static/img"),
font_routes=migrate_static_files("res/font", "static/font"),
css_routes=generate_stylesheets(),
js_routes=generate_javascript() )
### Generate Head Template #####################################################
if isfile('views/~head.tpl'): os.remove('views/~head.tpl')
head_tpl = ""
with open(join(SCRIPT_DIR, "dev/views/~head.tpl"), 'r') as head:
head_tpl = head.read()
metas = [ "Favicon_Resources", "Open_Graph", "Style_Sheets" ]
for meta in metas:
head_tpl = head_tpl.replace(
'<meta name="'+meta.lower()+'">',
'\n$wh{'+meta.replace('_', ' ')+'}\n${'+meta.lower()+'}'
)
Template.populate(Template(head_tpl), 'views/~head.tpl',
favicon_resources=get_favicon_head(),
open_graph=get_opengraph_head(),
style_sheets=get_stylesheet_head() )
### Packaging For Deployment ###################################################
if not args.deploy:
#sCall('python', 'app.py', '-p', '8081')
exit(0)
from zipfile import ZipFile
os.chdir('..') # work on this
if isfile('www.zip'): os.remove('www.zip')
with ZipFile('www.zip', 'w') as zip_file:
for root, dirs, files in os.walk( join(os.getcwd(), 'www') ):
rel_path = relpath(root, os.getcwd())
for f in files:
zip_file.write( join(rel_path, f) )
# set up watch for template and js files using watchdog
#
#
# from zipfile import ZipFile
#
# def package_site():
|
shamindrasorg/eda_play
|
refs/heads/master
|
data/repeated-phrases-gop/robopol2.py
|
2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import random
import numpy
import math
import string
import operator
from collections import defaultdict
# candidates to pay attention to
candidates = ["TRUMP", "CRUZ", "RUBIO", "KASICH"]
# n-gram lengths to iterate through
min_N = 1 # inclusive
max_N = 15 # exclusive
####
#### HELPER FUNCTIONS
####
# returns a dict mapping each n-gram that appears in the corpus to its frequency in the corpus
def ngram_freqs(corpus, n):
# generate a list of all n-grams in the corpus
ngrams = []
for i in range(n, len(corpus)):
if not "<BR>" in tuple(corpus[i-n:i]):
ngrams += [tuple(corpus[i-n:i])]
# count the frequency of each n-gram
freq_dict = defaultdict(int)
for ngram in ngrams:
freq_dict[ngram] += 1
return freq_dict
# combines two dicts by performing the provided operation on their values
def combine_dicts(a, b, op=operator.add):
return dict(a.items() + b.items() + [(k, op(a[k], b[k])) for k in set(b) & set(a)])
# checks whether two n-grams overlap too much to include both
def overlap(a, b):
max_overlap = min(3, len(a), len(b))
overlap = False
    # the beginning of a is in b
if '-'.join(a[:max_overlap]) in '-'.join(b):
overlap = True
# the end of a is in b
if '-'.join(a[-max_overlap:]) in '-'.join(b):
overlap = True
    # the beginning of b is in a
if '-'.join(b[:max_overlap]) in '-'.join(a):
overlap = True
# the end of b is in a
if '-'.join(b[-max_overlap:]) in '-'.join(a):
overlap = True
return overlap
####
#### ANALYSIS FUNCTIONS
####
# returns a list of corpora, each a sequential list of all words said by one candidate
def corpus_list_from_file(filename):
# load all words from the file into memory
words = open(filename).read().split()
# initialize the list of corpora
corpus_list = []
for candidate in candidates:
corpus_list += [[]]
# iterate through words, putting them in the correct corpus
speaker_index = -1
for word in words:
# change of speaker
if word[-1] == ":" and word.isupper():
# name of the new speaker
speaker = word[:-1]
# speaker is one of the candidates
if speaker in candidates:
speaker_index = candidates.index(speaker)
# speaker is moderator or candidate not listed
else:
speaker_index = -1
# add a speaking break indicator
corpus_list[speaker_index] += ["<BR>"]
# regular word
        elif word[0] != "(" and word[-1] != ")":
# remove punctuation and convert to lowercase
word = word.translate(string.maketrans("",""), string.punctuation).lower()
if speaker_index >= 0:
if word is not "":
corpus_list[speaker_index] += [word]
return corpus_list
# returns a list of dicts, each mapping an n-gram to its frequency in the respective corpus
def freq_dicts_from_corpus_list(corpus_list):
# initialize the list of dicts
freq_dicts = []
for candidate in range(len(candidates)):
freq_dicts += [defaultdict(int)]
# iteratively add all n-grams
for n in range(min_N, max_N):
for candidate in range(len(candidates)):
corpus = corpus_list[candidate]
dict_to_add = ngram_freqs(corpus, n)
freq_dicts[candidate] = combine_dicts(freq_dicts[candidate], dict_to_add)
return freq_dicts
# returns a list of dicts, each mapping an n-gram to its tf-idf in the respective corpus
# see https://en.wikipedia.org/wiki/Tf-idf for further information
def tfidf_dicts_from_freq_dicts(freq_dicts):
# initialize the list of dicts
tfidf_dicts = []
for candidate in range(len(candidates)):
tfidf_dicts += [defaultdict(int)]
# create a dict that maps an n-gram to the number of corpora containing that n-gram
num_containing = defaultdict(int)
for candidate in range(len(candidates)):
for ngram in freq_dicts[candidate]:
num_containing[ngram] += 1
# calculate tf-idf for each n-gram in each corpus
for candidate in range(len(candidates)):
for ngram in freq_dicts[candidate]:
tf = freq_dicts[candidate][ngram]
idf = math.log(len(candidates) / num_containing[ngram])
# normalize by length of n-gram
tfidf_dicts[candidate][ngram] = tf * idf * len(ngram)
# kill anything ending in "and" "or" "of" "with"
if ngram[-1] in ["and", "or", "of", "with"]:
tfidf_dicts[candidate][ngram] = 0
return tfidf_dicts
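# Worked example (illustrative): with 4 candidates, a 3-gram said 5 times by
# exactly one of them scores tf * idf * len = 5 * log(4/1) * 3 ~ 20.8, while
# any n-gram used by all four gets idf = log(4/4) = 0 and drops out entirely.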
# kills any phrase (tfidf=0) contained inside a larger phrase with a higher score
def prune_substrings(tfidf_dicts, prune_thru=1000):
pruned = tfidf_dicts
for candidate in range(len(candidates)):
# growing list of n-grams in list form
so_far = []
ngrams_sorted = sorted(tfidf_dicts[candidate].items(), key=operator.itemgetter(1), reverse=True)[:prune_thru]
for ngram in ngrams_sorted:
# contained in a previous aka 'better' phrase
for better_ngram in so_far:
if overlap(list(better_ngram), list(ngram[0])):
#print "PRUNING!! "
#print list(better_ngram)
#print list(ngram[0])
pruned[candidate][ngram[0]] = 0
# not contained, so add to so_far to prevent future subphrases
else:
so_far += [list(ngram[0])]
return pruned
# sorts the n-grams for a candidate by tf-idf
def top_ngrams_for_candidate(tfidf_dicts, candidate, count=20):
return sorted(tfidf_dicts[candidate].items(), key=operator.itemgetter(1), reverse=True)[:count]
def main():
corpus_list = corpus_list_from_file("gop_debate_all.txt")
freq_dicts = freq_dicts_from_corpus_list(corpus_list)
tfidf_dicts = tfidf_dicts_from_freq_dicts(freq_dicts)
tfidf_dicts = prune_substrings(tfidf_dicts)
# print the top ngrams sorted by tfidf
for candidate in range(len(candidates)):
print candidates[candidate]
for ngram in top_ngrams_for_candidate(tfidf_dicts, candidate, 400):
print ngram
if __name__ == '__main__':
main()
|
bgris/ODL_bgris
|
refs/heads/master
|
lib/python3.5/site-packages/skimage/morphology/tests/__init__.py
|
672
|
from ..._shared.testing import setup_test, teardown_test
def setup():
setup_test()
def teardown():
teardown_test()
|
amoad/amoad-native-cocos2dx-sdk
|
refs/heads/master
|
AMoAdNativeCocos2dxDemo/cocos2d/plugin/tools/android-build.py
|
240
|
#!/usr/bin/python
# android-build.py
# Build android samples
import sys
import os, os.path
import shutil
from optparse import OptionParser
CPP_SAMPLES = ["HelloPlugins"]
ALL_SAMPLES = CPP_SAMPLES
def check_environment_variables():
''' Checking the environment NDK_ROOT, which will be used for building
'''
try:
NDK_ROOT = os.environ['NDK_ROOT']
except Exception:
print "NDK_ROOT not defined. Please define NDK_ROOT in your environment"
sys.exit(1)
return NDK_ROOT
def select_toolchain_version():
'''Because ndk-r8e uses gcc4.6 as default. gcc4.6 doesn't support c++11. So we should select gcc4.7 when
using ndk-r8e. But gcc4.7 is removed in ndk-r9, so we should determine whether gcc4.7 exist.
    Conclusion:
ndk-r8e -> use gcc4.7
ndk-r9 -> use gcc4.8
'''
ndk_root = check_environment_variables()
if os.path.isdir(os.path.join(ndk_root,"toolchains/arm-linux-androideabi-4.8")):
os.environ['NDK_TOOLCHAIN_VERSION'] = '4.8'
print "The Selected NDK toolchain version was 4.8 !"
elif os.path.isdir(os.path.join(ndk_root,"toolchains/arm-linux-androideabi-4.7")):
os.environ['NDK_TOOLCHAIN_VERSION'] = '4.7'
print "The Selected NDK toolchain version was 4.7 !"
else:
print "Couldn't find the gcc toolchain."
exit(1)
def caculate_built_samples(args):
    ''' Compute the samples to be built
        'cpp' is shorthand for all cpp samples
        'jsb' is shorthand for all javascript samples
    '''
if 'all' in args:
return ALL_SAMPLES
if 'jsb' in args:
return JSB_SAMPLES
if 'cpp' in args:
return CPP_SAMPLES
targets = []
targets += args
# remove duplicate elements, for example
# python android-build.py cpp hellocpp
targets = set(targets)
return list(targets)
def do_build(plugin_root, cocos_root, ndk_root, app_android_root, ndk_build_param):
ndk_path = os.path.join(ndk_root, "ndk-build")
# windows should use ";" to seperate module paths
platform = sys.platform
if platform == 'win32':
ndk_module_path = 'NDK_MODULE_PATH=%s/publish;%s;%s/external;%s/cocos' % (plugin_root, cocos_root, cocos_root, cocos_root)
else:
ndk_module_path = 'NDK_MODULE_PATH=%s/publish:%s:%s/external:%s/cocos' % (plugin_root, cocos_root, cocos_root, cocos_root)
if ndk_build_param == None:
command = '%s -C %s %s' % (ndk_path, app_android_root, ndk_module_path)
else:
command = '%s -C %s %s %s' % (ndk_path, app_android_root, ndk_build_param, ndk_module_path)
os.system(command)
def copy_files(src, dst):
for item in os.listdir(src):
path = os.path.join(src, item)
# Android can not package the file that ends with ".gz"
if not item.startswith('.') and not item.endswith('.gz') and os.path.isfile(path):
shutil.copy(path, dst)
if os.path.isdir(path):
new_dst = os.path.join(dst, item)
if not os.path.exists(new_dst):
os.mkdir(new_dst)
copy_files(path, new_dst)
def copy_resources(target, app_android_root, plugin_root):
# remove app_android_root/assets if it exists
assets_dir = os.path.join(app_android_root, "assets")
if os.path.isdir(assets_dir):
shutil.rmtree(assets_dir)
# copy resources(cpp samples and lua samples)
os.mkdir(assets_dir)
resources_dir = os.path.join(app_android_root, "../Resources")
if os.path.isdir(resources_dir):
copy_files(resources_dir, assets_dir)
# jsb samples should copy javascript files and resources(shared with cocos2d-html5)
# if target in JSB_SAMPLES:
# resources_dir = os.path.join(app_android_root, "../../../../cocos/scripting/javascript/script")
# copy_files(resources_dir, assets_dir)
# resources_dir = os.path.join(plugin_root, "jsbindings/js")
# copy_files(resources_dir, assets_dir)
# copy plugin resources to the assets
plugins_dir = os.path.join(plugin_root, "publish" + os.path.sep + "plugins")
for item in os.listdir(plugins_dir):
src = os.path.join(plugins_dir, item + os.path.sep + "android" + os.path.sep + "ForAssets")
if os.path.isdir(src):
copy_files(src, assets_dir)
def copy_clibs(app_android_root, plugin_root):
target_cpath = os.path.join(app_android_root, "libs")
plugins_dir = os.path.join(plugin_root, "publish" + os.path.sep + "plugins")
for item in os.listdir(plugins_dir):
src = os.path.join(plugins_dir, item + os.path.sep + "android" + os.path.sep + "CLibs")
if os.path.isdir(src):
if not os.path.exists(target_cpath):
os.mkdir(target_cpath)
copy_files(src, target_cpath)
def build_samples(target,ndk_build_param):
ndk_root = check_environment_variables()
select_toolchain_version()
build_targets = caculate_built_samples(target)
current_dir = os.path.dirname(os.path.realpath(__file__))
cocos_root = os.path.join(current_dir, "../../")
plugin_root = os.path.join(current_dir, "..")
app_android_root = ''
for target in build_targets:
app_android_root = os.path.join(plugin_root, "samples" + os.path.sep + target + os.path.sep + "proj.android")
copy_resources(target, app_android_root, plugin_root)
do_build(plugin_root, cocos_root, ndk_root, app_android_root, ndk_build_param)
copy_clibs(app_android_root, plugin_root)
# -------------- main --------------
if __name__ == '__main__':
usage = "usage: %prog all"
#parse the params
parser = OptionParser(usage=usage)
parser.add_option("-n", "--ndk", dest="ndk_build_param",
help='parameter for ndk-build')
(opts, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
else:
build_samples(args, opts.ndk_build_param)
|
sfepy/sfepy
|
refs/heads/master
|
sfepy/postprocess/plot_dofs.py
|
6
|
"""
Functions to visualize the mesh connectivity with global and local DOF
numberings.
"""
import numpy as nm
import matplotlib.pyplot as plt
def _get_axes(ax, dim):
if ax is None:
fig = plt.figure()
if dim == 3:
from mpl_toolkits.mplot3d import axes3d
axes3d # Make pyflakes happy...
ax = fig.add_subplot(111, projection='3d')
else:
ax = fig.add_subplot(111)
return ax
def _to2d(coors):
if coors.shape[1] == 1:
coors = nm.c_[coors, nm.zeros_like(coors)]
return coors
def plot_points(ax, coors, vals=None, point_size=20,
show_colorbar=False):
"""
Plot points with given coordinates, optionally colored using `vals` values.
"""
dim = coors.shape[1]
ax = _get_axes(ax, dim)
colors = 'b' if vals is None else vals
coors = _to2d(coors)
sc = ax.scatter(*coors.T, s=point_size, c=colors, alpha=1)
if show_colorbar and (vals is not None):
plt.colorbar(sc)
return ax
def plot_mesh(ax, coors, conn, edges):
"""
Plot a finite element mesh as a wireframe.
"""
dim = coors.shape[1]
ax = _get_axes(ax, dim)
coors = _to2d(coors)
for el in conn:
eds = el[edges]
for ed in eds:
cc = coors[ed]
ax.plot(*cc.T, color='k')
return ax
def plot_global_dofs(ax, coors, econn):
"""
Plot global DOF numbers given in an extended connectivity.
The DOF numbers are plotted for each element, so on common facets they are
plotted several times - this can be used to check the consistency of the
global DOF connectivity.
"""
dim = coors.shape[1]
ax = _get_axes(ax, dim)
coors = _to2d(coors)
for el in econn:
for gdof in el:
ax.text(*coors[gdof], s='%d' % gdof,
color='g', fontsize=12, weight='bold')
return ax
def plot_local_dofs(ax, coors, econn):
"""
Plot local DOF numbers corresponding to an extended connectivity.
"""
dim = coors.shape[1]
ax = _get_axes(ax, dim)
coors = _to2d(coors)
eps = 0.1
oeps = 1.0 - eps
for el in econn:
# Element centre.
centre = coors[el].sum(0) / el.shape[0]
for ldof, gdof in enumerate(el):
# Shift labels towards the centre.
cc = oeps * coors[gdof] + eps * centre
ax.text(*cc, s='%d' % ldof,
color='b', fontsize=10, weight='light')
return ax
def plot_nodes(ax, coors, econn, ref_nodes, dofs):
"""
Plot Lagrange reference element nodes corresponding to global DOF numbers
given in an extended connectivity.
"""
dim = coors.shape[1]
ax = _get_axes(ax, dim)
coors = _to2d(coors)
eps = 0.2
oeps = 1.0 - eps
for el in econn:
# Element centre.
centre = coors[el].sum(0) / el.shape[0]
for gdof in dofs:
            if gdof not in el:
continue
ldof = nm.where(el == gdof)[0]
node = ref_nodes[ldof]
# Shift labels towards the centre.
cc = oeps * coors[gdof] + eps * centre
ax.text(*cc, s='%s' % node,
color='r', fontsize=8, weight='light')
return ax
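# A minimal usage sketch for the helpers above, assuming a single-triangle 2D
# mesh (the coordinates and connectivities below are made up for illustration):
#
#   coors = nm.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#   conn = nm.array([[0, 1, 2]])
#   edges = nm.array([[0, 1], [1, 2], [2, 0]])
#   ax = plot_mesh(None, coors, conn, edges)
#   plot_global_dofs(ax, coors, conn)
#   plt.show()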
|
puuu/micropython
|
refs/heads/master
|
tests/basics/string_rpartition.py
|
16
|
try:
str.partition
except AttributeError:
print("SKIP")
import sys
sys.exit()
print("asdf".rpartition('g'))
print("asdf".rpartition('a'))
print("asdf".rpartition('s'))
print("asdf".rpartition('f'))
print("asdf".rpartition('d'))
print("asdf".rpartition('asd'))
print("asdf".rpartition('sdf'))
print("asdf".rpartition('as'))
print("asdf".rpartition('df'))
print("asdf".rpartition('asdf'))
print("asdf".rpartition('asdfa'))
print("asdf".rpartition('fasdf'))
print("asdf".rpartition('fasdfa'))
print("abba".rpartition('a'))
print("abba".rpartition('b'))
try:
print("asdf".rpartition(1))
except TypeError:
print("Raised TypeError")
else:
print("Did not raise TypeError")
try:
print("asdf".rpartition(''))
except ValueError:
print("Raised ValueError")
else:
print("Did not raise ValueError")
|
Openlights/firemix
|
refs/heads/master
|
lib/plugin_loader.py
|
1
|
# This file is part of Firemix.
#
# Copyright 2013-2016 Jonathan Evans <jon@craftyjon.com>
#
# Firemix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firemix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firemix. If not, see <http://www.gnu.org/licenses/>.
from builtins import object
import os
import logging
import inspect
log = logging.getLogger("firemix.lib.plugin_loader")
#TODO: This and PatternLoader share a lot of code...
class PluginLoader(object):
"""
Scans the ./plugins/ directory and imports objects into lists based on base class
Based on code copyright 2005 Jesse Noller <jnoller@gmail.com>
http://code.activestate.com/recipes/436873-import-modulesdiscover-methods-from-a-directory-na/
"""
def __init__(self):
self._classes = {}
self.load()
def load(self):
self._classes = {}
log.info("Loading plugins...")
for f in os.listdir(os.path.join(os.getcwd(), "plugins")):
module_name, ext = os.path.splitext(f)
if ext == ".py":
# Skip emacs lock files.
if f.startswith('.#'):
continue
module = __import__("plugins." + module_name, fromlist=['dummy'])
for name, obj in inspect.getmembers(module, inspect.isclass):
bases = inspect.getmro(obj)
if len(bases) > 1:
base = bases[1].__name__.rsplit('.',1)[0]
if self._classes.get(base, None) is None:
self._classes[base] = []
self._classes[base].append(obj)
log.info("Loaded %s::%s" % (base, obj.__name__))
def get(self, base):
return self._classes.get(base, [])
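# A minimal usage sketch (the base-class name "Pattern" is an assumption for
# illustration; the actual keys depend on the classes found under ./plugins/):
#
#   loader = PluginLoader()
#   for cls in loader.get('Pattern'):
#       log.info("discovered plugin class %s", cls.__name__)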
|
greyhwndz/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-subsystem.py
|
239
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure subsystem setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('subsystem.gyp', chdir=CHDIR)
test.build('subsystem.gyp', 'test_console_ok', chdir=CHDIR)
test.build('subsystem.gyp', 'test_console_fail', chdir=CHDIR, status=1)
test.build('subsystem.gyp', 'test_windows_ok', chdir=CHDIR)
test.build('subsystem.gyp', 'test_windows_fail', chdir=CHDIR, status=1)
test.build('subsystem.gyp', 'test_console_xp', chdir=CHDIR)
test.build('subsystem.gyp', 'test_windows_xp', chdir=CHDIR)
# Make sure we are targeting XP.
def GetHeaders(exe):
return test.run_dumpbin('/headers', test.built_file_path(exe, chdir=CHDIR))
if '5.01 subsystem version' not in GetHeaders('test_console_xp.exe'):
test.fail_test()
if '5.01 subsystem version' not in GetHeaders('test_windows_xp.exe'):
test.fail_test()
# TODO(scottmg): There are other subsystems (WinCE, etc.) that we don't use.
test.pass_test()
|
afandria/mojo
|
refs/heads/master
|
mojo/public/python/mojo_bindings/__init__.py
|
1201
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
|
leakim/svtplay-dl
|
refs/heads/master
|
lib/svtplay_dl/fetcher/__init__.py
|
1
|
from __future__ import absolute_import
from svtplay_dl.utils import HTTP
class VideoRetriever(object):
def __init__(self, options, url, bitrate=0, **kwargs):
self.options = options
self.url = url
self.bitrate = int(bitrate)
self.kwargs = kwargs
self.http = HTTP()
def name(self):
pass
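# A minimal subclass sketch (the class name, its return value and the
# constructor arguments are assumptions for illustration only):
#
#   class DummyRetriever(VideoRetriever):
#       def name(self):
#           return "dummy"
#
#   r = DummyRetriever(options=None, url="http://example.com/video", bitrate=500)
#   print(r.bitrate)  # -> 500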
|
RayMick/scikit-learn
|
refs/heads/master
|
benchmarks/bench_rcv1_logreg_convergence.py
|
149
|
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
    # compute the same step_size as in LR-SAG
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
|
aleaxit/pysolper
|
refs/heads/master
|
latrop/lib/dist/werkzeug/templates.py
|
26
|
# -*- coding: utf-8 -*-
r"""
werkzeug.templates
~~~~~~~~~~~~~~~~~~
A minimal template engine.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD License.
"""
import sys
import re
import __builtin__ as builtins
from compiler import ast, parse
from compiler.pycodegen import ModuleCodeGenerator
from tokenize import PseudoToken
from werkzeug import utils, urls
from werkzeug._internal import _decode_unicode
# Copyright notice: The `parse_data` method uses the string interpolation
# algorithm by Ka-Ping Yee which originally was part of `Itpl20.py`_.
#
# .. _Itpl20.py: http://lfw.org/python/Itpl20.py
token_re = re.compile('%s|%s(?s)' % (
r'[uU]?[rR]?("""|\'\'\')((?<!\\)\\\1|.)*?\1',
PseudoToken
))
directive_re = re.compile(r'(?<!\\)<%(?:(#)|(py(?:thon)?\b)|'
r'(?:\s*(\w+))\s*)(.*?)\s*%>\n?(?s)')
escape_re = re.compile(r'\\\n|\\(\\|<%)')
namestart_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
undefined = type('UndefinedType', (object,), {
'__iter__': lambda x: iter(()),
'__repr__': lambda x: 'Undefined',
'__str__': lambda x: ''
})()
runtime_vars = dict.fromkeys(('Undefined', '__to_unicode', '__context',
'__write', '__write_many'))
def call_stmt(func, args, lineno):
return ast.CallFunc(ast.Name(func, lineno=lineno),
args, lineno=lineno)
def tokenize(source, filename):
escape = escape_re.sub
escape_repl = lambda m: m.group(1) or ''
lineno = 1
pos = 0
for match in directive_re.finditer(source):
start, end = match.span()
if start > pos:
data = source[pos:start]
yield lineno, 'data', escape(escape_repl, data)
lineno += data.count('\n')
is_comment, is_code, cmd, args = match.groups()
if is_code:
yield lineno, 'code', args
elif not is_comment:
yield lineno, 'cmd', (cmd, args)
lineno += source[start:end].count('\n')
pos = end
if pos < len(source):
yield lineno, 'data', escape(escape_repl, source[pos:])
def transform(node, filename):
root = ast.Module(None, node, lineno=1)
nodes = [root]
while nodes:
node = nodes.pop()
node.filename = filename
if node.__class__ in (ast.Printnl, ast.Print):
node.dest = ast.Name('__context')
elif node.__class__ is ast.Const and isinstance(node.value, str):
try:
node.value.decode('ascii')
except UnicodeError:
node.value = node.value.decode('utf-8')
nodes.extend(node.getChildNodes())
return root
class TemplateSyntaxError(SyntaxError):
def __init__(self, msg, filename, lineno):
from linecache import getline
l = getline(filename, lineno)
SyntaxError.__init__(self, msg, (filename, lineno, len(l) or 1, l))
class Parser(object):
def __init__(self, gen, filename):
self.gen = gen
self.filename = filename
self.lineno = 1
def fail(self, msg):
raise TemplateSyntaxError(msg, self.filename, self.lineno)
def parse_python(self, expr, type='exec'):
if isinstance(expr, unicode):
expr = '\xef\xbb\xbf' + expr.encode('utf-8')
try:
node = parse(expr, type)
except SyntaxError, e:
raise TemplateSyntaxError(str(e), self.filename,
self.lineno + e.lineno - 1)
nodes = [node]
while nodes:
n = nodes.pop()
if hasattr(n, 'lineno'):
n.lineno = (n.lineno or 1) + self.lineno - 1
nodes.extend(n.getChildNodes())
return node.node
def parse(self, needle=()):
start_lineno = self.lineno
result = []
add = result.append
for self.lineno, token, value in self.gen:
if token == 'data':
add(self.parse_data(value))
elif token == 'code':
add(self.parse_code(value.splitlines()))
elif token == 'cmd':
name, args = value
if name in needle:
return name, args, ast.Stmt(result, lineno=start_lineno)
if name in ('for', 'while'):
add(self.parse_loop(args, name))
elif name == 'if':
add(self.parse_if(args))
else:
self.fail('unknown directive %s' % name)
if needle:
self.fail('unexpected end of template')
return ast.Stmt(result, lineno=start_lineno)
def parse_loop(self, args, type):
rv = self.parse_python('%s %s: pass' % (type, args), 'exec').nodes[0]
tag, value, rv.body = self.parse(('end' + type, 'else'))
if value:
self.fail('unexpected data after ' + tag)
if tag == 'else':
tag, value, rv.else_ = self.parse(('end' + type,))
if value:
self.fail('unexpected data after else')
return rv
def parse_if(self, args):
cond = self.parse_python('if %s: pass' % args).nodes[0]
tag, value, body = self.parse(('else', 'elif', 'endif'))
cond.tests[0] = (cond.tests[0][0], body)
while 1:
if tag == 'else':
if value:
self.fail('unexpected data after else')
tag, value, cond.else_ = self.parse(('endif',))
elif tag == 'elif':
expr = self.parse_python(value, 'eval')
tag, value, body = self.parse(('else', 'elif', 'endif'))
cond.tests.append((expr, body))
continue
break
if value:
self.fail('unexpected data after endif')
return cond
def parse_code(self, lines):
margin = sys.maxint
for line in lines[1:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxint:
for i in xrange(1, len(lines)):
lines[i] = lines[i][margin:]
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return self.parse_python('\n'.join(lines))
def parse_data(self, text):
start_lineno = lineno = self.lineno
pos = 0
end = len(text)
nodes = []
def match_or_fail(pos):
match = token_re.match(text, pos)
if match is None:
self.fail('invalid syntax')
return match.group().strip(), match.end()
def write_expr(code):
node = self.parse_python(code, 'eval')
nodes.append(call_stmt('__to_unicode', [node], lineno))
return code.count('\n')
def write_data(value):
if value:
nodes.append(ast.Const(value, lineno=lineno))
return value.count('\n')
return 0
while 1:
offset = text.find('$', pos)
if offset < 0:
break
next = text[offset + 1]
if next == '{':
lineno += write_data(text[pos:offset])
pos = offset + 2
level = 1
while level:
token, pos = match_or_fail(pos)
if token in ('{', '}'):
level += token == '{' and 1 or -1
lineno += write_expr(text[offset + 2:pos - 1])
elif next in namestart_chars:
lineno += write_data(text[pos:offset])
token, pos = match_or_fail(offset + 1)
while pos < end:
if text[pos] == '.' and pos + 1 < end and \
text[pos + 1] in namestart_chars:
token, pos = match_or_fail(pos + 1)
elif text[pos] in '([':
pos += 1
level = 1
while level:
token, pos = match_or_fail(pos)
if token in ('(', ')', '[', ']'):
level += token in '([' and 1 or -1
else:
break
lineno += write_expr(text[offset + 1:pos])
else:
lineno += write_data(text[pos:offset + 1])
pos = offset + 1 + (next == '$')
write_data(text[pos:])
return ast.Discard(call_stmt(len(nodes) == 1 and '__write' or
'__write_many', nodes, start_lineno),
lineno=start_lineno)
class Context(object):
def __init__(self, namespace, charset, errors):
self.charset = charset
self.errors = errors
self._namespace = namespace
self._buffer = []
self._write = self._buffer.append
_extend = self._buffer.extend
self.runtime = dict(
Undefined=undefined,
__to_unicode=self.to_unicode,
__context=self,
__write=self._write,
__write_many=lambda *a: _extend(a)
)
def write(self, value):
self._write(self.to_unicode(value))
def to_unicode(self, value):
if isinstance(value, str):
return _decode_unicode(value, self.charset, self.errors)
return unicode(value)
def get_value(self, as_unicode=True):
rv = u''.join(self._buffer)
if not as_unicode:
return rv.encode(self.charset, self.errors)
return rv
def __getitem__(self, key, default=undefined):
try:
return self._namespace[key]
except KeyError:
return getattr(builtins, key, default)
def get(self, key, default=None):
return self.__getitem__(key, default)
def __setitem__(self, key, value):
self._namespace[key] = value
def __delitem__(self, key):
del self._namespace[key]
class TemplateCodeGenerator(ModuleCodeGenerator):
def __init__(self, node, filename):
ModuleCodeGenerator.__init__(self, transform(node, filename))
def _nameOp(self, prefix, name):
if name in runtime_vars:
return self.emit(prefix + '_GLOBAL', name)
return ModuleCodeGenerator._nameOp(self, prefix, name)
class Template(object):
"""Represents a simple text based template. It's a good idea to load such
templates from files on the file system to get better debug output.
"""
default_context = {
'escape': utils.escape,
'url_quote': urls.url_quote,
'url_quote_plus': urls.url_quote_plus,
'url_encode': urls.url_encode
}
def __init__(self, source, filename='<template>', charset='utf-8',
errors='strict', unicode_mode=True):
if isinstance(source, str):
source = _decode_unicode(source, charset, errors)
if isinstance(filename, unicode):
filename = filename.encode('utf-8')
node = Parser(tokenize(u'\n'.join(source.splitlines()),
filename), filename).parse()
self.code = TemplateCodeGenerator(node, filename).getCode()
self.filename = filename
self.charset = charset
self.errors = errors
self.unicode_mode = unicode_mode
@classmethod
def from_file(cls, file, charset='utf-8', errors='strict',
unicode_mode=True):
"""Load a template from a file.
.. versionchanged:: 0.5
The encoding parameter was renamed to charset.
:param file: a filename or file object to load the template from.
:param charset: the charset of the template to load.
:param errors: the error behavior of the charset decoding.
:param unicode_mode: set to `False` to disable unicode mode.
:return: a template
"""
close = False
if isinstance(file, basestring):
f = open(file, 'r')
close = True
try:
data = _decode_unicode(f.read(), charset, errors)
finally:
if close:
f.close()
return cls(data, getattr(f, 'name', '<template>'), charset,
errors, unicode_mode)
def render(self, *args, **kwargs):
"""This function accepts either a dict or some keyword arguments which
will then be the context the template is evaluated in. The return
value will be the rendered template.
:param context: the function accepts the same arguments as the
:class:`dict` constructor.
:return: the rendered template as string
"""
ns = self.default_context.copy()
if len(args) == 1 and isinstance(args[0], utils.MultiDict):
ns.update(args[0].to_dict(flat=True))
else:
ns.update(dict(*args))
if kwargs:
ns.update(kwargs)
context = Context(ns, self.charset, self.errors)
exec self.code in context.runtime, context
return context.get_value(self.unicode_mode)
def substitute(self, *args, **kwargs):
"""For API compatibility with `string.Template`."""
return self.render(*args, **kwargs)
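# A minimal usage sketch (Python 2 era, like the module itself; the variable
# names below are assumptions for illustration):
#
#   t = Template(u'Hello $name, you have ${count + 1} messages!')
#   print t.render(name=u'World', count=2)
#   # -> Hello World, you have 3 messages!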
|
kdwink/intellij-community
|
refs/heads/master
|
python/testData/refactoring/unwrap/whileUnwrap_before.py
|
83
|
while True:
x =<caret> 1
|