| repo_name | path | language | license | size (bytes) | score |
|---|---|---|---|---|---|
beiko-lab/timeclust | ananke/_cluster.py | Python | gpl-3.0 | 14,320 | 0.004921
import sys
import argparse
import multiprocessing
from functools import partial
from math import sqrt
import h5py as h5
import numpy as np
from scipy.sparse import vstack, coo_matrix
from scipy.stats.mstats import gmean
from sklearn.cluster import DBSCAN
from ._database import TimeSeriesData
# Calculate the slopes, mx
def calculate_slopes(matrix, time_points, mask):
"""Calculates the slopes (first order difference) of the time-series
matrix. If there are multiple time-series in the matrix, the mask array
defines the borders of these time-series. The inter-time-series slopes
are discarded as they are meaningless to the downstream distance measures.
Parameters
----------
matrix: np.matrix
The time-series matrix with samples/time-points as columns, sequences/
time-series as rows, and sequence count as the entries.
time_points: list or np.array
The time-points. Should be the same length as matrix.shape[1].
mask: list or np.array
        A list of arbitrary types where each unique value represents a
time-series.
Returns
-------
slope_matrix: np.matrix
A matrix of size ngenes by nsamples - 1.
"""
time_points = np.array(time_points)
border = []
for i in range(len(mask) - 1):
border.append(mask[i] == mask[i + 1])
border = np.array(border)
time_difference = time_points[1:] - time_points[0:len(time_points) - 1]
time_difference = time_difference[border]
if (min(time_difference) <= 0):
raise ValueError("Minimum time difference is less than or equal to" \
" zero (may be caused by two consecutive samples" \
" with identical time points)")
slope_matrix = matrix[:, 1:] - matrix[:, 0:matrix.shape[1] - 1]
slope_matrix = slope_matrix[:, border]
slope_matrix = slope_matrix / time_difference
return slope_matrix
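# Illustrative sketch (not part of the original module): calculate_slopes() on a
# toy single-series matrix. The counts and time points below are made up.
def _calculate_slopes_example():
    counts = np.array([[0.0, 2.0, 6.0]])
    time_points = [0, 1, 3]
    mask = ['a', 'a', 'a']
    # First differences are [2, 4]; dividing by the time gaps [1, 2]
    # yields slopes [[2.0, 2.0]].
    return calculate_slopes(counts, time_points, mask)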
# Handles multithreading of STS distance matrix calculation
def generate_STS_distance_matrix(slope_matrix, nthreads=4):
"""Takes in the slope matrix and returns the distance matrix. Uses parallel
processing.
Parameters
----------
slope_matrix: np.matrix
Matrix of the time-series slopes, produced by calculate_slopes()
nthreads: int
Number of threads to use (default 4).
Returns
-------
sts_dist_matrix: np.matrix
Pair-wise STS distance matrix, size ngenes x ngenes.
"""
sts_dist_matrix = np.zeros(shape = (slope_matrix.shape[0],
slope_matrix.shape[0]),
dtype='float64')
nrows = slope_matrix.shape[0]
p = multiprocessing.Pool(nthreads)
partial_sts_matrix_generator = partial(sts_matrix_generator,
slope_matrix = slope_matrix)
print("Beginning parallel calculations on %d threads" % nthreads)
    count = 0
for result in p.imap_unordered(partial_sts_matrix_generator,
range(0, nrows - 1), 1000):
ind = result[0]
dists = result[1].flatten()
sts_dist_matrix[ind, ind:] = dists
sts_dist_matrix[ind:, ind] = dists
count += 1
sys.stdout.write("\r%d/%d" % (count, nrows))
sys.stdout.flush()
p.close()
p.join()
return sts_dist_matrix
# Calculates short time-series distance
# somewhat efficiently
def sts_matrix_generator(ind, slope_matrix):
"""Work-horse function. Computes the short time-series (STS) distance for
an index, ind of the slope matrix.
Parameters
----------
ind: int
The index of the slope matrix that is being computed.
slope_matrix: np.matrix
The slope matrix.
Returns
-------
(ind, dists): ind is the index and dists is a np.matrix containing the
STS distances
"""
mx = slope_matrix[ind, :]
mv = slope_matrix[ind:, :]
mx_rep = np.vstack((mx,)*mv.shape[0])
diff = mx_rep - mv
diff = np.square(diff)
sts_squared = diff.sum(axis=1)
dists = np.sqrt(sts_squared)
return (ind, dists)
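# Illustrative sketch (not part of the original module): the short time-series
# (STS) distance between two slope vectors is simply their Euclidean distance,
# which is what sts_matrix_generator() computes one row-block at a time.
def _sts_distance_example():
    mx = np.array([1.0, -2.0, 0.5])  # slopes of series x (made-up values)
    my = np.array([0.0, -1.0, 2.5])  # slopes of series y
    # sqrt(1**2 + (-1)**2 + (-2)**2) == sqrt(6) ~= 2.449
    return np.sqrt(np.square(mx - my).sum())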
# DBSCAN from scikit learn
def cluster_dbscan(matrix, distance_measure="sts", eps=1):
"""Clusters the distance matrix for a given epsilon value, if distance
    measure is sts. Other distance measures are: ['cityblock', 'cosine',
    'euclidean', 'l1', 'l2', 'manhattan', 'braycurtis', 'canberra',
    'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
    'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
    'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
Parameters
----------
matrix: np.matrix
The input matrix. If distance measure is sts, this should be the sts
distance matrix. If other distance, this should be the time-series
matrix of size ngenes x nsamples.
distance_measure: str
The distance measure, default is sts, short time-series distance.
Any distance measure available in scikit-learn is available here.
        Note: multiple time-series are NOT supported for distances other than
"sts".
Returns
-------
cluster_labels: list of int
A list of size ngenes that defines cluster membership.
"""
if (distance_measure == "sts"):
dbs = DBSCAN(eps=eps, metric='precomputed', min_samples=2)
else:
dbs = DBSCAN(eps=eps, metric=distance_measure, min_samples=2)
cluster_labels = dbs.fit_predict(matrix)
return cluster_labels
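# Illustrative sketch (not part of the original module): cluster_dbscan() with a
# precomputed STS distance matrix. Points 0 and 1 are close, point 2 is far, so
# with eps=1 and min_samples=2 the expected labels are [0, 0, -1] (-1 is noise).
def _cluster_dbscan_example():
    dists = np.array([[0.0, 0.5, 9.0],
                      [0.5, 0.0, 9.0],
                      [9.0, 9.0, 0.0]])
    return cluster_dbscan(dists, distance_measure="sts", eps=1)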
def zscore(x):
"""Computes the Z-score of a vector x. Removes the mean and divides by the
    standard deviation. Has a fallback: if std is 0, it returns all zeroes.
Parameters
----------
x: list of int
Input time-series
Returns
-------
z: list of float
        Z-score normalized time-series
"""
mean = np.mean(x)
sd = np.std(x)
if sd == 0:
z = np.zeros_like(x)
else:
z = (x - mean)/sd
return z
def normalize_simple(matrix, mask):
"""Normalizes a matrix by columns, and then by rows. With multiple
time-series, the data are normalized to the within-series total, not the
entire data set total.
Parameters
----------
    matrix: np.matrix
        Time-series matrix of abundance counts. Rows are sequences, columns
are samples/time-points.
mask: list or np.array
List of objects with length matching the number of timepoints, where
unique values delineate multiple time-series. If there is only one
time-series in the data set, it's a list of identical objects.
Returns
-------
normal_matrix: np.matrix
        Matrix where the columns (within-sample) have been converted to
        proportions, then each row is z-score normalized within its time-series.
"""
normal_matrix = matrix / matrix.sum(0)
normal_matrix[np.invert(np.isfinite(normal_matrix))] = 0
for mask_val in np.unique(mask):
y = normal_matrix[:, np.where(mask == mask_val)[0]]
y = np.apply_along_axis(zscore, 1, y)
normal_matrix[:, np.where(mask == mask_val)[0]] = y
del y
return normal_matrix
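# Illustrative sketch (not part of the original module): normalize_simple() on a
# two-sequence, two-sample matrix with a single time-series mask. The numbers
# are made up: columns become proportions [[0.5, 0.8], [0.5, 0.2]], after which
# each row is z-scored to [[-1, 1], [1, -1]].
def _normalize_simple_example():
    counts = np.array([[2.0, 8.0],
                       [2.0, 2.0]])
    mask = np.array(['a', 'a'])
    return normalize_simple(counts, mask)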
def normalize_clr(matrix, delta = 0.65, threshold = 0.5):
"""Normalizes a matrix by centre log ratio transform with zeros imputed
by the count zero multiplicative method from the zCompositions package
by Javier Palarea-Albaladejo and Josep Antoni Martin-Fernandez. Uses two
parameters, delta and threshold, identically to the zCompositions
implementation. This scheme is the same as used by the CoDaSeq R package.
Parameters
----------
matrix: np.matrix
Time-series matrix of abundance counts. Rows are sequences, columns
are samples/time-points.
delta: float
Fraction of the upper threshold used to impute zeros (default=0.65)
threshold: float
For a vector of counts, factor applied to the quotient 1 over the
number of trials (sum of the counts) used to produce an upper limit
for replacing zero counts by the CZM method (default=0.5).
Returns
    -------
jmborr/confinedBSA | simulation/silica/amorphous_from_md/confineBSA/poretop/carve_silica.py | Python | mit | 11,095 | 0.003155
#!/usr/bin/env python
from __future__ import print_function
import MDAnalysis as mda
from MDAnalysis.analysis.distances import contact_matrix
import numbers
import operator
def contact_sample(siatom, sample, vdw_radii, overlap=1.0):
"""
    Checks if an atom of silica is in contact with the protein+water system
:param siatom: silica Atom, can be Si or O
:param sample: AtomGroup for the protein plus water
:param vdw_radii: dictionary of Van der Waals atomic radii
:param overlap: interpenetration between atoms, in Angstroms
    :returns: True if a contact is found
"""
ele1 = siatom.type
for ele2 in ('H', 'C', 'O', 'N', 'Si'):
cutoff = vdw_radii[ele1] + vdw_radii[ele2] - overlap
        vals = {'type': ele2, 'x': siatom.position[0], 'y': siatom.position[1], 'z': siatom.position[2], 'co': cutoff}
neighbors = sample.select_atoms('type {type} and point {x} {y} {z} {co} '.format(**vals))
if len(neighbors):
return True # We found some sample atoms of type ele2 in contact with siatom
return False
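# Illustrative sketch (not part of the original script): the contact criterion
# in contact_sample() is distance < r_vdw(a) + r_vdw(b) - overlap. The radii
# below are made-up placeholders, not a real van der Waals table.
def _contact_cutoff_example():
    vdw_radii = {'Si': 2.1, 'O': 1.52}  # hypothetical radii, in Angstroms
    overlap = 1.0
    # A Si-O pair counts as a contact within 2.1 + 1.52 - 1.0 = 2.62 Angstroms.
    return vdw_radii['Si'] + vdw_radii['O'] - overlap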
class Node(object):
"""
Represents a neighboring silica atom plus additional
attributes for the carving process
"""
def __init__(self, network, index):
"""
:param network: NeighborNetwork containing this node
:param index: index in the list of atoms making up the network
"""
self.network = network
self.index = index
self.neighbors = list()
self.overlaps = False
self.removed = False
def __str__(self):
return 'Node "{}" at index {} wrapping atom {}'.format(self.type, self.index, self.atom)
@property
def atom(self):
return self.network.atom_group[self.index]
@property
def type(self):
return self.atom.type
@property
def max_nn(self):
if self.type == 'Si':
return 4
elif self.type == 'O':
return 2
def insert_neighbor(self, neighbor):
"""
Include neighbor if not already in the list, and if not
reached maximum neighbor limit. Also include self in
the neighbor's list of neighbors
:param neighbor:
        :return: None; the neighbor is inserted only if neither node is at its limit
"""
if len(self.neighbors) < self.max_nn:
if len(neighbor.neighbors) < neighbor.max_nn:
if neighbor not in self.neighbors:
self.neighbors.append(neighbor)
neighbor.neighbors.append(self)
def exclude_neighbor(self, neighbor):
"""
        Sever the link between self and neighbor by mutually removing themselves
from the list of neighbors
:param neighbor: node to be excluded
"""
self.neighbors.remove(neighbor)
neighbor.neighbors.remove(self)
def retain_potential(self, querying_node):
"""
Potential for this node not to be removed
        Scenario                                                Potential
len(neighbors)==1 overlaps 0
len(neighbors)==1 1
len(neighbors)==2 + overlaps + len(otherS.neighbors)==4 2
the following two could be reversed
len(neighbors)==2 + overlaps + len(otherS.neighbors)==3 3
len(neighbors)==2 + len(otherS.neighbors)==4 4
len(neighbors)==2 + len(otherS.neighbors)==3 5
type != 'O' TypeError
:return: (int) potential, the higher the more potential to be retained
:except: (TypeError) the node does not host an oxygen atom
"""
ng = self.neighbors # just a shortcut
if self.type != 'O':
raise TypeError('Node must be of type "O"')
potential = -1
if len(ng) == 1:
if self.overlaps:
potential = 0
else:
potential = 1
elif len(ng) == 2:
other_Si = ng[0] if ng[0] != querying_node else ng[1]
l = len(other_Si.neighbors)
if self.overlaps:
if l == 4:
potential = 2
elif l == 3:
potential = 3
else:
if l == 4:
potential = 4
elif l == 3:
potential = 5
if potential < 0:
raise RuntimeError('Could not assign a retain potential for {}'.format(self))
return potential
def attempt_removal(self):
"""
Check if this Si atom can be removed
:except: TypeError if type of the node is not Si
"""
if self.type != 'Si':
            raise TypeError('Attempting to remove a node of type other than "Si"')
# find the retain potential of the neighboring oxygen atoms
ngp = [ng.retain_potential(self) for ng in self.neighbors]
# sort neighbors using their retain potential, from lowest to highest
sorted_neighbors = [neig for (pot, neig) in sorted(zip(ngp, self.neighbors))]
# remove this Si node and associated oxygen(s) with smallest
# retain potential to this Si node
self.removed = True
        # number of oxygens to be removed; some Si start with only three O neighbors
        n_o_remove = 1 if len(sorted_neighbors) == 3 else 2
for i in range(n_o_remove):
o_node = sorted_neighbors[i]
o_node.removed = True
# We have to find the other Si atom bonded to this oxygen,
            # and sever the link to this o_node we are removing
onn = o_node.neighbors
if len(onn)>1:
other_Si = onn[0] if onn[0] != self else onn[1] # Si neighbor other than self
o_node.exclude_neighbor(other_Si)
# The other oxygen atoms are not neighbors of this Si node anymore
for i in range(n_o_remove, len(sorted_neighbors)):
o_node = sorted_neighbors[i]
o_node.exclude_neighbor(self)
class SilicaNetwork(object):
"""
The neighbor_silica AtomGroup represented as a network of atoms in contact
"""
def __init__(self, atom_group):
self.atom_group = atom_group
self.nodes = [Node(self, i) for i in range(len(atom_group))]
self._ix2node={atom.index: self.nodes[i] for i, atom in enumerate(atom_group)}
def __len__(self):
return len(self.atom_group)
def __getitem__(self, item):
"""
Fetch a node
:param item: node index or atom
:return: Node object
"""
if isinstance(item, numbers.Integral):
return self.nodes[item]
elif isinstance(item, mda.core.groups.Atom):
return self._ix2node[item.index]
def set_connections(self, cutoff):
"""
Find which nodes are connected with the help of a contact matrix
:param cutoff: maximum distance for connecting two nodes
:return:
"""
xyz = self.atom_group.positions # cartesian coords of the atoms
cm = contact_matrix(xyz, cutoff=cutoff, returntype='sparse')
        for i, indices in enumerate(cm.rows):
            node_a = self.nodes[i]
for j in indices:
if i == j:
continue
node_b = self.nodes[j]
                node_a.insert_neighbor(node_b)
def get_state_i(self, state_attribute, invert=False, atype=None):
"""
:param state_attribute: attribute of node ('overlaps', or 'removed')
:param invert: consider the negative of the value of the state_attribute
:param atype: atom type, all types if None
:return: list of atom_group indices for nodes of type overlapping the sample
"""
indices = list()
for i, node in enumerate(self.nodes):
state = getattr(node, state_attribute)
if invert:
state = not state
if state:
if (not atype) or atype == node.type:
indices.append(i)
        return indices
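# Illustrative sketch (not part of the original script): set_connections() relies
# on MDAnalysis' contact_matrix, but the underlying idea is a pairwise distance
# threshold. A minimal NumPy equivalent, with made-up coordinates and cutoff:
def _cutoff_adjacency_example():
    import numpy as np
    xyz = np.array([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [5.0, 0.0, 0.0]])
    diff = xyz[:, None, :] - xyz[None, :, :]
    dists = np.sqrt((diff ** 2).sum(axis=-1))
    return dists < 1.8  # True where two atoms fall within the cutoff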
tensorflow/agents | tf_agents/environments/suite_mujoco_test.py | Python | apache-2.0 | 2,182 | 0.003666
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.environments.suite_mujoco."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import numpy as np
from tf_agents.environments import py_environment
from tf_agents.environments import suite_mujoco
from tf_agents.environments import wrappers
from tf_agents.utils import test_utils
class SuiteMujocoTest(test_utils.TestCase):
def setUp(self):
super(SuiteMujocoTest, self).setUp()
if not suite_mujoco.is_available():
self.skipTest('suite_mujoco is not available.')
def tearDown(self):
gin.clear_config()
super(SuiteMujocoTest, self).tearDown()
def testMujocoEnvRegistered(self):
env = suite_mujoco.load('HalfCheetah-v2')
self.assertIsInstance(env, py_environment.PyEnvironment)
self.assertIsInstance(env, wrappers.TimeLimit)
def testObservationSpec(self):
env = suite_mujoco.load('HalfCheetah-v2')
self.assertEqual(np.float32, env.observation_spec().dtype)
self.assertEqual((17,), env.observation_spec().shape)
def testActionSpec(self):
env = suite_mujoco.load('HalfCheetah-v2')
self.assertEqual(np.float32, env.action_spec().dtype)
    self.assertEqual((6,), env.action_spec().shape)
def testGinConfig(self):
gin.parse_config_file(
test_utils.test_src_dir_path('environments/configs/suite_mujoco.gin')
)
env = suite_mujoco.load()
self.assertIsInstance(env, py_environment.PyEnvironment)
    self.assertIsInstance(env, wrappers.TimeLimit)
if __name__ == '__main__':
test_utils.main()
zuBux/homepage | app/views.py | Python | gpl-2.0 | 2,716 | 0.011046
from flask import Flask, render_template, request, session, flash, redirect, url_for
from datetime import datetime
from models import Post, Category
from forms import PostForm, LoginForm
from app import app, db
import hashlib
@app.route('/')
def index():
return render_template('index.html', nodict={})
@app.route('/blog')
def blog():
cat = Category.query.get(1)
posts = cat.posts.order_by(-Post.pub_date)
first = posts.first()
older = posts[1:]
return render_template('blog.html', first=first, posts=older)
@app.route('/research')
def research():
return render_template('research.html', nodict={})
@app.route('/blog/post/<post_id>')
def view_post(post_id):
error = None
try:
post = Post.query.get(post_id)
older = Post.query.filter(Post.id < post_id)[:3]
except:
print "No such post"
return render_template('post.html', post=post, posts=older)
@app.route('/blog/post/add', methods=['GET', 'POST'])
def add_post():
    if not session.get('logged_in'):
return render_template("unauthorized.html", dict={})
form = PostForm()
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
cat_num = request.form['category']
cat = Category.query.get(cat_num)
new_post = Post(title, body, cat)
db.session.add(new_post)
db.session.commit()
return render_template("edit.html", action="Add", form=form)
@app.route('/blog/post/<post_id>/edit', methods=['GET', 'POST'])
def edit_post(post_id):
    if not session.get('logged_in'):
return render_template("unauthorized.html", dict={})
post = Post.query.get(post_id)
form = PostForm(obj=post)
if request.method == 'POST':
post.title = request.form['title']
post.body = request.form['body']
db.session.add(post)
db.session.commit()
return render_template("edit.html", action="Add", form=form)
@app.route('/about')
def about():
return render_template('about.html', nodict={})
# No user model in DB since we require only one user. No plaintext password, though
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
error = None
if request.method == 'POST':
hash_pass = hashlib.sha256(request.form['password']).hexdigest()
        print "got it"
        if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif hash_pass != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('blog'))
return render_template('login.html', error=error, form=form)
@app.route('/logout')
def logout():
session['logged_in'] = False
    return redirect(url_for('index'))
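# Illustrative sketch (not part of the original module): generating the hashed
# PASSWORD value that login() above expects in app.config. Note the view
# compares unsalted SHA-256 hex digests of the raw form input.
def _make_password_hash(plaintext):
    import hashlib
    return hashlib.sha256(plaintext.encode('utf-8')).hexdigest()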
InterestingLab/elasticmanager | indices/urls.py | Python | mit | 77 | 0
# from django.conf.urls import url
# from . import views
urlpatterns = [
]
asttra/pysces | setup.py | Python | bsd-3-clause | 9,707 | 0.023797
#!/usr/bin/env python
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2015 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
__doc__ = "PySCeS: the Python Simulator for Cellular Systems setup file"
__version__ = '0.9.2'
import os, re
import fileinput
import ConfigParser
import shutil
try:
print 'Building an egg? %s.' % FRYING_EGGS
except:
FRYING_EGGS = False
try:
from numpy.distutils.core import setup, Extension
except Exception, ex:
print ex
print "PySCeS requires NumPy and SciPy 0.6x+\n"
os.sys.exit(-1)
########## User configuration section ##########
# Install extension modules (zero for skip module) - brett 20040310
pitcon = 1
##Special licence libraries see the relevant readme.txt file for details
nleq2 = 1 # pysces/nleq2/readme.txt
# this is now obsolete with nleq2 4.3
## nleq2_byteorder_override = 0 # allow a user supplied nleq2.f (mach_spec) otherwise PySCeS
use = re.split('\s+', os.getenv('PYSCES_USE', ''))
for e in use:
if e == 'pitcon':
pitcon = 1
elif e == 'nopitcon':
pitcon = 0
elif e == 'nleq2':
nleq2 = 1
elif e == 'nonleq2':
nleq2 = 0
# this is now obsolete with nleq2 4.3
## elif e.startswith('nleq2_byteorder='):
## nleq2_byteorder_override = int(e[16:])
# detects and uses IEEE fp big/little endian
### End user configuration section
########## From here on it's up to distutils ##########
# get the dir of setup.py
local_path = os.path.dirname(os.path.abspath(os.sys.argv[0]))
os.chdir(local_path)
myscripts = []
mydata_files = []
#add some model files into pscmodels
modfold = os.path.join(local_path, 'pysces', 'pscmodels')
mods = os.listdir(modfold)
alist = []
for x in mods:
if x[-4:] != '.psc':
pass
else:
alist.append(os.path.join(modfold,x))
mydata_files.append((os.path.join('pysces','pscmodels'), alist))
# Default configurations for the pyscfg.ini files
if os.sys.platform == 'win32':
#pysces-0.6.8-py2.5-win32.egg
if FRYING_EGGS:
eggdir = 'pysces-%s-py%s.%s-%s.egg' %(__version__, os.sys.version_info[0],\
os.sys.version_info[1], os.sys.platform)
installdir = os.path.join(os.sys.prefix,'lib','site-packages',eggdir,'pysces')
else:
installdir = os.path.join(os.sys.prefix,'lib','site-packages','pysces')
config = {
"install_dir" : installdir,
"model_dir" : "os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces','psc')",
"output_dir" : "os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces')",
"gnuplot_dir" : "None",
"silentstart" : 'False'
}
else:
if hasattr(os.sys, 'lib'):
lib = os.sys.lib
else:
lib = 'lib'
config = {
"install_dir" : os.path.join(os.sys.prefix,lib,"python%d.%d" % tuple(os.sys.version_info[:2]) ,'site-packages','pysces'),
"model_dir" : "os.path.join(os.path.expanduser('~'),'Pysces','psc')",
"output_dir" : "os.path.join(os.path.expanduser('~'),'Pysces')",
"gnuplot_dir" : "None",
"silentstart" : 'False'
}
def writeConfig(local_path, config={}):
cfgfile = open(os.path.join(local_path,'pysces','pyscfg.ini'),'w')
cp = ConfigParser.ConfigParser()
# PySCeS internal setup
cp.add_section('Pysces')
for key in config:
print `key` + ' :: ' + config[key]
cp.set('Pysces',key, config[key])
#add configuration data
cp.add_section('PyscesConfig')
cp.set('PyscesConfig','matplotlib', True)
cp.set('PyscesConfig','matplotlib_backend', 'TkAgg')
cp.set('PyscesConfig','gnuplot', False)
# Built in modules
cp.add_section('PyscesModules')
if pitcon:
cp.set('PyscesModules','pitcon', True)
else:
cp.set('PyscesModules','pitcon', False)
#PySCeS external module setup
cp.add_section('ExternalModules')
if nleq2:
cp.set('ExternalModules','nleq2', True)
mydata_files.append((os.path.join('pysces','nleq2'), [os.path.join(local_path,'pysces','nleq2','nleq2_readme.txt')]))
else:
cp.set('ExternalModules','nleq2', False)
mydata_files.append((os.path.join('pysces','nleq2'), [os.path.join(local_path,'pysces','nleq2','readme.txt')]))
cp.write(cfgfile)
cfgfile.close()
writeConfig(local_path,config)
print 'Default configuration file installed'
# my subpackage list
mypackages= ['pysces','pysces.tests','pysces.lib','pysces.pitcon',\
'pysces.sandbox', 'pysces.contrib','pysces.contrib.demo', 'pysces.core2',\
'pysces.kraken','pysces.kraken.controllers']
#PySCeS modules
mymodules = []
if pitcon:
print '\nBuilding pitcon'
extpath = os.path.join(local_path, 'pysces', 'pitcon')
    pitcon = Extension('pysces.pitcon.pitcon',[os.path.join(extpath,'pitcon.pyf'),os.path.join(extpath,'pcon61subd.f'),os.path.join(extpath,'dpcon61.f'),os.path.join(extpath,'dpcon61w.f')])
mymodules.append(pitcon)
#mydata_files.append((os.path.join('pysces','pitcon'), [os.path.join(local_path, 'pysces', 'pitcon','readme.txt'), os.path.join(local_path, 'pysces', 'pitcon','readme.txt')]))
else:
print '\nSkipping pitcon'
if nleq2:
print '\nBuilding nleq2'
# this is now obsolete with nleq2 4.3 ... i hope !
    ## print 'System ByteOrder', os.sys.byteorder
    ## if os.path.exists(os.path.join(local_path, 'pysces', 'nleq2','nleq2.f')) and nleq2_byteorder_override:
## print 'INFO: using user supplied nleq2.f'
## else:
## if os.sys.byteorder == 'little':
## shutil.copyfile(os.path.join(extpath,'nleq2_little.f'), os.path.join(extpath,'nleq2.f'))
## elif os.sys.byteorder == 'big':
## shutil.copyfile(os.path.join(extpath,'nleq2_big.f'), os.path.join(extpath,'nleq2.f'))
extpath = os.path.join(local_path, 'pysces', 'nleq2')
nleq2 = Extension('pysces.nleq2.nleq2',[os.path.join(extpath,'nleq2.pyf'),\
os.path.join(extpath,'nleq2.f'), os.path.join(extpath,'linalg_nleq2.f'),\
os.path.join(extpath,'zibmon.f'), os.path.join(extpath,'zibsec.f'),\
os.path.join(extpath,'zibconst.f'), os.path.join(extpath,'wnorm.f')
])
mymodules.append(nleq2)
mypackages.append('pysces.nleq2')
else:
print '\n'
if len(mymodules) == 0:
noext = Extension('None',[],None) # Not ideal but seems safe
mymodules.append(noext)
# Data files to copy
mydata_files.append((os.path.join('pysces'), [os.path.join(local_path,'pysces','pyscfg.ini')]))
mydata_files.append(('',[os.path.join(local_path,'pysces','pysces.pth')]))
mydata_files.append((os.path.join('pysces','docs'), [os.path.join(local_path,'pysces','docs','userguide.pdf')]))
mydata_files.append((os.path.join('pysces','examples'), [os.path.join(local_path,'pysces','examples',examplefile) for examplefile in os.listdir(os.path.join(local_path,'pysces','examples'))]))
##not sure if this is necessary anymore, removed to test
#if os.sys.platform == 'win32':
# mydata_files.append((os.path.join('pysces','win32'), [os.path.join(local_path,'pysces','win32','libquadmath-0.dll'), os.path.join(local_path,'pysces','win32','libgfortran-3.dll')]))
os.chdir(local_path)
# Install packages and the metatool binaries as "data"
setup(name="pysces",
version = __version__,
description = "The Python Simulator for Cellular Systems - simulation and analysis tools for modelling biological systems",
long_description = """
PySCeS is developed by the Triple-J Group for Molecular Cell Physiology
in order to try model and understand the complex processes and systems
which make up the living cell.
PySCeS features, amongst other things:
- A text based model description language.
    - A structural analysis module.
heraldmatias/dew | django-sunat/src/upc/sunat/admin.py | Python | gpl-2.0 | 532 | 0.011278
__author__ = 'herald olivares'
# -*- coding: utf-8 -*-
from django.contrib import admin
from upc.sunat.models import Person, Concept, Debt
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'ruc', 'phone', 'type')
class ConceptAdmin(admin.ModelAdmin):
pass
class DebtAdmin(admin.ModelAdmin):
list_display = ('concept', 'person', 'period', 'tax_code', 'resolution_number', 'amount')
admin.site.register(Person, PersonAdmin)
admin.site.register(Concept, ConceptAdmin)
admin.site.register(Debt, DebtAdmin)
petezybrick/iote2e | iote2e-pyclient/src/iote2epyclient/ws/loginvo.py | Python | apache-2.0 | 1,031 | 0.00485
# Copyright 2016, 2017 Peter Zybrick and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
LoginVo
:author: Pete Zybrick
:contact: pzybrick@gmail.com
:version: 1.0.0
"""
class LoginVo(object):
'''
Login Value Object
'''
def __init__(self, loginName, passwordEncrypted, sourceName, optionalFilterSensorName=None ):
self.loginName = loginName
self.passwordEncrypted = passwordEncrypted
self.sourceName = sourceName
self.optionalFilterSensorName = optionalFilterSensorName
aldryn/aldryn-redirects | aldryn_redirects/__init__.py | Python | bsd-3-clause | 109 | 0
# -*- coding: utf-8 -*-
__version__ = '1.3.7'
default_app_config = 'aldryn_redirects.apps.AldrynRedirects'
vgamula/sp | server/accounts/tests/test_views.py | Python | mit | 912 | 0
from server.tests import BaseAsyncTestCase, unittest_run_loop
class AccountViewsTestCase(BaseAsyncTestCase):
    @unittest_run_loop
async def test_simple_test_view(self):
resp = await self.client.get('/test')
assert resp.status == 200
assert await resp.text() == 'Test response'
@unittest_run_loop
async def test_simple_test_view_1(self):
resp = await self.client.get('/test')
assert resp.status == 200
assert await resp.text() == 'Test response'
@unittest_run_loop
async def test_simple_test_view_2(self):
        resp = await self.client.get('/test')
assert resp.status == 200
assert await resp.text() == 'Test response'
@unittest_run_loop
async def test_simple_test_view_3(self):
resp = await self.client.get('/test')
assert resp.status == 200
assert await resp.text() == 'Test response'
bat-serjo/vivisect | vtrace/tests/test_expressions.py | Python | apache-2.0 | 851 | 0
import vtrace.tests as vt_tests
breakpoints = {
'windows': 'ntdll.NtTerminateProcess',
'linux': 'libc.exit',
'freebsd': 'libc.exit',
}
class VtraceExpressionTest(vt_tests.VtraceProcessTest):
def test_vtrace_sym(self):
plat = self.trace.getMeta('Platform')
symname = breakpoints.get(plat)
entry = self.trace.parseExpression(symname)
addEntry = self.trace.parseExpression(symname + " + 5")
self.assertTrue(entry + 5 == addEntry)
def test_baselib(self):
plat = self.trace.getMeta('Platform')
libname = breakpoints.get(plat).split('.')[0]
        entry = self.trace.parseExpression(libname)
        addEntry = self.trace.parseExpression(libname + " + 5")
# grab a symbol in the library and compare offsets against that?
self.assertTrue(entry + 5 == addEntry)
QuLogic/meson | mesonbuild/coredata.py | Python | apache-2.0 | 52,406 | 0.003301
# Copyright 2012-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import mlog, mparser
import pickle, os, uuid
import sys
from itertools import chain
from pathlib import PurePath
from collections import OrderedDict
from .mesonlib import (
MesonException, EnvironmentException, MachineChoice, PerMachine,
default_libdir, default_libexecdir, default_prefix, split_args,
OptionKey, OptionType,
)
from .wrap import WrapMode
import ast
import argparse
import configparser
import enum
import shlex
import typing as T
if T.TYPE_CHECKING:
from . import dependencies
from .compilers.compilers import Compiler, CompileResult # noqa: F401
from .environment import Environment
from .mesonlib import OptionOverrideProxy
OptionDictType = T.Union[T.Dict[str, 'UserOption[T.Any]'], OptionOverrideProxy]
KeyedOptionDictType = T.Union[T.Dict['OptionKey', 'UserOption[T.Any]'], OptionOverrideProxy]
CompilerCheckCacheKey = T.Tuple[T.Tuple[str, ...], str, str, T.Tuple[str, ...], str]
version = '0.58.999'
backendlist = ['ninja', 'vs', 'vs2010', 'vs2015', 'vs2017', 'vs2019', 'xcode']
default_yielding = False
# Can't bind this near the class method it seems, sadly.
_T = T.TypeVar('_T')
class MesonVersionMismatchException(MesonException):
'''Build directory generated with Meson version is incompatible with current version'''
def __init__(self, old_version: str, current_version: str) -> None:
        super().__init__('Build directory has been generated with Meson version {}, '
'which is incompatible with the current version {}.'
.format(old_version, current_version))
self.old_version = old_version
self.current_version = current_version
class UserOption(T.Generic[_T]):
def __init__(self, description: str, choices: T.Optional[T.Union[str, T.List[_T]]], yielding: T.Optional[bool]):
super().__init__()
self.choices = choices
self.description = description
if yielding is None:
yielding = default_yielding
if not isinstance(yielding, bool):
raise MesonException('Value of "yielding" must be a boolean.')
self.yielding = yielding
def printable_value(self) -> T.Union[str, int, bool, T.List[T.Union[str, int, bool]]]:
assert isinstance(self.value, (str, int, bool, list))
return self.value
# Check that the input is a valid value and return the
# "cleaned" or "native" version. For example the Boolean
# option could take the string "true" and return True.
def validate_value(self, value: T.Any) -> _T:
raise RuntimeError('Derived option class did not override validate_value.')
def set_value(self, newvalue: T.Any) -> None:
self.value = self.validate_value(newvalue)
class UserStringOption(UserOption[str]):
def __init__(self, description: str, value: T.Any, yielding: T.Optional[bool] = None):
super().__init__(description, None, yielding)
self.set_value(value)
def validate_value(self, value: T.Any) -> str:
if not isinstance(value, str):
raise MesonException('Value "%s" for string option is not a string.' % str(value))
return value
class UserBooleanOption(UserOption[bool]):
def __init__(self, description: str, value, yielding: T.Optional[bool] = None) -> None:
super().__init__(description, [True, False], yielding)
self.set_value(value)
def __bool__(self) -> bool:
return self.value
def validate_value(self, value: T.Any) -> bool:
if isinstance(value, bool):
return value
if not isinstance(value, str):
raise MesonException(f'Value {value} cannot be converted to a boolean')
if value.lower() == 'true':
return True
if value.lower() == 'false':
return False
raise MesonException('Value %s is not boolean (true or false).' % value)
class UserIntegerOption(UserOption[int]):
def __init__(self, description: str, value: T.Any, yielding: T.Optional[bool] = None):
min_value, max_value, default_value = value
self.min_value = min_value
self.max_value = max_value
c = []
if min_value is not None:
c.append('>=' + str(min_value))
if max_value is not None:
c.append('<=' + str(max_value))
choices = ', '.join(c)
super().__init__(description, choices, yielding)
self.set_value(default_value)
def validate_value(self, value: T.Any) -> int:
if isinstance(value, str):
value = self.toint(value)
if not isinstance(value, int):
raise MesonException('New value for integer option is not an integer.')
if self.min_value is not None and value < self.min_value:
raise MesonException('New value %d is less than minimum value %d.' % (value, self.min_value))
if self.max_value is not None and value > self.max_value:
raise MesonException('New value %d is more than maximum value %d.' % (value, self.max_value))
return value
def toint(self, valuestring: str) -> int:
try:
return int(valuestring)
except ValueError:
raise MesonException('Value string "%s" is not convertible to an integer.' % valuestring)
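# Illustrative sketch (not part of the original module): UserIntegerOption takes
# a (min, max, default) tuple and validates both strings and ints against the
# bounds. The option description below is made up.
def _user_integer_option_example():
    opt = UserIntegerOption('number of jobs', (1, 64, 4))
    opt.set_value('8')  # strings are converted via toint()
    assert opt.value == 8
    # opt.set_value(128) would raise MesonException: above the maximum of 64.
    return opt.value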
class OctalInt(int):
# NinjaBackend.get_user_option_args uses str() to converts it to a command line option
# UserUmaskOption.toint() uses int(str, 8) to convert it to an integer
# So we need to use oct instead of dec here if we do not want values to be misinterpreted.
def __str__(self):
return oct(int(self))
class UserUmaskOption(UserIntegerOption, UserOption[T.Union[str, OctalInt]]):
def __init__(self, description: str, value: T.Any, yielding: T.Optional[bool] = None):
super().__init__(description, (0, 0o777, value), yielding)
self.choices = ['preserve', '0000-0777']
def printable_value(self) -> str:
if self.value == 'preserve':
return self.value
return format(self.value, '04o')
def validate_value(self, value: T.Any) -> T.Union[str, OctalInt]:
if value is None or value == 'preserve':
return 'preserve'
return OctalInt(super().validate_value(value))
def toint(self, valuestring: T.Union[str, OctalInt]) -> int:
try:
return int(valuestring, 8)
except ValueError as e:
raise MesonException(f'Invalid mode: {e}')
class UserComboOption(UserOption[str]):
def __init__(self, description: str, choices: T.List[str], value: T.Any, yielding: T.Optional[bool] = None):
super().__init__(description, choices, yielding)
if not isinstance(self.choices, list):
raise MesonException('Combo choices must be an array.')
for i in self.choices:
if not isinstance(i, str):
raise MesonException('Combo choice elements must be strings.')
self.set_value(value)
def validate_value(self, value: T.Any) -> str:
if value not in self.choices:
if isinstance(value, bool):
_type = 'boolean'
elif isinstance(value, (int, float)):
_type = 'number'
else:
_type = 'string'
optionsstring = ', '.join([f'"{item}"' for item in self.choices])
raise MesonException('Value "{}" (of type "{}") for combo option "{}" is not one of the choices.'
' Possible choices are (as string): {}.'.format(
                va
jeremyletang/slut | slut.py | Python | unlicense | 9,623 | 0.006651
#!/usr/bin/python
import requests
import argparse
import subprocess
import time
import json
import signal
import sys
import os
AdminToken=''
BackupFolderPath='./backup'
CookieFilePath='cookies.txt'
SavedFilesDB='.slut-bak.json'
TeamInfoDb='.team-bak.json'
LsDb='.ls-bak.json'
UserDb='.user-bak.json'
TeamName=''
should_exit=False
def signal_handler(signal, frame):
global should_exit
should_exit = True
print 'slut.py asked to exit, cleaning up'
def ls_db_path():
return '.'+TeamName+'/'+LsDb
def team_info_db_path():
return '.'+TeamName+'/'+TeamInfoDb
def saved_files_db_path():
return '.'+TeamName+'/'+SavedFilesDB
def backup_team_folder_path():
return BackupFolderPath+'/'+TeamName
def user_db_path():
return '.'+TeamName+'/'+UserDb
def ensure_team_folder_exists():
# if not exist create it
if not os.path.exists('.'+TeamName):
os.makedirs('.'+TeamName)
def make_request(request_str):
# try to get the data from the current page
r = requests.get(request_str)
if r.status_code != 200:
print 'cannot request slack information, check your token'
sys.exit(0)
# convert response into json dict
json = r.json()
# if not ok -> not enough access
if json['ok'] != True:
print 'valid request but not enough rights, check your token'
sys.exit(0)
return json
def get_files_for_page(current_page, max_pages):
print 'fetching files from page: {}/{}'.format(current_page, max_pages)
# generete the url for the request
request_str = 'https://slack.com/api/files.list?token={}&page={}'.format(AdminToken,current_page)
json = make_request(request_str)
# our files list
lst = []
    # iterate over each file for the current page
    for f in json['files']:
lst.append(f)
return lst
def get_all_files_list(pages_count, should_update):
    # if the data already exists
if not should_update:
if os.path.exists(ls_db_path()):
            with open(ls_db_path(), 'rb') as f:
j = json.loads(f.read())
return j
# else retrieve data
print 'retrieving list of all available files ({} pages)'.format(pages_count)
files = []
for p in range(1, pages_count+1):
if should_exit:
return []
files = files + get_files_for_page(p, pages_count)
# save in file
with open(ls_db_path(), 'wb') as outfile:
json.dump(files, outfile, indent=2)
return files
def get_team_name():
global TeamName
print 'retrieving team information:',
    # if the data already exists
if os.path.exists(team_info_db_path()):
with open(team_info_db_path(), 'rb') as f:
j = json.loads(f.read())
print '{}'.format(j['team']['name'])
return
# else
request_str = 'https://slack.com/api/team.info?token={}'.format(AdminToken)
json_value = make_request(request_str)
if json_value['ok'] != True:
print '\nvalid request but not enough rights, check your token'
sys.exit(0)
TeamName = json_value['team']['domain']
print '{}'.format(json_value['team']['name'])
ensure_team_folder_exists()
# save team info
with open(team_info_db_path(), 'wb') as outfile:
json.dump(json_value, outfile, indent=2)
def get_user_list():
    # if the data already exists
if os.path.exists(user_db_path()):
with open(user_db_path(), 'rb') as f:
j = json.loads(f.read())
return j
# else
request_str = 'https://slack.com/api/users.list?token={}'.format(AdminToken)
json_value = make_request(request_str)
if json_value['ok'] != True:
print 'valid request but not enough rights, check your token'
sys.exit(0)
ensure_team_folder_exists()
# save user info
with open(user_db_path(), 'wb') as outfile:
json.dump(json_value, outfile, indent=2)
return json_value
def get_pages_count():
    # generate the url for the request
request_str = 'https://slack.com/api/files.list?token={}'.format(AdminToken)
json = make_request(request_str)
# if not ok -> not enough access
if json['ok'] != True:
print 'valid request but not enough rights, check your token'
sys.exit(0)
return int(json['paging']['pages'])
def parse_args():
# general parser
parser = argparse.ArgumentParser(description='Slack utilities')
subparsers = parser.add_subparsers(help='available commands')
# create the parser for the backup command
parser_backup = subparsers.add_parser('backup', help='backup all files from slack')
parser_backup.add_argument('backup_value', nargs='?', help='backup files from slack')
parser_backup.add_argument('--token', nargs='+', help='the token to use to launch requests')
parser_backup.add_argument('--cookies', nargs='+', help='path to the cookies to retrieve files')
parser_backup.add_argument('--output', nargs='+', help='path to save the files')
parser_backup.add_argument('--update', action='store_true', help='force update of the local db')
# create the parser for the rm command
parser_rm = subparsers.add_parser('rm', help='remove files from slack')
parser_rm.add_argument('rm_value', nargs='?', default='30', help='remove files from slack')
parser_rm.add_argument('--token', nargs='+', help='the token to use to launch requests')
parser_ls = subparsers.add_parser('ls', help='list files uploaded on slack')
parser_ls.add_argument('ls_value', nargs='?', default='30', help='list files uploaded slack')
parser_ls.add_argument('--update', action='store_true', help='force update of the local db')
parser_ls.add_argument('--token', nargs='+', help='the token to use to launch requests')
return parser.parse_args()
def get_saved_files():
if os.path.exists(saved_files_db_path()):
with open(saved_files_db_path(), 'rb') as f:
return json.loads(f.read())
return []
def save_files(files):
with open(saved_files_db_path(), 'wb') as outfile:
json.dump(files, outfile, indent=2)
def file_exist(files_list, f):
for e in files_list:
if e['name'] == f['name'] and e['saved_name'] == f['saved_name'] and e['id'] == f['id'] and e['path'] == f['path']:
return True
return False
def do_backup(files):
# build user specified folder if not exist
if not os.path.exists(BackupFolderPath):
os.makedirs(BackupFolderPath)
# build team folder if not exist
if not os.path.exists(backup_team_folder_path()):
os.makedirs(backup_team_folder_path())
# get list of already saved files
saved_files = get_saved_files()
file_cnt = len(files)
file_it = 1
# get all files
for f in files:
# check if user asked for exit
if should_exit == True:
break;
# get required datas
cur_f = {}
cur_f['name'] = f['name']
cur_f['saved_name'] = u'{}-{}'.format(f['timestamp'], f['name'])
cur_f['id'] = f['id']
cur_f['path'] = '{}/{}-{}'.format(backup_team_folder_path(), f['timestamp'], f['name'])
# file do not exist get + add it to the db
sys.stdout.write(u'{}/{} '.format(file_it, file_cnt))
sys.stdout.flush()
if not file_exist(saved_files, cur_f):
subprocess.call([
u'wget',
u'--no-verbose',
u'--load-cookies={}'.format(CookieFilePath),
u'--output-document={}/{}-{}'.format(backup_team_folder_path(), f['timestamp'], f['name']),
f['url_private']])
saved_files.append(cur_f)
        # file exists, do nothing
else:
print u'{}/{}-{} already exist.'.format(backup_team_folder_path(), f['timestamp'], f['name'])
file_it+=1
save_files(saved_files)
def user_name_from_id(user_id, users):
for user in users:
if user_id == user['id']:
return user['name']
return ""
def do_ls(files):
users = get_user_list()["members"]
for f in files:
uname = user_name_from_id(f['user'], users)
        print u'{} {} {} {}'.form
littleweaver/django-argus | argus/migrations/0006_auto_20140310_1718.py | Python | bsd-3-clause | 1,256 | 0.000796
# encoding: utf8
from django.db import models, migrations
def copy_manualness(apps, schema_editor):
Transaction = apps.get_model("argus", "Transaction")
fractions = Transaction.objects.filter(split='manual',
share__fraction_is_manual=True)
fractions.update(split='percent')
amounts = Transaction.objects.filter(split='manual',
share__amount_is_manual=True)
amounts.update(split='amount')
class Migration(migrations.Migration):
dependencies = [
('argus', '0005_switch_to_numerator_denominator'),
]
operations = [
migrations.AlterField(
model_name='transaction',
name='split',
field=models.CharField(default='even', max_length=7, choices=[('simple', u'Simple payment'), ('even', u'Even split'), ('percent', u'Manual percentages'), ('amount', u'Manual amounts'), ('shares', u'Manual shares')]),
),
migrations.RunPython(copy_manualness),
migrations.RemoveField(
model_name='share',
name='amount_is_manual',
),
        migrations.RemoveField(
            model_name='share',
            name='fraction_is_manual',
),
]
darkman66/langcodes | test_multithread.py | Python | mit | 758 | 0.011873
# -*- coding: utf-8; -*-
"""
This file implements tests for the langcodes module in a multithreaded env
Problem is still there if you try to access that module from multiple places at once
"""
import threading
from twisted.internet import reactor
from langcodes.tag_parser import parse_tag
from langcodes import standardize_tag
def parseMe(i, tag):
print i, parse_tag(tag)
def stopMe():
reactor.stop()
def startProcessing():
for i, tag in enumerate(('en_US', 'en', 'en_gb')):
#reactor.callInThread(parseMe, i, tag)
print '-'*10, parseMe(i, tag)
print "script will STOP working after 5s"
#reactor.callLater(5, stopMe)
#reactor.callInThread(startProcessing)
#reactor.run()
startProcessing()
print standardize_tag('eng_US')
eyaler/tensorpack | examples/Saliency/CAM-resnet.py | Python | apache-2.0 | 5,641 | 0.001595
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: CAM-resnet.py
import cv2
import sys
import argparse
import numpy as np
import os
import multiprocessing
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.tfutils import optimizer, gradproc
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.utils.gpu import get_num_gpu
from tensorpack.utils import viz
from imagenet_utils import (
fbresnet_augmentor, ImageNetModel)
from resnet_model import (
preresnet_basicblock, preresnet_group)
TOTAL_BATCH_SIZE = 256
DEPTH = None
class Model(ImageNetModel):
def get_logits(self, image):
cfg = {
18: ([2, 2, 2, 2], preresnet_basicblock),
34: ([3, 4, 6, 3], preresnet_basicblock),
}
defs, block_func = cfg[DEPTH]
with argscope(Conv2D, use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')), \
argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format='channels_first'):
convmaps = (LinearWrap(image)
.Conv2D('conv0', 64, 7, strides=2, activation=BNReLU)
.MaxPooling('pool0', 3, strides=2, padding='SAME')
.apply2(preresnet_group, 'group0', block_func, 64, defs[0], 1)
.apply2(preresnet_group, 'group1', block_func, 128, defs[1], 2)
.apply2(preresnet_group, 'group2', block_func, 256, defs[2], 2)
.apply2(preresnet_group, 'group3new', block_func, 512, defs[3], 1)())
print(convmaps)
convmaps = GlobalAvgPooling('gap', convmaps)
logits = FullyConnected('linearnew', convmaps, 1000)
return logits
def optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False)
opt = tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
gradprocs = [gradproc.ScaleGradient(
            [('conv0.*', 0.1), ('group[0-2].*', 0.1)])]
return optimizer.apply_grad_processors(opt, gradprocs)
def get_data(train_or_test):
# completely copied from imagenet-resnet.py example
isTrain = train_or_test == 'train'
datadir = args.data
ds = dataset.ILSVRC12(datadir, train_or_test, shuffle=isTrain)
augmentors = fbresnet_augmentor(isTrain)
augmentors.append(imgaug.ToUint8())
ds = AugmentImageComponent(ds, augmentors, copy=False)
if isTrain:
ds = PrefetchDataZMQ(ds, min(25, multiprocessing.cpu_count()))
ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)
return ds
def get_config():
dataset_train = get_data('train')
dataset_val = get_data('val')
return TrainConfig(
model=Model(),
dataflow=dataset_train,
callbacks=[
ModelSaver(),
PeriodicTrigger(InferenceRunner(dataset_val, [
ClassificationError('wrong-top1', 'val-error-top1'),
ClassificationError('wrong-top5', 'val-error-top5')]),
every_k_epochs=2),
ScheduledHyperParamSetter('learning_rate',
[(30, 1e-2), (55, 1e-3), (75, 1e-4), (95, 1e-5)]),
],
steps_per_epoch=5000,
max_epoch=105,
)
def viz_cam(model_file, data_dir):
ds = get_data('val')
pred_config = PredictConfig(
model=Model(),
session_init=get_model_loader(model_file),
input_names=['input', 'label'],
output_names=['wrong-top1', 'group3new/bnlast/Relu', 'linearnew/W'],
return_input=True
)
meta = dataset.ILSVRCMeta().get_synset_words_1000()
pred = SimpleDatasetPredictor(pred_config, ds)
cnt = 0
for inp, outp in pred.get_result():
images, labels = inp
wrongs, convmaps, W = outp
batch = wrongs.shape[0]
for i in range(batch):
if wrongs[i]:
continue
weight = W[:, [labels[i]]].T # 512x1
convmap = convmaps[i, :, :, :] # 512xhxw
mergedmap = np.matmul(weight, convmap.reshape((512, -1))).reshape(14, 14)
mergedmap = cv2.resize(mergedmap, (224, 224))
heatmap = viz.intensity_to_rgb(mergedmap, normalize=True)
blend = images[i] * 0.5 + heatmap * 0.5
concat = np.concatenate((images[i], heatmap, blend), axis=1)
classname = meta[labels[i]].split(',')[0]
cv2.imwrite('cam{}-{}.jpg'.format(cnt, classname), concat)
cnt += 1
if cnt == 500:
return
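# Illustrative sketch (not part of the original script): the class activation map
# built in viz_cam() is a class-weight row (1 x 512) times the flattened final
# conv features (512 x H*W), reshaped back to H x W. The data here is random.
def _cam_math_example():
    rng = np.random.RandomState(0)
    convmap = rng.rand(512, 14, 14)  # final conv feature maps for one image
    weight = rng.rand(512, 1).T      # linear-layer weights for one class
    cam = np.matmul(weight, convmap.reshape((512, -1))).reshape(14, 14)
    return cam                       # 14x14 map, later resized and blended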
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--data', help='ILSVRC dataset dir')
parser.add_argument('--depth', type=int, default=18)
parser.add_argument('--load', help='load model')
parser.add_argument('--cam', action='store_true', help='run visualization')
args = parser.parse_args()
DEPTH = args.depth
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
num_gpu = get_num_gpu()
BATCH_SIZE = TOTAL_BATCH_SIZE // num_gpu
if args.cam:
BATCH_SIZE = 128 # something that can run on one gpu
viz_cam(args.load, args.data)
sys.exit()
logger.auto_set_dir()
config = get_config()
if args.load:
config.session_init = get_model_loader(args.load)
launch_train_with_config(config, SyncMultiGPUTrainerParameterServer(num_gpu))
jasonleaster/Machine_Learning | SVM/svm.py | Python | gpl-2.0 | 9,610 | 0.004162
"""
Programmer : EOF
E-mail : jasonleaster@163.com
File : svm.py
Date : 2015.12.13
You know ... It's hard time but it's not too bad to say give up.
"""
import numpy
class SVM:
def __init__(self, Mat, Tag, C = 2, MAXITER = 200):
self._Mat = numpy.array(Mat)
self._Tag = numpy.array(Tag).flatten()
        self.SampleDem = self._Mat.shape[0]
self.SampleNum = self._Mat.shape[1]
        # Penalty factor (the regularization parameter C)
self.C = C
        # Each sample point has a Lagrange multiplier
self.alpha = numpy.array([0.0 for i in range(self.SampleNum)])
# The expected weight vector which we want the machine to learn
self.W = numpy.array([0.0 for i in range(self.SampleDem)])
# intercept
self.b = 0.0
# Difference between the expected output and output of current machine
self.E = numpy.array([0.0 for i in range(self.SampleNum)])
self.Kernel = self.Linear_Kernel
        # Bool flags: whether each sample point is a Support Vector
self.SupVec = [False for i in range(self.SampleNum)]
# Points which are selected in current time.
self.P1 = None
self.P2 = None
#Max times for training SVM
self.MAXITER = MAXITER
"""
Linear Kernel which will compute the
inner product of point @i and @j. K(i,j)
"""
def Linear_Kernel(self, i, j):
summer = 0.0
for d in range(self.SampleDem):
summer += self._Mat[d][i] * self._Mat[d][j]
return summer
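    # Illustrative note (not part of the original class): for column vectors
    # x_i = Mat[:, i] and x_j = Mat[:, j], Linear_Kernel(i, j) is the plain
    # inner product <x_i, x_j>; e.g. x_i = (1, 2), x_j = (3, 4) gives
    # K(i, j) = 1*3 + 2*4 = 11. A vectorized equivalent would be
    # numpy.dot(self._Mat[:, i], self._Mat[:, j]).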
"""
Current output for sample point @i
"""
def G(self, i):
summer = 0.0
for j in range(self.SampleNum):
summer += self.alpha[j] * self._Tag[j] * self.Kernel(i,j)
summer += self.b
return summer
"""
    update the cost for prediction when x-i (Mat[:, i]) is the input.
where @i is not the index of current selected point(P1, P2).
"""
def updateE(self, i):
self.E[i] = self.G(i) - self._Tag[i]
"""
    The @findFirstVar() function helps us find the first variable
    whose alpha value is to be updated. We return the index of
    that point as @P1
"""
def findFirstVar(self):
firstPointIndex = None
b_KKTcond_Points = []
for i in range(self.SampleNum):
if i == self.P1 or i == self.P2:
continue
self.updateE(i)
for i in range(self.SampleNum):
if 0 < self.alpha[i] and self.alpha[i] < self.C:
if self.G(i) * self._Tag[i] != 1:
b_KKTcond_Points.append(i)
        # if no point on the boundary breaks the KKT condition
if len(b_KKTcond_Points) == 0:
for i in range(self.SampleNum):
if self.alpha[i] == 0 and self._Tag[i] * self.G(i) < 1:
b_KKTcond_Points.append(i)
elif self.alpha[i] == self.C and self._Tag[i] * self.G(i) > 1:
b_KKTcond_Points.append(i)
maxE = 0.0
for i in b_KKTcond_Points:
if abs(maxE) < abs(self.E[i]):
firstPointIndex = i
maxE = self.E[i]
return firstPointIndex
"""
    Find the second variable whose alpha value is to be updated
"""
def findSecondVar(self, firstPointIndex):
secondPointIndex = None
val = 0
if self.E[firstPointIndex] < 0:
maxVal = self.E[firstPointIndex]
for i in range(self.SampleNum):
if self.E[i] > maxVal:
maxVal = self.E[i]
secondPointIndex = i
else:
minVal = self.E[firstPointIndex]
for i in range(self.SampleNum):
if self.E[i] < minVal:
minVal = self.E[i]
secondPointIndex = i
return secondPointIndex
"""
@optimal() function will update the alpha value of the
two selected points which could be indexed by @P1 and @P2.
@P1 and @P2 are index of the first selected point
and the second selected point. You can get the point
by self._Mat[:, P1] and self._Mat[:, P2]
@L : lowest boundary of current optimal problem
@H : highest boundary of current optimal problem
"""
def optimal(self, P1, P2):
if self._Tag[P1] != self._Tag[P2]:
k = self.alpha[P2] - self.alpha[P1]
L = max(0.0, k)
H = min(self.C, self.C + k)
else:
k = self.alpha[P2] + self.alpha[P1]
L = max(0.0, k - self.C)
H = min(self.C, k)
K11 = self.Kernel(P1, P1)
K22 = self.Kernel(P2, P2)
K12 = self.Kernel(P1, P2)
yita = K11 + K22 - 2*K12
old_alpha_P1 = self.alpha[P1]
old_alpha_P2 = self.alpha[P2]
# candidate for new alpha_2
new_alpha_unc_P2 = old_alpha_P2 + \
(self._Tag[P2] * (self.E[P1] - self.E[P2]) /yita)
if new_alpha_unc_P2 > H:
new_alpha_P2 = H
elif new_alpha_unc_P2 < L:
new_alpha_P2 = L
else:
new_alpha_P2 = new_alpha_unc_P2
new_alpha_P1 = old_alpha_P1 + self._Tag[P1] * self._Tag[P2] * \
(old_alpha_P2 - new_alpha_P2)
b_P1_new = - self.E[P1]\
- self._Tag[P1] * K11 * (new_alpha_P1 - old_alpha_P1) \
- self._Tag[P2] * K12 * (new_alpha_P2 - old_alpha_P2) \
+ self.b
b_P2_new = - self.E[P2] \
- self._Tag[P1] * K12 * (new_alpha_P1 - old_alpha_P1) \
- self._Tag[P2] * K22 * (new_alpha_P2 - old_alpha_P2) \
+ self.b
"""
Attention!
        If there is no difference between the old alpha and the new alpha,
        we should choose another P1 or P2. We DON'T need to drop BOTH
        previously selected points; replacing either one is fine.
"""
if new_alpha_P1 == self.alpha[P1] or new_alpha_P2 == self.alpha[P2]:
old_P1 = P1
old_P2 = P2
while P1 == P2 or (P1 == old_P1 and P2 == old_P2):
P1 = numpy.random.randint(self.SampleNum)
P2 = numpy.random.randint(self.SampleNum)
self.P1 = P1
self.P2 = P2
# optimal the alpha for selected P1 and P2 recusively.
self.optimal(P1, P2)
return
if 0 < new_alpha_P1 and new_alpha_P1 < self.C and \
0 < new_alpha_P2 and new_alpha_P2 < self.C:
if abs(b_P1_new - b_P2_new) > 0.01:
print "Attention! Maybe ERROR :( b1 == b2"
if new_alpha_P1 == 0 or new_alpha_P1 == self.C or \
new_alpha_P2 == 0 or new_alpha_P2 == self.C:
self.b = (b_P1_new + b_P2_new)/2
else:
self.b = b_P1_new
self.alpha[P1] = new_alpha_P1
self.alpha[P2] = new_alpha_P2
for i in range(self.SampleNum):
if 0 < self.alpha[i] and self.alpha[i] < self.C:
self.SupVec[i] = True
else:
self.SupVec[i] = False
"""
update the new cost value E-i for P1 and P2
"""
summer = 0.0
for j in range(self.SampleNum):
if self.SupVec[j] == True:
summer += self.alpha[j] * self._Tag[j] * self.Kernel(P1, j)
new_E_P1 = summer + self.b - self._Tag[P1]
summer = 0.0
for j in range(self.SampleNum):
if self.SupVec[j] == True:
summer += self.alpha[j] * self._Tag[j] * self.Kernel(P2, j)
new_E_P2 = summer + self.b - self._Tag[P2]
self.E[P1] = new_E_P1
self.E[P2] = new_E_P2
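    # Illustrative note (not part of the original class): the box-constraint
    # clipping above, in numbers. With C = 2, Tag[P1] != Tag[P2] and old
    # alphas a1 = 0.5, a2 = 1.5: k = a2 - a1 = 1, so L = max(0, 1) = 1 and
    # H = min(2, 2 + 1) = 2. An unclipped candidate a2_new = 2.7 is clipped
    # to H = 2, and a1 then moves by Tag[P1]*Tag[P2]*(a2_old - a2_new) = 0.5,
    # preserving the constraint sum(alpha_i * y_i) == 0.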
def train(self):
times = 0
while self.run_or_not():
times += 1
print "Training time:", times
if times == self.MAXITER:
break
P1 = self.findFirstVar()
P2 = self.findSecondVar(P1)
self.P1 = P1
self.P2 = P2
self.optimal(P1, P2)
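
# --- Editor's illustration (hedged, not part of the original class) ---
# A self-contained sketch of the box constraint computed in optimal() above:
# for labels y1 != y2 the updated pair must keep alpha2 - alpha1 = k, so
# alpha2 is clipped to [max(0, k), min(C, C + k)]; for y1 == y2 it must keep
# alpha2 + alpha1 = k, giving [max(0, k - C), min(C, k)].
def clip_bounds(alpha1, alpha2, y1, y2, C):
    """Return the feasible interval (L, H) for the updated alpha2."""
    if y1 != y2:
        k = alpha2 - alpha1
        return max(0.0, k), min(C, C + k)
    k = alpha2 + alpha1
    return max(0.0, k - C), min(C, k)

# Example: clip_bounds(0.2, 0.5, 1, -1, 1.0) evaluates to (0.3, 1.0).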
"""
|
ecohealthalliance/EpiTator | epitator/structured_data_annotator.py | Python | apache-2.0 | 5,239 | 0.002481 | #!/usr/bin/env python
from __future__ import absolute_import
from .annotator import Annotator, AnnoTier, AnnoSpan
import re
import pyparsing as pypar
def word_token_regex(disallowed_delimiter):
return pypar.Regex(r"[^\s\n" + re.escape(disallowed_delimiter) + r"]+")
pypar.ParserElement.setDefaultWhitespaceChars(" \t")
table_parser = pypar.NoMatch()
table_cell_separators = ["|", "/", ","]
for separator in table_cell_separators:
value = pypar.Combine(
word_token_regex(separator) * (0, 10),
joinString=' ',
adjacent=False)
value.setParseAction(lambda start, tokens: (start, tokens[0]))
empty = pypar.Empty()
empty.setParseAction(lambda start, tokens: (start, tokens))
value = pypar.Group(value + empty)
row = pypar.Group(pypar.Optional(separator).suppress() +
(value + pypar.Literal(separator).suppress()) * (1, None) +
pypar.Optional(value) +
(pypar.StringEnd() | pypar.Literal("\n")).suppress() +
pypar.Optional("\n").suppress())
table_parser ^= (
(pypar.LineStart() + pypar.Optional(pypar.White())).suppress() +
# Allow line breaks for table headings
row + pypar.Optional(pypar.Regex(r"[\-_=]{3,}") + pypar.Literal("\n") * (1, 2)).suppress() +
row * (0, None)).setResultsName("delimiter:" + separator)
table_parser.parseWithTabs()
key_value_separators = [":", "-", ">"]
key_value_list_parser = pypar.NoMatch()
for separator in key_value_separators:
value = pypar.Combine(
word_token_regex(separator) * (1, 10),
joinString=' ',
adjacent=False)
value.setParseAction(lambda start, tokens: (start, tokens[0]))
empty = pypar.Empty()
empty.setParseAction(lambda start, tokens: (start, tokens))
value = pypar.Group(value + empty)
row = pypar.Group(value + pypar.Literal(separator).suppress() + value +
(pypar.StringEnd() | pypar.Literal("\n")).suppress() +
pypar.Optional("\n").suppress())
key_value_list_parser ^= (
(pypar.LineStart() + pypar.Optional(pypar.White())).suppr | ess() +
row * (2, None)).setResultsName("delimiter:" + separator)
key_value_list_parser.parseWithTabs()
class StructuredDataAnnotator(Annotator):
"""
Annotates tables and key value lists embedded in documents.
"""
def annotate(self, doc):
doc_text_len = len(doc.text)
def create_trimmed_annospan_for | _doc(start, end, label=None, metadata=None):
return AnnoSpan(
start,
min(doc_text_len, end),
doc,
label=label,
metadata=metadata).trimmed()
spans = []
value_spans = []
for token, start, end in table_parser.scanString(doc.text):
data = [[
create_trimmed_annospan_for_doc(value_start, value_end)
for ((value_start, value), (value_end, _)) in row] for row in token]
new_value_spans = [value for row in data for value in row]
# Skip tables with one row and numeric/empty columns since they are likely
# to be confused with unstructured text punctuation.
if len(data) == 1:
if len(new_value_spans) < 3:
continue
elif any(re.match(r"\d*$", value.text) for value in new_value_spans):
continue
# Skip tables with differing numbers of columns in each row
else:
row_lengths = sorted([len(row) for row in data])
# Determine the min and max difference between any two row lengths.
max_diff = row_lengths[-1] - row_lengths[0]
min_diff = max_diff
for row_len, next_row_len in zip(row_lengths, row_lengths[1:]):
len_diff = next_row_len - row_len
if len_diff < min_diff:
min_diff = len_diff
if min_diff > 0 and max_diff > 1:
continue
spans.append(create_trimmed_annospan_for_doc(start, end, "table", metadata={
"type": "table",
"data": data,
"delimiter": next(k.split("delimiter:")[1] for k in token.keys() if k.startswith("delimiter:"))
}))
value_spans += new_value_spans
for token, start, end in key_value_list_parser.scanString(doc.text):
data = {
create_trimmed_annospan_for_doc(key_start, key_end): create_trimmed_annospan_for_doc(value_start, value_end)
for (((key_start, key), (key_end, _)), ((value_start, value), (value_end, _2))) in token
}
spans.append(create_trimmed_annospan_for_doc(start, end, "keyValuePairs", metadata={
"type": "keyValuePairs",
"data": data,
"delimiter": next(k.split("delimiter:")[1] for k in token.keys() if k.startswith("delimiter:"))
}))
value_spans += data.values()
return {
'structured_data': AnnoTier(spans),
'structured_data.values': AnnoTier(value_spans)
}
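
# --- Editor's usage sketch (hedged, not part of the module) ---
# EpiTator annotators are normally driven through an AnnoDoc; the import
# path below follows the project's README and is an assumption here.
#
#     from epitator.annotator import AnnoDoc
#     doc = AnnoDoc("Cases: 10\nDeaths: 2")
#     doc.add_tiers(StructuredDataAnnotator())
#     for span in doc.tiers['structured_data'].spans:
#         print(span.metadata['type'], span.metadata['data'])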
|
kiddinn/plaso | tests/containers/windows_events.py | Python | apache-2.0 | 1,527 | 0.004584 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Windows event data attribute containers."""
import unittest
import uuid
from plaso.containers import windows_events
from tests import test_lib as shared_test_lib
class WindowsDistributedLinkTrackingEventDataTest(shared_test_lib.BaseTestCase):
"""Tests for the Windows distributed link event data attribute container."""
def testGetAttributeNames(self):
"""Tests the GetAttributeNames function."""
test_uuid = uuid.UUID(uuid.uuid1().hex)
attribute_container = (
windows_events.WindowsDistributedLinkTrackingEventData(test_uuid, None))
expected_attribute_names = [
'_event_data_stream_row_identifier', 'data_type', 'mac_ad | dress',
'origin', 'parser', 'uuid']
attribute_names = sorted(attribute_container.GetAttributeNames())
self.assertEqual(attribute_names, expected_attribute_names)
class WindowsVolumeEventDataTest(shared_test_lib.BaseTestCase):
"""Tests for the Windows volume event data attribute container."""
def testGetAttributeNames(self):
"""Tests the GetAttribu | teNames function."""
attribute_container = windows_events.WindowsVolumeEventData()
expected_attribute_names = [
'_event_data_stream_row_identifier', 'data_type', 'device_path',
'origin', 'parser', 'serial_number']
attribute_names = sorted(attribute_container.GetAttributeNames())
self.assertEqual(attribute_names, expected_attribute_names)
if __name__ == '__main__':
unittest.main()
|
ifearcompilererrors/fle_redesign | fle_redesign/apps/radpress/tests/__init__.py | Python | mit | 324 | 0 | from dja | ngo.conf import settings
from radpress.tests.base import BaseTest, RestructuredtextTest
from radpress.tests.md import MarkdownTest
if 'django.contrib.admin' in settings.INSTALLED_APPS:
from radpress.tests.admin import AdminTest
else:
print("`django.contrib.admin` is not installed, passed admin tests...")
| |
EDRN/labcas-backend | common/src/main/python/gov/nasa/jpl/edrn/labcas/client/examples/upload_hanash.py | Python | apache-2.0 | 1,674 | 0.015532 | # Example Python script to upload Hanash data
from gov.nasa.jpl.edrn.labcas.labcas_client import LabcasClient
if __name__ == '__main__':
# datasetId must match the directory name where the data is staged on the server: $LABCAS_STAGING/$datasetId
datasetId = 'FHCRCHanashAnnexinLamr'
labcasClient = LabcasClient()
# product type metadata (to be submitted as part of upload workflow)
metadata = {
# required
'DatasetName':'Autoantibody Biomarkers',
'ProtocolId':'138',
| 'ProtocolName':'Validation of Protein Markers for Lung Cancer Using CARET Sera and Proteomics Techniques',
'LeadPI':'Samir Hanash',
'DataCustodian':'Ji Qiu',
'DataCustodianEmail':'djiqiu@fhcrc.org',
'CollaborativeGroup':'Lung and Upper Aerodigestive',
'OwnerPrincipal':'/Samir/Hanash',
# optional
'OrganSite':'L | ung',
'SiteName':'Fred Hutchinson Cancer Research Center (Biomarker Developmental Laboratories)',
'SiteShortName':'FHCRC',
'QAState':'Accepted',
'PubMedId':'http://www.ncbi.nlm.nih.gov/pubmed/18794547',
'DateDatasetFrozen':'2007/05/29',
}
# upload dataset staged in directory 'mydata'
labcasClient.uploadDataset(datasetId, metadata)
# query the product types from the XML/RPC File Manager interface
labcasClient.getProductTypeByName(datasetId)
# list all products for given dataset == product type
labcasClient.listProducts(datasetId) |
gibil5/openhealth | models/order/__init__.py | Python | agpl-3.0 | 535 | 0.005607 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
#from . import report_order_line
#from . import | order_report_nex # Estado de Cuenta - Used by Patient - Moved
from . import report_sale_product
from . import order_admin
from . import ticket
from . import order
from . import order_business
from . import order_controller
from . import order_extra
from . import order_line
from . import order_line_pl
from . import payment_method
from . import payment_method_line
#from . import c | losing
from . import card
|
fpeder/pyXKin | xkin/calib_params.py | Python | bsd-2-clause | 1,484 | 0.003369 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
depth_cal = np.array([5.9421434211923247e+02,
5.9104053696870778e+02,
3.3930780975300314e+02,
2.4273913761751615e+02,
-2.6386489753128833e-01,
9.9966832163729757e-01,
9.9966832163729757e-01,
5.0350940090814270e-03,
-1.3053628089976321e+00])
rgb_cal = np.array([5.2921508098293293e+02,
5.2556393630057437e+02,
3.2894272028759258e+02,
2.6748068171871557e+02,
2.6451622333009589e-01,
-8.3990749424620825e-01,
| -1.9922302173693159e-03,
1.4371995932897616e-03,
9.1192465078713847e-01])
T = np.array([1.9985242312092553e-02,
-7.4423738761617583e-04,
-1.0916736334336222e-02])
R = np.array([[9.9984628826577793e-01,
1.2635359098409581e-03,
-1.7487233004436643e-02],
[-1.4779096108364480e-03,
9.9992385683542895e-01,
-1.22513801076 | 79535e-02],
[1.7470421412464927e-02,
1.2275341476520762e-02,
9.9977202419716948e-01]])
calib = {'depth':depth_cal, 'rgb':rgb_cal, 'T':T, 'R':R}
import pickle
with open('calib.pck', 'wb') as f:
    pickle.dump(calib, f)
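
# Editor's companion sketch (hedged): reading the calibration back. The
# first four 'depth' entries are presumably the intrinsics fx, fy, cx, cy
# (an assumption based on their magnitudes, not documented in this file).
with open('calib.pck', 'rb') as f:
    calib_loaded = pickle.load(f)
print(calib_loaded['depth'][:4])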
|
h2oai/h2o-3 | h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_levels.py | Python | apache-2.0 | 683 | 0.01757 | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.utils.typechecks import assert_is_type
from random import randrange
import numpy as np
def | h2o_H2OFrame_levels():
"""
Python API test: h2o.frame.H2OFrame.levels()
"""
python_lists = np.random.randint(-2,2, (10000,2))
h2oframe = h2o.H2OFrame(python_obj=python_lists, column_types=['enum', 'enum'])
clist = h2oframe.levels()
assert_is_type(clist, list) # check return type
assert len(clist)==2, "h2o.H2OFrame.levels() command is not working." # check list length
pyunit_utils.standalone_test(h2o_H2 | OFrame_levels)
|
clemus90/competitive-programming | hackerRank/crackingTheCodingInterview/time_complexity_primality.py | Python | mit | 622 | 0.033762 | def checkPrime(primes, test):
i = 0
isPrime = True
    while(i < len(primes) | and primes[i]<= int(test ** (1/2))):
if(test % primes[i] == 0):
isPrime = False
break
i+=1
return isPrime
primes = [2]
i = 3
lastTest = int((2 * (10**9))**(1/2)) #Square Root of 2 * 10 ^9
#build an array of primes up to the lastTest
while(i<=lastTest):
if(checkPrime(primes, i)):
primes.append(i)
i+=1
n = int(input())
for i in range(n):
test = int(input())
if(test <= lastTest):
print("Prim | e" if test in primes else "Not prime")
else:
print("Prime" if checkPrime(primes, test) else "Not prime")
|
sagiss/sardana | src/sardana/taurus/qt/qtgui/extra_macroexecutor/sequenceeditor/model.py | Python | lgpl-3.0 | 14,281 | 0.002311 | #!/usr/bin/env python
##############################################################################
##
## This file is part of Sardana
##
## http://www.sardana-controls.org/
##
## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
## Sardana is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Sardana is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""
model.py:
"""
from lxml import etree
from taurus.external.qt import Qt
from sardana.taurus.core.tango.sardana import macro
class MacroSequenceTreeModel(Qt.QAbstractItemModel):
def __init__(self, parent=None):
Qt.QAbstractItemModel.__init__(self, parent)
self.columns = 4
self.setRoot(macro.SequenceNode())
self.headers = ["Macro", "Parameters", "Progress", "Pause"]
def root(self):
return self._root
def setRoot(self, root):
self._root = root
self.reset()
def clearSequence(self):
self.setRoot(macro.SequenceNode())
def isEmpty(self):
return len(self.root()) == 0
def flags(self, index):
column = index.column()
node = self.nodeFromIndex(index)
flags = Qt.Qt.ItemIsEnabled
if column == 0:
flags |= Qt.Qt.ItemIsSelectable
elif column == 1:
if isinstance(node, macro.SingleParamNode) and \
not node.type() == "User":
flags |= Qt.Qt.ItemIsEditable
else:
flags |= Qt.Qt.ItemIsSelectable
elif column == 2:
flags |= Qt.Qt.ItemIsSelectable
elif index.column() == 3:
flags |= (Qt.Qt.ItemIsSelectable | Qt.Qt.ItemIsEditable)
if isinstance(node, macro.MacroNode):
flags |= Qt.Qt.ItemIsDragEnabled
if node.isAllowedHooks():
flags |= Qt.Qt.ItemIsDropEnabled
return flags
def _insertRow(self, parentIndex, node=None, row=-1):
parentNode = self.nodeFromIndex(parentIndex)
if row == -1: row = len(parentNode)
if isinstance(parentNode, macro.RepeatParamNode):
            if node is None: node = parentNode.newRepeat()
self.beginInsertRows(parentIndex, row, row)
row = parentNode.insertChild(node, row)
self.endInsertRows()
return self.index(row, 0, parentIndex)
def _removeRow(self, index):
"""This method is used remove macro (pased via index)"""
node = self.nodeFromIndex(index)
parentIndex = index.parent()
parentNode = self.nodeFromIndex(parentIndex)
row = parentNode.rowOfChild(node)
self.beginRemoveRows(parentIndex, row, row)
parentNode.removeChild(node)
self.endRemoveRows()
def _upRow(self, index):
node = self.nodeFromIndex(index)
parentIndex = index.parent()
parentNode = self.nodeFromIndex(parentIndex)
row = parentNode.rowOfChild(node)
self._removeRow(index)
newIndex = self._insertRow(parentIndex, node, row - 1)
if isinstance(parentNode, macro.RepeatParamNode):
parentNode.arrangeIndexes()
return newIndex
def _downRow(self, index):
node = self.nodeFromIndex(index)
parentIndex = index.parent()
parentNode = self.nodeFromIndex(parentIndex)
row = parentNode.rowOfChild(node)
self._removeRow(index)
newIndex = self._insertRow(parentIndex, node, row + 1)
if isinstance(parentNode, macro.RepeatParamNode):
parentNode.arrangeIndexes()
return newIndex
def _leftRow(self, index):
"""This method is used to move selected macro (pased via index)
to it's grandparent's hook list. In tree representation it basically move macro to the left"""
node = self.nodeFromIndex(index)
parentIndex = index.parent()
grandParentIndex = parentIndex.parent()
self._removeRow(index)
return self._insertRow(grandParentIndex, node)
def _rightRow(self, index):
"""This method is used to move selected macro (pased via index)
to it's grandparent's hook list. In tree representation it basically move macro to the left"""
node = self.nodeFromIndex(index)
parentIndex = index.parent()
row = index.row()
self._removeRow(index)
newParentIndex = self.index(row, 0, parentIndex)
return self._insertRow(newParentIndex, node)
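
    # Editor's illustration (hedged): given siblings [A, B, C], _rightRow on B
    # removes it and re-inserts it under C (the node that now occupies B's old
    # row), while _leftRow performs the inverse, hoisting a node back up to
    # its grandparent's child list.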
def rowCount(self, parent):
branchNode = self.nodeFromIndex(parent)
return len(branchNode)
def columnCount(self, parent):
return self.columns
def data(self, index, role):
if role == Qt.Qt.DisplayRole:
node = self.nodeFromIndex(index)
if index.column() == 0:
return Qt.QVariant(node.name())
elif index.column() == 1:
return Qt.QVariant(str(node.value()))
elif index.column() == 2:
if isinstance(node, macro.MacroNode):
return Qt.QVariant(node.progress())
elif role == Qt.Qt.DecorationRole:
node = self.nodeFromIndex(index)
if index.column() == 3:
if isinstance(node, macro.MacroNode):
if node.isPause():
return Qt.QVariant(Qt.QIcon(":/actions/media-playback-pause.svg"))
return Qt.QVariant()
def setData (self, index, value, role=Qt.Qt.EditRole):
node = self.nodeFromIndex(index)
if index.column() == 1:
if isinstance(node, macro.SingleParamNode):
node.setValue(Qt. | from_qvariant(value, str)) |
self.emit(Qt.SIGNAL("dataChanged(QModelIndex,QModelIndex)"), index, index)
while True:
index = index.parent()
node = self.nodeFromIndex(index)
if isinstance(node, macro.MacroNode):
self.emit(Qt.SIGNAL("dataChanged(QModelIndex,QModelIndex)"), index, index.sibling(index.row(), self.columnCount(index) - 1))
break
elif index.column() == 2:
progress = Qt.from_qvariant(value, float)
node.setProgress(progress)
self.emit(Qt.SIGNAL("dataChanged(QModelIndex,QModelIndex)"), index, index)
elif index.column() == 3:
node.setPause(Qt.from_qvariant(value, bool))
self.emit(Qt.SIGNAL("dataChanged(QModelIndex,QModelIndex)"), index, index)
return True
def headerData(self, section, orientation, role):
if orientation == Qt.Qt.Horizontal and role == Qt.Qt.DisplayRole:
return Qt.QVariant(self.headers[section])
return Qt.QVariant()
def index(self, row, column, parent):
assert self.root() is not None
branchNode = self.nodeFromIndex(parent)
assert branchNode is not None
return self.createIndex(row, column, branchNode.child(row))
def parent(self, child):
node = self.nodeFromIndex(child)
if node is None:
return Qt.QModelIndex()
parent = node.parent()
if parent is None:
return Qt.QModelIndex()
grandparent = parent.parent()
if grandparent is None:
return Qt.QModelIndex()
row = grandparent.rowOfChild(parent)
assert row != -1
return self.createIndex(row, 0, parent)
def nodeFromIndex(self, index):
if index.isValid():
return index.internalPointer()
else:
return self.root()
def toXmlString(self, prett |
er432/TASSELpy | TASSELpy/test/net/maizegenetics/analysis/association/associationTestSuite.py | Python | bsd-3-clause | 472 | 0.006356 | import unittest
from TASSELpy.TASSELbridge import TASSELbridge
from TASSELpy.test.net.maizegenetics.analysis.association.FixedEffectLMPlugin import easy_GLMTest
class associationTestSuite(unittest.TestSuite):
def __init__(self):
| super(associationTestSuite, self).__init__()
self.addTest(unittest.makeSuite(easy_GLMTest))
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(association | TestSuite())
TASSELbridge.stop()
|
yunlzheng/tomatodo | tt/application.py | Python | mit | 1,854 | 0.001618 | # coding: utf-8
import os
from os.path import abspath, dirname
import tornado.web
import tornado.httpserver
im | port tornado.ioloop
from tornado.log import app_log
from | tornado.options import define, options
from mongoengine import connect
from tt.handle import MainHandler, MongoBackboneHandler, LoginHandler, RegisterHandler, LogoutHandler
PROJECT_DIR = dirname(dirname(abspath(__file__)))
TEMPLATE_DIR = os.path.join(PROJECT_DIR, 'templates')
STATIC_DIR = os.path.join(PROJECT_DIR, 'static')
CONF_DIR = os.path.join(PROJECT_DIR, 'conf')
CONF_FILE = CONF_DIR+os.path.sep+"application.conf"
define("debug", default=True, type=bool)
define("port", default=8181, type=int)
class Application(tornado.web.Application):
def __init__(self):
        handlers = [
(r'/', MainHandler),
(r'/sigin', LoginHandler),
(r'/sigup', RegisterHandler),
(r'/sigout',LogoutHandler),
(r'/rest/([a-z]+)', MongoBackboneHandler),
(r'/rest/([a-z]+)/(.+)', MongoBackboneHandler)
        ]
settings = dict(
template_path=TEMPLATE_DIR,
static_path=STATIC_DIR,
login_url="/sigin",
register_url='/sigup',
logout_url='/sigout',
debug=options.debug,
cookie_secret="123456"
)
connect('test', host="mongodb://localhost:27017")
tornado.web.Application.__init__(self, handlers, **settings)
def run():
tornado.options.parse_command_line()
tornado.options.parse_config_file(CONF_FILE)
port = os.environ.get("PORT", options.port)
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(port)
app_log.info("application run on {0}".format(port))
tornado.ioloop.IOLoop.instance().start() |
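
# Editor's addition (hedged): the module defines run() but never invokes it;
# this standard guard lets the file be launched directly as a script.
if __name__ == "__main__":
    run()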
pi19404/robosub-1 | src/movement/physical/fuzzy_logic_defuzzifier.py | Python | gpl-3.0 | 1,604 | 0.004364 | # COPYRIGHT: Robosub Club of the Palouse under the GPL v3
import argparse
import time
import os
import sys
| from copy import deepcopy
from random import random
sys.path.append(os.path.abspath("../.."))
from util.communication.grapevine import Communicator
# TODO: This module should take the fuzzy sets produced by
# movement/stabilization and should translate them into raw digital
# values that can be sent over the serial interface.
# microcontroller_interface.py currently does much of this processing,
# | but it shouldn't. microcontroller_interface.py should figure out how
# to send data over the serial interface and how to receive data over
# the serial interface. Anything that is beyond that scope, such as
# translating a magnitude into a raw value, should be moved into this
# module.
def main(args):
com = Communicator("movement/physical")
last_packet_time = 0.0
while True:
rx_packet = com.get_last_message("movement/stabilization")
if rx_packet and rx_packet['timestamp'] > last_packet_time:
last_packet_time = rx_packet['timestamp']
tx_packet = {
'vector': rx_packet['vector'],
'rotation': rx_packet['rotation']}
com.publish_message(tx_packet)
time.sleep(args.epoch)
def commandline():
parser = argparse.ArgumentParser(description='Mock module.')
parser.add_argument('-e', '--epoch', type=float,
default=0.05,
help='Sleep time per cycle.')
return parser.parse_args()
if __name__ == '__main__':
args = commandline()
main(args)
|
isudox/leetcode-solution | python-algorithm/leetcode/problem_38.py | Python | mit | 1,320 | 0 | """38. Count and Say
https://leetcode.com/problems/count-and-say/description/
The count-and-say sequen | ce is the sequence of integers with the first five
terms as following:
1. 1
2. 11
3. 21
4. 1211
5. 111221
1 is read off as " | one 1" or 11.
11 is read off as "two 1s" or 21.
21 is read off as "one 2, then one 1" or 1211.
Given an integer n where 1 ≤ n ≤ 30, generate the n^th term of the
count-and-say sequence.
Note: Each term of the sequence of integers will be represented as a
string.
Example 1:
Input: 1
Output: "1"
Example 2:
Input: 4
Output: "1211"
"""
class Solution:
def count_and_say(self, n: int) -> str:
assert 1 <= n <= 30
if n == 1:
return "1"
def say(num_str: str) -> str:
res = ""
cur_digit = num_str[0]
cur_digit_count = 1
for i in range(1, len(num_str)):
if num_str[i] == cur_digit:
cur_digit_count += 1
else:
res += str(cur_digit_count) + cur_digit
cur_digit = num_str[i]
cur_digit_count = 1
res += str(cur_digit_count) + cur_digit
return res
ans = "1"
for i in range(1, n):
ans = say(ans)
return ans
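

if __name__ == "__main__":
    # Editor's addition (hedged): quick check against the docstring examples.
    solution = Solution()
    assert solution.count_and_say(1) == "1"
    assert solution.count_and_say(4) == "1211"
    assert solution.count_and_say(5) == "111221"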
|
imiolek-ireneusz/eduActiv8 | game_boards/game017.py | Python | gpl-3.0 | 14,410 | 0.002984 | # -*- coding: utf-8 -*-
import math
import os
import pygame
import random
import sys
import classes.board
import classes.extras as ex
import classes.game_driver as gd
import classes.level_controller as lc
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self, mainloop, 1, 1)
gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 11, 9)
def create_game_objects(self, level=1):
self.allow_unit_animations = False
self.board.draw_grid = False
hue = random.randrange(0, 225)
card_font_color = ex.hsv_to_rgb(hue, 255, 140)
arrow_color = ex.hsv_to_rgb(hue, 200, 200)
font_color2 = ex.hsv_to_rgb(hue, 255, 50)
outline_color2 = (255, 102, 0)
if self.mainloop.scheme is not None:
card_color = self.mainloop.scheme.u_color
else:
card_color = (255, 255, 255)
if self.lang.lang == 'fr':
alc = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z']
uc = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z']
else:
alc = self.lang.alphabet_lc
if self.lang.has_uc:
uc = self.lang.alphabet_uc
self.abc_len = len(alc)
h = int(math.ceil(self.abc_len / 3.0))
data = [16, h]
# stretch width to fit the screen size
x_count = self.get_x_count(data[1], even=True)
if x_count < 16:
data[0] = 16
else:
data[0] = x_count
self.data = data
self.card_font_size_top = 0
if self.mainloop.lang.lang == "lkt":
self.card_font_size_top = 1
self.vis_buttons = [0, 0, 0, 0, 1, 0, 1, 0, 0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.layout.update_layout(data[0], data[1])
scale = self.layout.scale
self.board.level_start(data[0], data[1], scale)
self.unit_mouse_over = None
self.units = []
self.board.board_bg.update_me = True
self.board.board_bg.line_color = (20, 20, 20)
self.base26 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z']
self.font_size = 17
self.word_list = self.lang.d['abc_flashcards_word_sequence']
self.pword_list = self.lang.dp['abc_flashcards_word_sequence']
self.frame_flow = self.lang.d['abc_flashcards_frame_sequence']
if self.lang.lang == "el":
self.font_size = 16
if self.lang.ltr_text:
x = 0
else:
x = data[0] - 2
y = 0
label_color = ex.hsv_to_rgb(hue, self.mainloop.cl.bg_color_s, self.mainloop.cl.bg_color_v)
font_color = [ex.hsv_to_rgb(hue, self.mainloop.cl.font_color_s, self.mainloop.cl.font_color_v), ]
fg_tint_color = ex.hsv_to_rgb(hue, self.mainloop.cl.fg_hover_s, self.mainloop.cl.fg_hover_v)
self.bg_color_active = ex.hsv_to_rgb(hue, 200, 255)
self.bg_color_done = ex.hsv_to_rgb(hue, 50, 255)
if self.mainloop.scheme is None: |
dc_img_src = os.path.join('unit_bg', "universal_r2x1_dc.png")
else:
dc_img_src = None
| if self.mainloop.scheme.dark:
self.bg_color_active = ex.hsv_to_rgb(hue, 255, 200)
self.bg_color_done = ex.hsv_to_rgb(hue, 255, 55)
bg_img_src = os.path.join('unit_bg', "universal_r2x1_bg_s150.png")
for i in range(self.abc_len):
if self.lang.has_uc:
caption = uc[i] + alc[i]
else:
caption = alc[i]
self.board.add_universal_unit(grid_x=x, grid_y=y, grid_w=2, grid_h=1, txt=caption,
fg_img_src=bg_img_src,
bg_img_src=bg_img_src,
dc_img_src=dc_img_src,
bg_color=(0, 0, 0, 0),
border_color=None, font_color=font_color,
bg_tint_color=label_color,
fg_tint_color=fg_tint_color,
txt_align=(0, 0), font_type=1, multi_color=False, alpha=True,
immobilized=True,
fg_as_hover=True)
self.units.append(self.board.ships[-1])
y += 1
if y >= data[1]:
if i > 2 * data[1] - 2:
if self.lang.ltr_text:
x = 4
else:
x = data[0] - 6
y = 0
else:
if self.lang.ltr_text:
x = 2
else:
x = data[0] - 4
y = 0
if self.lang.ltr_text:
x = (data[0] - 4 + 3 + 3) // 2
else:
x = (data[0] - 10) // 2
if self.lang.has_cursive:
y = 1
else:
y = 2
# Card
if self.lang.has_uc:
w = 8
xd = 0
else:
w = 6
xd = 1
if self.lang.has_uc:
img_plus = 0
self.board.add_unit(x, y, 2, 1, classes.board.Label, uc[0], card_color, "", self.card_font_size_top)
if self.lang.has_cursive:
self.board.add_unit(x - 2, y + 1, 2, 3, classes.board.Label, uc[0], card_color, "", self.font_size)
self.board.add_unit(x + 2 - xd, y, 2, 1, classes.board.Label, alc[0], card_color, "", self.card_font_size_top)
if self.lang.has_cursive:
self.board.add_unit(x + 4 - xd, y + 1, 2, 3, classes.board.Label, alc[0], card_color, "",
self.font_size)
else:
img_plus = 1
if self.lang.has_cursive:
self.board.add_unit(x + 1 - xd, y, 2, 1, classes.board.Label, alc[0], card_color, "", self.card_font_size_top)
self.board.add_unit(x + 3 - xd, y, 2, 1, classes.board.Label, alc[0], card_color, "", self.font_size)
else:
self.board.add_unit(x + 2 - xd, y, 2, 1, classes.board.Label, alc[0], card_color, "", self.card_font_size_top)
# frame size 288 x 216
img_src = os.path.join('fc', "fc%03i.jpg" % self.frame_flow[0])
self.board.add_unit(x - xd + img_plus, y + 1, 4, 3, classes.board.ImgShip, self.word_list[0], card_color,
img_src)
self.board.ships[-1].speaker_val = self.pword_list[0]
self.board.ships[-1].speaker_val_update = False
# TO DO adjust for color schemes
font_colors = ((200, 0, 0), font_color2)
if self.mainloop.scheme is not None:
if self.mainloop.scheme.dark:
font_colors = (self.mainloop.scheme.u_font_color3, self.mainloop.scheme.u_font_color)
if self.lang.ltr_text:
self.board.add_unit(x - 2 + xd, y + 4, w, 1, classes.board.MultiColorLetters, self.word_list[0], card_color,
"", 2)
self.board.ships[-1].set_font_colors(font_colors[0], font_colors[1])
else:
self.board.add_unit(x - 2 + xd, y + 4, w, 1, classes.board.Letter, self.word_list[0], card_color, "", 2)
self.board.ships[-1].speaker_val = self.pword_list[0]
self.board.ships[-1].speaker_val_update = False
if self.lang.has_cursive:
if self.lang.ltr_text:
self.board.add_unit(x - 2 + xd, y + 5, w, 2, classes.board.MultiColorLetters, self.word_list[0],
card_color, "", self.font_size)
self.board.ships[-1].set_font_colors(font_colors[0], font_colors[1])
|
johanesmikhael/ContinuityAnalysis | slice_visualization_ui.py | Python | mit | 1,409 | 0.002129 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'slice_visualization.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_slice_visualization_gui(object):
def setupUi(self, slice_visualization_gui):
slice_visualization_gui.setObjectName("slice_visualization_gui")
slice_visualization_gui.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(slice_visualization_gui)
self.centralwidget.setObjectName("centralwidget")
slice_visualization_gui.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(slice_visualization_gui)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName("menubar")
slice_visualization_gui.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(slice_visualization_gui)
| self.statusbar.setObjectName("statusbar")
slice_visualization_gui.setStatusBar(self.statusbar)
self.retranslateUi(slice_visualization_gui)
QtCore.QMetaObject.connectSlotsByName(sli | ce_visualization_gui)
def retranslateUi(self, slice_visualization_gui):
_translate = QtCore.QCoreApplication.translate
slice_visualization_gui.setWindowTitle(_translate("slice_visualization_gui", "MainWindow"))
|
mclois/iteexe | twisted/internet/_posixserialport.py | Python | gpl-2.0 | 2,116 | 0.016541 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial Port Protocol
"""
# system imports
import os, errno
# dependent on pyserial ( http://pyserial.sf.net/ )
# only tested w/ 1.18 (5 Dec 2002)
import serial
from serial import PARITY_NONE, PARITY_EVEN, PARITY_ODD
from serial import STOPBITS_ONE, STOPBITS_TWO
from serial import FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS
from serialport import BaseSerialPort
# twisted imports
from twisted.internet import abstract, fdesc, main
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
"""A select()able serial device, acting as a transport."""
connected = 1
def __init__(self, protocol, deviceNameOrPortNumber, reactor,
baudrate = 9600, bytesize = EIGHTBITS, parity = PARITY_NONE,
stopbits = STOPBITS_ONE, timeout = 0, xonxoff = 0, rtscts = 0):
abstract.FileDescriptor.__init__(self, reactor)
self._serial = serial.Serial(deviceNameOrPortNumber, baudrate = baudrate, bytesize = bytesize, parity = parity, stopbits = stopbits, timeout = timeout, xonxoff = xonxoff, rtscts = rtscts)
self.reactor = reactor
self.flushInput()
self.flushOutput()
self.protocol = protocol |
self.protocol.makeConnection(self)
self.startReading()
def fileno(self):
return self._serial.fd
def writeSomeData(self, data):
"""Write some data to the serial device.
"""
try:
return os.write(self.fileno(), data)
except IOError, io:
if io.args[0] == errno.EAGAIN:
return 0
return main.CONNECTION_LOST
| except OSError, ose:
if ose.errno == errno.EAGAIN:
# I think most systems use this one
return 0
raise
def doRead(self):
"""Some data's readable from serial device.
"""
return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)
def connectionLost(self, reason):
abstract.FileDescriptor.connectionLost(self, reason)
self._serial.close()
|
Bugfry/exercises | exercism/python/rna-transcription/dna.py | Python | mit | 129 | 0.015504 | def to_rna(strain):
mapping = {"G": "C" | , "C": "G", "A": "U", "T": "A"}
return "".join(map(lambda c: ma | pping.get(c), strain))
|
Firefly-Automation/Firefly | Firefly/automation/nest_eco_window/metadata.py | Python | apache-2.0 | 862 | 0.00348 | AUTHOR = 'Zachary Priddy. (me@zpriddy.com)'
TITLE = 'Nest Eco Window'
METADATA = {
'title': TITLE,
'author': AUTHOR,
'commands': [],
'interface': {
'devices': {
"windows": {
'context': 'Windows that will trigger this automation.',
'type': 'deviceList',
'filter': {
| 'request': ['contact']
}
},
},
'send_messages': {
"send": {
        'context': 'Send message when changing the mode of the Nest.',
'type': 'boolean'
}
},
'delays': | {
'delayed': {
'context': 'Time to delay after window closes before changing Nest mode. (seconds)',
'type': 'number'
},
'initial': {
'context': 'Time to delay after window opens before changing Nest mode. (seconds)',
'type': 'number'
}
}
}
} |
Jokeren/neon | neon/visualizations/data.py | Python | apache-2.0 | 7,474 | 0.002944 | # ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from __future__ import division
from builtins import str
import h5py
import numpy as np
def create_minibatch_x(minibatches, minibatch_markers, epoch_axis):
"""
Helper function to build x axis for data captured per minibatch.
Arguments:
minibatches (int): how many total minibatches
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((minibatches,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
x[last_e:e] = e_idx + (np.arange(float(e_minibatches)) / e_minibatches)
last_e = e
else:
x = np.arange(minibatches)
return x
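
# Editor's illustration (hedged): with 5 minibatches split into epochs of
# sizes 2 and 3 (minibatch_markers == [2, 5]), epoch_axis=True yields the
# fractional epoch positions [0.0, 0.5, 1.0, 1.33..., 1.66...], while
# epoch_axis=False yields simply arange(5).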
def create_epoch_x(points, epoch_freq, minibatch_markers, epoch_axis):
"""
Helper function to build x axis for points captured per epoch.
Arguments:
points (int): how many data points need a corresponding x axis points
epoch_freq (int): are points once an epoch or once every n epochs?
minibatch_markers (int array): cumulative number of minibatches complete at a given epoch
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
"""
if epoch_axis:
x = np.zeros((points,))
last_e = 0
for e_idx, e in enumerate(minibatch_markers):
e_minibatches = e - last_e
if (e_idx + 1) % epoch_freq == 0:
x[e_idx // epoch_freq] = e_idx + ((e_minibatches - 1) // e_minibatches)
last_e = e
else:
x = minibatch_markers[(epoch_freq - 1)::epoch_freq] - 1
return x
def h5_cost_data(filename, epoch_axis=True):
"""
Read cost data from hdf5 file. Generate x axis data for each cost line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, x data, y data)
"""
ret = list()
with h5py.File(filename, "r") as f:
config, cost, time_markers = [f[x] for x in ['config', 'cost', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
minibatch_markers = time_markers['minibatch']
for name, ydata in cost.items():
y = ydata[...]
if ydata.attrs['time_markers'] == 'epoch_freq':
y_epoch_freq = ydata.attrs['epoch_freq']
assert len(y) == total_epochs // y_epoch_freq
x = create_epoch_x(len(y), y_epoch_freq, minibatch_markers, epoch_axis)
elif ydata.attrs['time_markers'] == 'minibatch':
assert len(y) == total_minibatches
x = create_minibatch_x(total_minibatches, minibatch_markers, epoch_axis)
else:
raise TypeError('Unsupported data format for h5_cost_data')
ret.append((name, x, y))
return ret
def h5_hist_data(filename, epoch_axis=True):
"""
Read histogram data from hdf5 file. Generate x axis data for each hist line.
Arguments:
filename (str): Filename with hdf5 cost data
epoch_axis (bool): whether to render epoch or minibatch as the integer step in the x axis
Returns:
list of tuples of (name, data, dh, dw, bins, offset)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'hist' in f:
hists, config = [f[x] for x in ['hist', 'config']]
bins, offset, time_markers = [hists.attrs[x]
for x in ['bins', 'offset', 'time_markers']]
total_epochs = config.attrs['total_epochs']
total_minibatches = config.attrs['total_minibatches']
for hname, hdata in hists.items():
dw = total_epochs if (time_markers == 'epoch_freq') else total_minibatches
dh = bins
ret.append((hname, hdata[...], dh, dw, bins, offset))
return ret
def convert_rgb_to_bokehrgba(img_data, downsample=1):
"""
Convert RGB image to two-dimensional array of RGBA values (encoded as 32-bit integers)
(required by Bokeh). The functionality is currently not available in Bokeh.
An issue was raised here: https://github.com/bokeh/bokeh/issues/1699 and this function is a
modified version of the suggested solution.
    Arguments:
        img_data (ndarray, shape: [N, M, 3], dtype: uint8): image data
        downsample (int): keep every n-th pixel in each dimension, for
            render performance (default 1, i.e. no downsampling)
Returns:
img (ndarray): 2D image array of RGBA values
"""
if img_data.dtype != np.uint8:
raise NotImplementedError
if img_data.ndim != 3:
raise NotImplementedError
# downsample for render performance, v-flip since plot origin is bottom left
# img_data = np.transpose(img_data, (1,2,0))
img_data = img_data[::-downsample, ::downsample, :]
img_h, img_w, C = img_data.shape
# add an alpha channel to the image and r | ecast from pixels of u8u8u8u8 to u32
bokeh_img = np.dstack([img_data, 255 * np.ones((img_h, img_w), np.uint8)])
final_image = bokeh_img.reshape(img_h, img_w * (C + 1)).view(np.uint32)
return final_image
def h5_deconv_data | (filename):
"""
Read deconv visualization data from hdf5 file.
Arguments:
filename (str): Filename with hdf5 deconv data
Returns:
list of lists. Each inner list represents one layer, and consists of
tuples (fm, deconv_data)
"""
ret = list()
with h5py.File(filename, "r") as f:
if 'deconv' not in list(f.keys()):
return None
act_data = f['deconv/max_act']
img_data = f['deconv/img']
for layer in list(act_data.keys()):
layer_data = list()
for fm in range(act_data[layer]['vis'].shape[0]):
# to avoid storing entire dataset, imgs are cached as needed, have to look up
batch_ind, img_ind = act_data[layer]['batch_img'][fm]
img_store = img_data['batch_{}'.format(batch_ind)]
img_cache_ofs = img_store.attrs[str(img_ind)]
# have to convert from rgb to rgba and cast as uint32 dtype for bokeh
plot_img = convert_rgb_to_bokehrgba(img_store['HWC_uint8'][:, :, :, img_cache_ofs])
plot_deconv = convert_rgb_to_bokehrgba(act_data[layer]['vis'][fm])
layer_data.append((fm, plot_deconv, plot_img))
ret.append((layer, layer_data))
return ret
|
wcy940418/CRNN-end-to-end | src/test.py | Python | mit | 1,976 | 0.034919 | from __future__ import print_function
from model import CRNN, CtcCriterion
from dataset import DatasetLmdb
import os
import tensorflow as tf
import numpy as np
class Conf:
def __init__(self):
self.nClasses = 36
self.trainBatchSize = 100
self.testBatchSize = 200
self.maxIteration = 1000
self.displayInterval = 200
self.testInteval = 100
self.modelParFile = './crnn.model'
self.dataSet = '../data'
self.maxLength = 24
def labelIn | t2Char(n):
if n >= 0 and n <=9:
c = chr(n + 48)
elif n >= 10 and n<= 35:
c = chr(n + 97 - 10)
elif n == 36:
c = ''
return c
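
# Editor's note (hedged): labels 0-9 map to '0'-'9', 10-35 map to 'a'-'z',
# and 36 is treated as the CTC blank (empty string), e.g.
# labelInt2Char(10) == 'a'.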
def convertSparseArrayToStrs(p):
print(p[0].shape, p[1].shape, p[2].shape)
print(p[2][0], p[2][1])
results = []
labels = []
for i in range(p[2][0]):
results.append([36 for x in range(p[2][1])])
for i in range(p[0].shape[0]):
x, y = p[0][i]
results[x][y] = p[1][i]
for i in | range(len(results)):
label = ''
for j in range(len(results[i])):
label += labelInt2Char(results[i][j])
labels.append(label)
return labels
if __name__ == '__main__':
gConfig = Conf()
sess = tf.InteractiveSession()
weights = None
if os.path.isfile(gConfig.modelParFile+'.index'):
weights = gConfig.modelParFile
imgs = tf.placeholder(tf.float32, [None, 32, 100])
labels = tf.sparse_placeholder(tf.int32)
batches = tf.placeholder(tf.int32, [None])
isTraining = tf.placeholder(tf.bool)
crnn = CRNN(imgs, gConfig, isTraining, weights, sess)
ctc = CtcCriterion(crnn.prob, labels, batches)
data = DatasetLmdb(gConfig.dataSet)
testSeqLength = [gConfig.maxLength for i in range(10)]
batchSet, labelSet = data.nextBatch(10)
p = sess.run(ctc.decoded, feed_dict={
crnn.inputImgs:batchSet,
crnn.isTraining:False,
ctc.target:labelSet,
ctc.nSamples:testSeqLength
})
original = convertSparseArrayToStrs(labelSet)
predicted = convertSparseArrayToStrs(p[0])
for i in range(len(original)):
print("original: %s, predicted: %s" % (original[i], predicted[i])) |
kpech21/Greek-Stemmer | greek_stemmer/closets/rules.py | Python | lgpl-3.0 | 7,944 | 0.008683 | # -*- coding: utf-8 -*-
# extracted rules for stemming
rules = {
'verbs': {
'irregular': {
'type_1': ['ΕΙΜΑΙ', 'ΕΙΣΑΙ', 'ΕΙΝΑΙ', 'ΕΙΜΑΣΤΕ', 'ΕΙΣΤΕ', 'ΕΙΣΑΣΤΕ'],
'type_2': ['ΗΜΟΥΝ', 'ΗΣΟΥΝ', 'ΗΤΑΝΕ', 'ΗΜΟΥΝΑ', 'ΗΣΟΥΝΑ', 'ΗΜΑΣΤΕ', 'ΗΣΑΣΤΕ', 'ΗΜΑΣΤΑΝ', 'ΗΣΑΣΤΑΝ', 'ΗΤΑΝ',
'ΔΩ', 'ΔΕΙΣ', 'ΔΕΙ', 'ΔΟΥΜΕ', 'ΔΕΙΤΕ', 'ΔΟΥΝ', 'ΠΩ', 'ΠΕΙΣ', 'ΠΕΙ', 'ΠΟΥΜΕ', 'ΠΕΙΤΕ', 'ΠΟΥΝ',
'ΖΩ', 'ΖΕΙΣ', 'ΖΕΙ', 'ΖΟΥΜΕ', 'ΖΕΙΤΕ', 'ΖΟΥΝ', 'ΖΟΥΝΕ', 'ΖΟΥΣΑ', 'ΖΟΥΣΕΣ', 'ΖΟΥΣΕ', 'ΖΟΥΣΑΜΕ',
'ΖΟΥΣΑΤΕ', 'ΖΟΥΣΑΝΕ', 'ΖΟΥΣΑΝ']
},
'singular': ['ΙΟΜΟΥΝΑ', 'ΙΟΣΟΥΝΑ', 'ΟΥΜΟΥΝΑ', 'ΟΥΣΟΥΝΑ', 'ΙΟΜΟΥΝ', 'ΙΟΣΟΥΝ', 'ΙΟΤΑΝΕ', 'ΟΥΣΟΥΝ', 'ΟΥΜΟΥΝ',
'ΟΜΟΥΝΑ', 'ΟΣΟΥΝΑ', 'ΑΡΗΣΕΣ', 'ΩΝΤΑΣ', 'ΟΝΤΑΣ', 'ΟΜΟΥΝ', 'ΟΣΟΥΝ', 'ΟΤΑΝΕ', 'ΟΥΣΑΙ', 'ΟΥΤΑΙ',
'ΟΥΣΕΣ', 'ΑΡΕΙΣ', 'ΙΕΜΑΙ', 'ΙΕΣΑΙ', 'ΙΕΤΑΙ', 'ΟΥΜΑΙ', 'ΕΙΣΑΙ', 'ΕΙΤΑΙ', 'ΙΟΤΑΝ', 'ΑΡΗΣΕ', 'ΑΡΗΣΑ',
'ΕΣΑΙ', 'ΕΤΑΙ', 'ΗΚΕΣ', 'ΟΜΑΙ', 'ΟΤΑΝ', 'ΟΥΣΑ', 'ΟΥΣΕ', 'ΑΓΕΣ', 'ΩΜΑΙ', 'ΑΣΑΙ', 'ΑΤΑΙ', 'ΑΡΕΣ',
'ΑΡΕΙ', 'ΜΑΙ', 'ΣΑΙ', 'ΤΑΙ', 'ΜΗΝ', 'ΗΚΑ', 'ΗΚΕ', 'ΕΙΣ', 'ΑΕΙ', 'ΑΓΑ', 'ΑΓΕ', 'ΟΙΣ', 'ΑΡΩ', 'ΑΡΑ',
'ΑΡΕ', 'ΟΥ', 'ΗΝ', 'ΗΣ', 'ΕΙ', 'ΑΩ', 'ΑΣ', 'ΕΣ', 'ΟΙ', 'ΣΟ', 'ΤΟ', 'Ω', 'Α', 'Ε', 'Η'],
'plural': ['ΙΟΝΤΟΥΣΑΝ', 'ΙΟΜΑΣΤΑΝ', 'ΙΟΣΑΣΤΑΝ', 'ΙΟΥΝΤΑΝΕ', 'ΟΥΜΑΣΤΑΝ', 'ΟΥΣΑΣΤΑΝ', 'ΟΝΤΟΥΣΑΝ', 'ΟΜΑΣΤΑΝ',
'ΟΥΝΤΑΝΕ', 'ΟΣΑΣΤΑΝ', 'ΑΡΗΣΑΜΕ', 'ΑΡΗΣΑΤΕ', 'ΙΟΜΑΣΤΕ', 'ΙΟΣΑΣΤΕ', 'ΙΟΥΝΤΑΙ', 'ΟΥΜΑΣΤΕ', 'ΙΟΝΤΑΝΕ',
'ΙΟΥΝΤΑΝ', 'ΑΓΑΜΕ', 'ΑΓΑΤΕ', 'ΟΥΣΘΕ', 'ΩΜΕΘΑ', 'ΑΡΕΤΕ', 'ΑΡΟΥΝ', 'ΩΝΤΑΣ', 'ΩΝΤΑΙ', 'ΑΡΑΜΕ', 'ΑΡΑΤΕ',
'ΑΡΑΝΕ', 'ΟΝΤΑΣ', 'ΗΚΑΜΕ', 'ΕΙΣΤΕ', 'ΟΝΤΑΙ', 'ΗΚΑΤΕ', 'ΗΚΑΝΕ', 'ΑΓΑΝΕ', 'ΟΝΤΑΝ', 'ΙΕΣΤΕ', 'ΟΥΤΑΝ',
'ΟΥΣΙΝ', 'ΟΥΣΑΝ', 'ΟΥΤΕ', 'ΜΕΘΑ', 'ΝΤΑΙ', 'ΗΜΕΝ', 'ΗΣΕΝ', 'ΗΣΑΝ', 'ΗΚΑΝ', 'ΟΥΜΕ', 'ΟΥΝΕ', 'ΕΙΤΕ',
'ΑΣΘΕ', 'ΑΓΑΝ', 'ΕΣΤΕ', 'ΑΡΑΝ', 'ΩΜΕΝ', 'ΟΥΣΙ', 'ΟΜΕ', 'ΕΤΕ', 'ΑΜΕ', 'ΑΤΕ', 'ΑΝΕ', 'ΟΥΝ', 'ΗΤΕ',
'ΣΘΕ', 'ΝΤΟ', 'ΑΝ', 'ΤΕ']
},
'non_verbs': {
'neuter_noun': {
'matos': ['ΜΑΤΟΣ', 'ΜΑΤΩΝ', 'ΜΑΤΑ', 'ΜΑ']
},
'propername': ['ΟΝΟΣ', 'ΩΝΟΣ', 'ΟΡΟΣ', 'ΕΥΣ', 'ΕΩΣ', 'ΟΝΤΟΣ', 'ΚΤΟΣ', 'ΟΥΣ', 'ΩΝ', 'ΩΡ', 'ΙΣ', 'ΩΣ', 'Ξ', 'Ω'],
'adjectives': ['ΟΥΣΤΕΡΟΥΣ', 'ΟΥΣΤΑΤΟΥΣ', 'ΟΥΣΤΕΡΟΥ', 'ΟΥΣΤΕΡΟΣ', 'ΕΣΤΕΡΟΥΣ', 'ΟΥΣΤΕΡΗΣ', 'ΕΣΤΑΤΟΥΣ', 'ΟΥΣΤΕΡΩΝ',
'ΟΥΣΤΑΤΕΣ', 'ΟΥΣΤΕΡΕΣ', 'ΟΥΣΤΕΡΟΙ', 'ΑΙΤΕΡΟΥΣ', 'ΟΥΣΤΑΤΟΣ', 'ΟΥΣΤΑΤΟΥ', 'ΟΥΣΤΑΤΗΣ', 'ΟΥΣΤΑΤΩΝ',
'ΥΤΕΡΟΥΣ', 'ΕΣΤΕΡΟΙ', 'ΕΣΤΕΡΩΝ', 'ΕΣΤΕΡΕΣ', 'ΟΥΣΤΕΡΗ', 'ΩΜΕΝΟΥΣ', 'ΕΣΤΑΤΗΣ', 'ΕΣΤΕΡΑΣ',
'ΕΣΤΕΡΗΣ', 'ΟΥΣΤΕΡΟ', 'ΑΣΜΕΝΟΙ', 'ΟΤΕΡΟΥΣ', 'ΕΣΤΑΤΟΥ', 'ΕΣΤΑΤΟΣ', 'ΟΥΣΤΕΡΑ', 'ΕΣΤΑΤΕΣ',
'ΥΤΑΤΟΥΣ', 'ΕΣΤΕΡΟΥ', 'ΕΣΤΕΡΟΣ', 'ΑΙΤΕΡΟΣ', 'ΑΙΤΕΡΟΥ', | 'ΕΣΤΑΤΟΙ', 'ΑΙΤΕΡΟΙ', 'ΑΙΤΕΡΩΝ',
'ΑΙΤΕΡΗΣ', 'ΑΙΤΕΡΑΣ', 'ΟΥΜΕΝΟΥ', 'ΟΥΜΕΝΟΣ', 'ΟΥΜΕΝΗΣ', 'ΟΥΜΕΝΩΝ', 'ΟΥΜΕΝΕΣ', 'ΟΜΕΝΟΥΣ',
'ΕΣΤΑΤΩΝ', 'ΕΣΤΕΡΟΝ', 'ΗΜΕΝΟΥΣ', 'ΟΥΣΤΑΤΗ', 'ΟΥΣΤΑΤΑ', 'ΕΣΤΕΡΟΝ', 'ΟΥΣΤΑΤΟ', 'ΩΤΕΡΟΥΣ',
'ΩΤΑΤΟΥΣ', 'ΥΤΕΡΕΣ', 'ΩΜΕΝΟΥ', 'ΟΤΑΤΩΝ', 'ΕΣΤΑΤΟ', 'ΕΣΤΑΤΗ', | 'ΥΤΑΤΩΝ', 'ΥΤΕΡΗΣ', 'ΟΜΕΝΟΣ',
'ΟΤΕΡΟΙ', 'ΟΤΕΡΩΝ', 'ΥΤΑΤΟΣ', 'ΥΤΑΤΟΥ', 'ΕΣΤΑΤΑ', 'ΥΤΑΤΗΣ', 'ΟΤΕΡΟΣ', 'ΟΤΕΡΟΥ', 'ΥΤΑΤΕΣ',
'ΟΤΕΡΕΣ', 'ΥΤΕΡΟΙ', 'ΥΤΕΡΩΝ', 'ΑΙΤΕΡΟ', 'ΟΤΕΡΗΣ', 'ΥΤΕΡΟΣ', 'ΑΙΤΕΡΗ', 'ΑΙΤΕΡΑ', 'ΜΕΝΟΥΣ',
'ΥΤΕΡΟΥ', 'ΩΜΕΝΗΣ', 'ΩΜΕΝΩΝ', 'ΩΜΕΝΕΣ', 'ΟΥΜΕΝΟ', 'ΟΥΜΕΝΗ', 'ΟΥΜΕΝΑ', 'ΟΜΕΝΕΣ', 'ΩΜΕΝΟΣ',
'ΟΜΕΝΗΣ', 'ΟΜΕΝΩΝ', 'ΕΣΤΕΡΟ', 'ΕΣΤΕΡΗ', 'ΕΣΤΕΡΑ', 'ΟΤΑΤΟΣ', 'ΟΤΑΤΗΣ', 'ΟΜΕΝΟΥ', 'ΟΤΑΤΟΙ',
'ΥΤΑΤΟΙ', 'ΟΤΑΤΟΥ', 'ΗΜΕΝΗΣ', 'ΟΜΕΝΟΙ', 'ΗΜΕΝΟΥ', 'ΗΜΕΝΟΙ', 'ΗΜΕΝΩΝ', 'ΜΕΝΟΥΣ', 'ΗΜΕΝΟΣ',
'ΩΜΕΝΟΙ', 'ΟΤΑΤΕΣ', 'ΩΤΕΡΟΣ', 'ΩΤΕΡΟΥ', 'ΩΤΕΡΟΝ', 'ΩΤΕΡΟΙ', 'ΩΤΕΡΩΝ', 'ΩΤΕΡΗΣ', 'ΩΤΕΡΕΣ',
'ΩΤΕΡΑΣ', 'ΩΤΑΤΟΣ', 'ΩΤΑΤΟΥ', 'ΩΤΑΤΟΙ', 'ΩΤΑΤΩΝ', 'ΩΤΑΤΗΣ', 'ΩΤΑΤΕΣ', 'ΜΕΝΟΥ', 'ΜΕΝΗΣ',
'ΜΕΝΟΙ', 'ΜΕΝΩΝ', 'ΩΜΕΝΟ', 'ΩΜΕΝΗ', 'ΩΜΕΝΑ', 'ΥΤΕΡΑ', 'ΥΤΑΤΑ', 'ΥΤΕΡΟ', 'ΟΤΑΤΗ', 'ΜΕΝΕΣ',
'ΟΜΕΝΑ', 'ΩΜΕΝΟ', 'ΩΜΕΝΗ', 'ΟΤΕΡΟ', 'ΟΤΕΡΗ', 'ΕΙΣΕΣ', 'ΟΜΕΝΟ', 'ΟΜΕΝΗ', 'ΥΤΕΡΗ', 'ΟΤΕΡΑ',
'ΜΕΝΟΙ', 'ΥΤΑΤΗ', 'ΟΤΑΤΟ', 'ΟΤΑΤΑ', 'ΜΕΝΟΥ', 'ΜΕΝΟΣ', 'ΗΜΕΝΗ', 'ΜΕΝΩΝ', 'ΜΕΝΗΣ', 'ΗΜΕΝΟ',
'ΗΜΕΝΑ', 'ΟΝΤΑΣ', 'ΩΝΤΑΣ', 'ΩΤΕΡΟ', 'ΩΤΕΡΕ', 'ΩΤΕΡΗ', 'ΩΤΕΡΑ', 'ΩΤΑΤΟ', 'ΩΤΑΤΕ', 'ΩΤΑΤΗ',
'ΩΤΑΤΑ', 'ΜΕΝΟ', 'ΜΕΝΗ', 'ΜΕΝΑ', 'ΕΙΕΣ', 'ΕΙΩΝ', 'ΟΥΣ', 'ΕΩΣ', 'ΕΟΣ', 'ΩΣΑ', 'ΟΥΝ', 'ΕΙΣ', 'ΟΥΣ',
'ΕΩΝ', 'ΙΣ', 'ΟΣ', 'ΥΣ', 'ΟΥ', 'ΑΣ', 'ΗΣ', 'ΟΣ', 'ΕΣ', 'ΕΑ', 'ΩΝ', 'ΤΙ', 'ΕΙ', 'ΟΝ', 'ΑΝ', 'ΕΝ',
'ΙΝ', 'ΟΙ', 'Η', 'Α', 'Ο', 'Ι', 'Υ', 'Ε'],
'singular_noun': ['ΟΥΣ', 'ΕΩΣ', 'ΕΟΣ', 'ΟΥΝ', 'ΕΙΣ', 'ΥΣ', 'ΩΣ', 'ΟΥ', 'ΑΣ', 'ΗΣ', 'ΟΣ', 'ΕΣ', 'ΩΝ', 'ΕΙ', 'ΟΝ',
'ΑΝ', 'ΕΝ', 'ΙΝ', 'ΟΙ', 'ΙΣ', 'Η', 'Α', 'Ω', 'Ο', 'Ι', 'Ε'],
'plural_noun': ['ΕΙΣΕΣ', 'ΕΙΣΩΝ', 'ΙΑΔΕΣ', 'ΙΑΔΩΝ', 'ΟΥΔΕΣ', 'ΟΥΔΩΝ', 'ΙΜΑΤΑ', 'ΟΥΣ', 'ΕΙΣ', 'ΕΩΝ', 'ΟΙ', 'ΩΝ',
'ΕΣ', 'ΕΑ', 'Α', 'Η'],
'adverb': ['ΟΥΣΤΑΤΑ', 'ΑΙΤΕΡΑ', 'ΑΙΤΕΡΩΣ', 'ΟΤΑΤΑ', 'ΕΣΤΑΤΑ', 'ΥΤΑΤΑ', 'ΟΤΕΡΟ', 'ΟΤΕΡΑ', 'ΕΣΤΕΡΑ', 'ΥΤΕΡΑ',
'ΑΣΙΑ', 'ΜΕΝΑ', 'ΕΩΣ', 'ΤΑΤΑ', 'ΩΣ', 'ΟΥ', 'Α', 'Υ', 'Ο'],
'irregular_adjective': ['ΤΕΡΟΥΣ', 'ΤΕΡΟΣ', 'ΤΕΡΟΝ', 'ΤΕΡΟΥ', 'ΤΕΡΗΣ', 'ΤΕΡΟΙ', 'ΤΕΡΩΝ', 'ΤΕΡΕΣ', 'ΤΑΤΟΣ',
'ΤΑΤΟΥ', 'ΤΑΤΗΣ', 'ΤΑΤΟΙ', 'ΤΑΤΩΝ', 'ΤΑΤΕΣ', 'ΤΕΡΟ', 'ΤΕΡΗ', 'ΤΕΡΑ', 'ΤΑΤΟ', 'ΤΑΤΗ',
'ΤΑΤΑ']
}
}
|
Tomsod/gemrb | gemrb/GUIScripts/iwd/CharGen.py | Python | gpl-2.0 | 92,820 | 0.040509 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003-2005 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Character Generation
###################################################
import GemRB
from GUIDefines import *
from ie_stats import *
from ie_spells import LS_MEMO
import GUICommon
import Spellbook
import CommonTables
import LUSkillsSelection
CharGenWindow = 0
CharGenState = 0
TextArea = 0
PortraitButton = 0
AcceptButton = 0
GenderButton = 0
GenderWindow = 0
GenderTextArea = 0
GenderDoneButton = 0
Portrait = 0
PortraitsTable = 0
PortraitPortraitButton = 0
RaceButton = 0
RaceWindow = 0
RaceTextArea = 0
RaceDoneButton = 0
ClassButton = 0
ClassWindow = 0
ClassTextArea = 0
ClassDoneButton = 0
ClassMultiWindow = 0
ClassMultiTextArea = 0
ClassMultiDoneButton = 0
KitTable = 0
KitWindow = 0
KitTextArea = 0
KitDoneButton = 0
AlignmentButton = 0
AlignmentWindow = 0
AlignmentTextArea = 0
AlignmentDoneButton = 0
AbilitiesButton = 0
AbilitiesWindow = 0
AbilitiesTable = 0
AbilitiesRaceAddTable = 0
AbilitiesRaceReqTable = 0
AbilitiesClassReqTable = 0
AbilitiesMinimum = 0
AbilitiesMaximum = 0
AbilitiesModifier = 0
AbilitiesTextArea = 0
AbilitiesRecallButton = 0
AbilitiesDoneButton = 0
SkillsButton = 0
SkillsWindow = 0
SkillsTable = 0
SkillsTextArea = 0
SkillsDoneButton = 0
SkillsPointsLeft = 0
SkillsState = 0
RacialEnemyButton = 0
RacialEnemyWindow = 0
RacialEnemyTable = 0
RacialEnemyTextArea = 0
RacialEnemyDoneButton = 0
ProficienciesWindow = 0
ProficienciesTable = 0
ProfsMaxTable = 0
ProficienciesTextArea = 0
ProficienciesDoneButton = 0
ProficienciesPointsLeft = 0
MageSpellsWindow = 0
MageSpellsTextArea = 0
MageSpellsDoneButton = 0
MageSpellsSelectPointsLeft = 0
MageMemorizeWindow = 0
MageMemorizeTextArea = 0
MageMemorizeDoneButton = 0
MageMemorizePointsLeft = 0
PriestMemorizeWindow = 0
PriestMemorizeTextArea = 0
PriestMemorizeDoneButton = 0
PriestMemorizePointsLeft = 0
AppearanceButton = 0
AppearanceWindow = 0
AppearanceTable = 0
AppearanceAvatarButton = 0
AppearanceHairButton = 0
AppearanceSkinButton = 0
AppearanceMajorButton = 0
AppearanceMinorButton = 0
HairColor = 0
SkinColor = 0
MajorColor = 0
MinorColor = 0
CharSoundWindow = 0
CharSoundTable = 0
CharSoundStrings = 0
BiographyButton = 0
BiographyWindow = 0
BiographyField = 0
NameButton = 0
NameWindow = 0
NameField = 0
NameDoneButton = 0
SoundIndex = 0
VerbalConstants = None
HasStrExtra = 0
MyChar = 0
ImportedChar = 0
def OnLoad():
global CharGenWindow, CharGenState, TextArea, PortraitButton, AcceptButton
global GenderButton, RaceButton, ClassButton, AlignmentButton
global AbilitiesButton, SkillsButton, AppearanceButton, BiographyButton, NameButton
global KitTable, ProficienciesTable, RacialEnemyTable
global AbilitiesTable, SkillsTable, PortraitsTable
global MyChar, ImportedChar
KitTable = GemRB.LoadTable ("magesch")
ProficienciesTable = GemRB.LoadTable ("weapprof")
RacialEnemyTable = GemRB.LoadTable ("haterace")
AbilitiesTable = GemRB.LoadTable ("ability")
SkillsTable = GemRB.LoadTable ("skills")
PortraitsTable = GemRB.LoadTable ("pictures")
GemRB.LoadWindowPack ("GUICG", 640, 480)
CharGenWindow = GemRB.LoadWindow (0)
CharGenWindow.SetFrame ()
CharGenState = 0
MyChar = GemRB.GetVar ("Slot")
ImportedChar = 0
GenderButton = CharGenWindow.GetControl (0)
GenderButton.SetState (IE_GUI_BUTTON_ENABLED)
GenderButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
GenderButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, GenderPress)
GenderButton.SetText (11956)
RaceButton = CharGenWindow.GetControl (1)
RaceButton.SetState (IE_GUI_BUTTON_DISABLED)
RaceButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, RacePress)
RaceButton.SetText (11957)
ClassButton = CharGenWindow.GetC | ontrol (2)
ClassButton.SetState (IE_GUI_BUTTON_DISABLED)
ClassButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, ClassPress)
ClassButton.SetText (11959)
AlignmentButton = CharGenWindow.GetControl (3)
AlignmentButton.SetState (IE_GUI_BUTTON_DISABLED) |
AlignmentButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, AlignmentPress)
AlignmentButton.SetText (11958)
AbilitiesButton = CharGenWindow.GetControl (4)
AbilitiesButton.SetState (IE_GUI_BUTTON_DISABLED)
AbilitiesButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, AbilitiesPress)
AbilitiesButton.SetText (11960)
SkillsButton = CharGenWindow.GetControl (5)
SkillsButton.SetState (IE_GUI_BUTTON_DISABLED)
SkillsButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, SkillsPress)
SkillsButton.SetText (11983)
AppearanceButton = CharGenWindow.GetControl (6)
AppearanceButton.SetState (IE_GUI_BUTTON_DISABLED)
AppearanceButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, AppearancePress)
AppearanceButton.SetText (11961)
BiographyButton = CharGenWindow.GetControl (16)
BiographyButton.SetState (IE_GUI_BUTTON_DISABLED)
BiographyButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, BiographyPress)
BiographyButton.SetText (18003)
NameButton = CharGenWindow.GetControl (7)
NameButton.SetState (IE_GUI_BUTTON_DISABLED)
NameButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, NamePress)
NameButton.SetText (11963)
BackButton = CharGenWindow.GetControl (11)
BackButton.SetState (IE_GUI_BUTTON_ENABLED)
BackButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, BackPress)
BackButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
PortraitButton = CharGenWindow.GetControl (12)
PortraitButton.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_NO_IMAGE, OP_SET)
PortraitButton.SetState (IE_GUI_BUTTON_LOCKED)
ImportButton = CharGenWindow.GetControl (13)
ImportButton.SetState (IE_GUI_BUTTON_ENABLED)
ImportButton.SetText (13955)
ImportButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, ImportPress)
CancelButton = CharGenWindow.GetControl (15)
CancelButton.SetState (IE_GUI_BUTTON_ENABLED)
CancelButton.SetText (13727)
CancelButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CancelPress)
AcceptButton = CharGenWindow.GetControl (8)
AcceptButton.SetState (IE_GUI_BUTTON_DISABLED)
AcceptButton.SetText (11962)
AcceptButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, AcceptPress)
TextArea = CharGenWindow.GetControl (9)
TextArea.SetText (16575)
CharGenWindow.SetVisible (WINDOW_VISIBLE)
return
def BackPress():
global CharGenWindow, CharGenState, SkillsState
global GenderButton, RaceButton, ClassButton, AlignmentButton, AbilitiesButton, SkillsButton, AppearanceButton, BiographyButton, NameButton
if CharGenState > 0:
CharGenState = CharGenState - 1
else:
CancelPress()
return
if CharGenState > 6:
CharGenState = 6
GemRB.SetToken ("CHARNAME","")
if CharGenState == 0:
RaceButton.SetState (IE_GUI_BUTTON_DISABLED)
RaceButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_NAND)
GenderButton.SetState (IE_GUI_BUTTON_ENABLED)
GenderButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
elif CharGenState == 1:
ClassButton.SetState (IE_GUI_BUTTON_DISABLED)
ClassButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_NAND)
RaceButton.SetState (IE_GUI_BUTTON_ENABLED)
RaceButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
elif CharGenState == 2:
AlignmentButton.SetState (IE_GUI_BUTTON_DISABLED)
AlignmentButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_NAND)
ClassButton.SetState (IE_GUI_BUTTON_ENABLED)
ClassButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
elif CharGenState == 3:
AbilitiesButton.SetState (IE_GUI_BUTTON_DISABLED)
AbilitiesButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_NAND)
AlignmentButton.SetState (IE_GUI_BUTTON_ENABLED)
AlignmentButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
elif CharGenState == 4:
SkillsButton.SetState (IE_GUI_BUTTON_DISABLED)
SkillsButton.SetFlags (IE_GU |
brandonw/personal-site | docs/conf.py | Python | bsd-3-clause | 7,764 | 0.007728 | # -*- coding: utf-8 -*-
#
# Personal Site documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Personal Site'
copyright = u'2014, Brandon Waskiewicz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'personal-sitedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'personal-site.tex', u'Personal Site Documentation',
u'Brandon Waskiewicz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'personal-site', u'Personal Site Documentation',
[u'Brandon Waskiewicz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls | = False
# -- Options for Texinfo output ------------------------------------------------ |
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'personal-site', u'Personal Site Documentation',
u'Brandon Waskiewicz', 'Personal Site',
'My personal website.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote' |
miso-belica/jusText | justext/paragraph.py | Python | bsd-2-clause | 1,667 | 0 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import re
from .utils import normalize_whitespace
HEADINGS_PATTERN = re.compile(r"\bh\d\b")
class Paragraph( | object):
"""Object representing one block of text in HTML."""
def __init__(self, path):
self.dom_path = path.dom
self.xpath = path.xpath
self.text_nodes = []
self.chars_count_in_links = 0
self.tags_count = 0
s | elf.class_type = "" # short | neargood | good | bad
@property
def is_heading(self):
return bool(HEADINGS_PATTERN.search(self.dom_path))
@property
def is_boilerplate(self):
return self.class_type != "good"
@property
def text(self):
text = "".join(self.text_nodes)
return normalize_whitespace(text.strip())
def __len__(self):
return len(self.text)
@property
def words_count(self):
return len(self.text.split())
def contains_text(self):
return bool(self.text_nodes)
def append_text(self, text):
text = normalize_whitespace(text)
self.text_nodes.append(text)
return text
def stopwords_count(self, stopwords):
return sum(word.lower() in stopwords for word in self.text.split())
def stopwords_density(self, stopwords):
if self.words_count == 0:
return 0
return self.stopwords_count(stopwords) / self.words_count
def links_density(self):
text_length = len(self.text)
if text_length == 0:
return 0
return self.chars_count_in_links / text_length
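# Illustrative usage sketch (added for clarity; PathInfo below is a
# hypothetical stand-in for jusText's real path object, which exposes
# .dom and .xpath). Run as `python -m justext.paragraph` so the relative
# import above resolves.
if __name__ == "__main__":
    from collections import namedtuple
    PathInfo = namedtuple("PathInfo", ["dom", "xpath"])
    p = Paragraph(PathInfo(dom="body.p", xpath="/html/body/p[1]"))
    p.append_text("Short example paragraph about boilerplate detection")
    print(p.words_count)                   # 6
    print(p.stopwords_density({"about"}))  # 1/6 ~= 0.167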
|
Lamecarlate/gourmet | gourmet/gtk_extras/timeEntry.py | Python | gpl-2.0 | 4,398 | 0.012278 | ### Copyright (C) 2005 Thomas M. Hinkle
### Copyright (C) 2009 Rolf Leggewie
###
### This library is free software; you can redistribute it and/or
### modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation; either version 2 of the
### License, or (at your option) any later version.
###
### This library is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
### General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this library; if not, write to the Free Software
### Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
### USA
import gtk
from gettext import gettext as _
import gourmet.convert as convert
import validatingEntry
TIME_TO_READ = 1000
class TimeEntry (validatingEntry.ValidatingEntry):
__gtype_name__ = 'TimeEntry'
def __init__ (self, conv=None):
if not conv: self.conv = convert.get_converter()
else: self.conv = conv
validatingEntry.ValidatingEntry.__init__(self)
self.entry.get_value = self.get_value
self.entry.set_value = self.set_value
def find_errors_in_progress (self, txt):
if (not txt) or self.conv.timestring_to_seconds(txt):
return None
elif not convert.NUMBER_MATCHER.match(txt.split()[0]):
return _('Time must begin with a number or fraction followed by a unit (minutes, hours, etc.).')
else:
words = txt.split()
#if len(words) == 1:
# self._hide_warning_slowly()
# return
if convert.NUMBER_MATCHER.match(words[-1]):
return None
else:
partial_unit = words[-1]
for u in self.conv.unit_to_seconds.keys():
if u.lower().find(partial_unit.lower())==0:
return None
#self._hide_warning_slowly()
#return
return _('Invalid input.') + ' ' + \
_('Time must be expressed in hours, minutes, seconds, etc.')
#self._show_warning()
#else:
# self.set_warning_text("Invalid or incomplete time")
# self._show_warning()
def find_completed_errors (self,*args):
txt = self.entry.get_text()
if txt and not self.conv.timestring_to_seconds(txt):
return _('Invalid input.') + ' ' + \
_('Time must be expressed in hours, minutes, seconds, etc.')
words = txt.split()
if len(words) == 1:
self._hide_warning_slowly()
return
elif convert.NUMBER_MATCHER.match(words[-1]):
return
else:
partial_unit = words[-1]
for u in self.conv.unit_to_seconds.keys():
if u.lower().find(partial_unit.lower())==0:
self._hide_warning_slowly()
return
self.valid = False
self.warn = True
self.set_warning_text(_('Invalid input.') + ' ' + _('Time must be expressed in hours, minutes, seconds, etc.'))
self._show_warning()
def set_value (self,seconds):
self.entry.set_text(
convert.seconds_to_timestring(seconds,
fractions=convert.FRACTIONS_ASCII)
)
def get_value (self):
return self.conv.timestring_to_seconds(self.entry.get_text())
def make_time_entry():
te=TimeEntry()
te.show()
return te
if __name__ == '__main__':
w=g | tk.Window()
vb = gtk.VBox()
hb = gtk.HBox()
l=gtk.Label('_Label')
l.set_use_underline(True)
l.set_alignment(0,0.5)
hb.pack_start(l)
te=TimeEntry()
import sys
te.connect('changed',lambda w: sys.stderr.write('Time value: %s'%w.get_value()))
l.set_mnemonic_w | idget(te)
hb.pack_start(te,expand=False,fill=False)
vb.add(hb)
qb = gtk.Button(stock=gtk.STOCK_QUIT)
vb.add(qb)
l.show()
hb.show()
qb.show()
te.show()
vb.show()
qb.connect('clicked',lambda *args: w.hide() and gtk.main_quit() or gtk.main_quit())
w.add(vb)
w.show()
w.connect('delete_event',gtk.main_quit)
gtk.main()
|
quantumlib/OpenFermion-Cirq | openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard.py | Python | apache-2.0 | 10,098 | 0.000891 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A variational ansatz based on a linear swap network Trotter step."""
from typing import Iterable, Optional, Sequence, Tuple, cast
import numpy
import sympy
import cirq
from openfermioncirq import swap_network
from openfermioncirq.variational.ansatz import VariationalAnsatz
from openfermioncirq.variational.letter_with_subscripts import (
LetterWithSubscripts)
class SwapNetworkTrotterHubbardAnsatz(VariationalAnsatz):
"""A Hubbard model ansatz based on the fermionic swap network Trotter step.
Each Trotter step includes 3 parameters: one for the horizontal hopping
terms, one for the vertical hopping terms, and one for the on-site
interaction. This ansatz is similar to the one used | in arXiv:1507.08969,
but corresponds to a different ordering for simulating the Hami | ltonian
terms.
"""
def __init__(self,
x_dim: float,
y_dim: float,
tunneling: float,
coulomb: float,
periodic: bool=True,
iterations: int=1,
adiabatic_evolution_time: Optional[float]=None,
qubits: Optional[Sequence[cirq.Qid]]=None
) -> None:
"""
Args:
iterations: The number of iterations of the basic template to
include in the circuit. The number of parameters grows linearly
with this value.
adiabatic_evolution_time: The time scale for Hamiltonian evolution
used to determine the default initial parameters of the ansatz.
This is the value A from the docstring of this class.
If not specified, defaults to the sum of the absolute values
of the entries of the two-body tensor of the Hamiltonian.
qubits: Qubits to be used by the ansatz circuit. If not specified,
then qubits will automatically be generated by the
`_generate_qubits` method.
"""
self.x_dim = x_dim
self.y_dim = y_dim
self.tunneling = tunneling
self.coulomb = coulomb
self.periodic = periodic
self.iterations = iterations
if adiabatic_evolution_time is None:
adiabatic_evolution_time = 0.1*abs(coulomb)*iterations
self.adiabatic_evolution_time = cast(float, adiabatic_evolution_time)
super().__init__(qubits)
def params(self) -> Iterable[sympy.Symbol]:
"""The parameters of the ansatz."""
for i in range(self.iterations):
if self.x_dim > 1:
yield LetterWithSubscripts('Th', i)
if self.y_dim > 1:
yield LetterWithSubscripts('Tv', i)
yield LetterWithSubscripts('V', i)
def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:
"""Bounds on the parameters."""
bounds = []
for param in self.params():
s = 1.0 if param.letter == 'V' else 2.0
bounds.append((-s, s))
return bounds
def _generate_qubits(self) -> Sequence[cirq.Qid]:
"""Produce qubits that can be used by the ansatz circuit."""
n_qubits = 2*self.x_dim*self.y_dim
return cirq.LineQubit.range(n_qubits)
def operations(self, qubits: Sequence[cirq.Qid]) -> cirq.OP_TREE:
"""Produce the operations of the ansatz circuit."""
for i in range(self.iterations):
# Apply one- and two-body interactions with a swap network that
# reverses the order of the modes
def one_and_two_body_interaction(p, q, a, b) -> cirq.OP_TREE:
th_symbol = LetterWithSubscripts('Th', i)
tv_symbol = LetterWithSubscripts('Tv', i)
v_symbol = LetterWithSubscripts('V', i)
if _is_horizontal_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-th_symbol).on(a, b)
if _is_vertical_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-tv_symbol).on(a, b)
if _are_same_site_opposite_spin(p, q, self.x_dim*self.y_dim):
yield cirq.CZPowGate(exponent=v_symbol).on(a, b)
yield swap_network(
qubits, one_and_two_body_interaction, fermionic=True)
qubits = qubits[::-1]
# Apply one- and two-body interactions again. This time, reorder
# them so that the entire iteration is symmetric
def one_and_two_body_interaction_reversed_order(p, q, a, b
) -> cirq.OP_TREE:
th_symbol = LetterWithSubscripts('Th', i)
tv_symbol = LetterWithSubscripts('Tv', i)
v_symbol = LetterWithSubscripts('V', i)
if _are_same_site_opposite_spin(p, q, self.x_dim*self.y_dim):
yield cirq.CZPowGate(exponent=v_symbol).on(a, b)
if _is_vertical_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-tv_symbol).on(a, b)
if _is_horizontal_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-th_symbol).on(a, b)
yield swap_network(
qubits, one_and_two_body_interaction_reversed_order,
fermionic=True, offset=True)
qubits = qubits[::-1]
def default_initial_params(self) -> numpy.ndarray:
"""Approximate evolution by H(t) = T + (t/A)V.
Sets the parameters so that the ansatz circuit consists of a sequence
of second-order Trotter steps approximating the dynamics of the
time-dependent Hamiltonian H(t) = T + (t/A)V, where T is the one-body
term and V is the two-body term of the Hamiltonian used to generate the
ansatz circuit, and t ranges from 0 to A, where A is equal to
`self.adiabatic_evolution_time`. The number of Trotter steps
is equal to the number of iterations in the ansatz. This choice is
motivated by the idea of state preparation via adiabatic evolution.
The dynamics of H(t) are approximated as follows. First, the total
evolution time of A is split into segments of length A / r, where r
is the number of Trotter steps. Then, each Trotter step simulates H(t)
for a time length of A / r, where t is the midpoint of the
corresponding time segment. As an example, suppose A is 100 and the
ansatz has two iterations. Then the approximation is achieved with two
Trotter steps. The first Trotter step simulates H(25) for a time length
of 50, and the second Trotter step simulates H(75) for a time length
of 50.
"""
total_time = self.adiabatic_evolution_time
step_time = total_time / self.iterations
params = []
for param, scale_factor in zip(self.params(),
self.param_scale_factors()):
if param.letter == 'Th' or param.letter == 'Tv':
params.append(_canonicalize_exponent(
-self.tunneling * step_time / numpy.pi, 4) / scale_factor)
elif param.letter == 'V':
i, = param.subscripts
# Use the midpoint of the time segment
interpolation_progress = 0.5 * (2 * i + 1) / self.iterations
params.append(_canonicalize_exp |
queirozfcom/spam-filter | lib/validation.py | Python | mit | 269 | 0.02974 | def val | idate_cross_validation(rounds,train_to_test_ratio):
# the number of rounds must be exactly equal to the number
# of "parts" you'll split your data into
res = rounds * (1 - train_to_test_ratio)
# comparing floats the crude way.
ass | ert ( abs(res - 1) < 0.0001 )
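# Illustrative sketch (added for clarity, not part of the original module):
# with 5 rounds and a train/test ratio of 0.8 each round holds out one fifth
# of the data, so 5 * (1 - 0.8) == 1.0 and the assertion passes; 4 rounds at
# the same ratio would fail, since 4 * 0.2 == 0.8.
if __name__ == "__main__":
    validate_cross_validation(5, 0.8)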
|
buck06191/BayesCMD | bayescmd/abc/dtaidistance/__init__.py | Python | gpl-2.0 | 771 | 0.005188 | import logging
logger = logging.getLogger("be.kuleuven.dtai.distance")
from . import dtw
try:
from . import dtw_c
except ImportError:
import os
# Try to compile automatically
# try:
# import numpy as np
# import pyximport
# pyximport.install(setup_args={'include_dirs': n | p.get_include()})
# from . import dtw_c
# except ImportError:
dtaidistance_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir)
logger.warning("\nDTW C variant not available.\n\n" +
"If you want to use the C libraries (not required, depends on cython), " +
| "then run `cd {};python3 setup.py build_ext --inplace`.".format(dtaidistance_dir))
dtw_c = None
__version__ = "0.1.6"
|
numericube/twistranet | twistranet/core/caches.py | Python | agpl-3.0 | 1,772 | 0.009029 | """
Various caching help functions and classes.
"""
from django.core.cache import cache
DEFAULT_CACHE_DELAY = 60 * 60 # Default cache delay is 1 hour. It's quite long.
USERACCOUNT_CACHE_DELAY = 60 * 3 # 3 minutes here. This is used to know if a user is online or not.
class _AbstractCache(object):
"""
Abstract cache management class.
A cache class manages data about a whole population.
It is instantiated for one specific member of that population.
"""
delay = DEFAULT_CACHE_DELAY
def __init__(self, key_prefix):
"""
We store the key prefix for easy values retrieval
"""
# Save the key prefix used to namespace all cached attribute values
self.key_prefix = key_prefix
def _get(self, attr, default = None):
"""
Return attr from the cache or default value
"""
return cache.get("%s#%s" % (self.key_prefix, attr), default)
def _set(self, attr, value):
cache.set("%s#%s" % (self.key_prefix, attr), value, self.delay)
class UserAccountCache(_AbstractCache):
delay = USERACCOUNT_CACHE_DELAY
def __init__(self, useraccount_or_id):
"""
Instantiate a cache from the given user (or user id).
"""
# Instantiate cache
from twistranet.twistapp import Twistable
if isinstance(useraccount_or_id, Twistable):
useraccount_ | or_id = useraccount_or_id.id
super(UserAccountCache, self).__init__("UA%d" % useraccount_or_id)
# Online information
def get_online(self): return self._get("online", False)
def set_online(self, v): return self._set("online", | v)
online = property(get_online, set_online)
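# Illustrative usage sketch (added for clarity; assumes a configured Django
# cache backend, and the user id 42 is made up):
# >>> c = UserAccountCache(42)
# >>> c.online = True     # stored under key "UA42#online" for 3 minutes
# >>> c.online
# True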
|
PowerDNS/pdns | regression-tests.recursor-dnssec/test_KeepOpenTCP.py | Python | gpl-2.0 | 2,856 | 0.001401 | import dns
import os
import socket
import struct
from recursortests import RecursorTest
class testKeepOpenTCP(RecursorTest):
_confdir = 'KeepOpenTCP'
_config_template = """dnssec=validate
packetcache-ttl=10
packetcache-servfail-ttl=10
auth-zones=authzone.example=configs/%s/authzone.zone""" % _confdir
@classmethod
def generateRecursorConfig(cls, confdir):
authzonepath = os.path.join(confdir, 'authzone.zone')
with open(authzonepath, 'w') as authzone:
authzone.write("""$ORIGIN authzone.example.
@ 3600 IN SOA {soa}
@ 3600 IN A 192.0.2.88
""".format(soa=cls._SOA))
super(testKeepOpenTCP, cls).generate | RecursorConfig(confdir)
def sendTCPQueryKeepOpen(self, sock, query, timeout=2.0):
try:
wire = query.to_wire()
sock.send(struct.pack("!H", len(wire)))
sock.send(wire)
data = sock.recv(2)
if data:
(datalen,) = struct.unpack("!H", data)
data = sock.recv(datalen)
excep | t socket.timeout as e:
print("Timeout: %s" % (str(e)))
data = None
except socket.error as e:
print("Network error: %s" % (str(e)))
data = None
message = None
if data:
message = dns.message.from_wire(data)
return message
def testNoTrailingData(self):
count = 10
sock = [None] * count
expected = dns.rrset.from_text('ns.secure.example.', 0, dns.rdataclass.IN, 'A', '{prefix}.9'.format(prefix=self._PREFIX))
query = dns.message.make_query('ns.secure.example', 'A', want_dnssec=True)
query.flags |= dns.flags.AD
for i in range(count):
sock[i] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock[i].settimeout(2.0)
sock[i].connect(("127.0.0.1", self._recursorPort))
res = self.sendTCPQueryKeepOpen(sock[i], query)
self.assertMessageIsAuthenticated(res)
self.assertRRsetInAnswer(res, expected)
self.assertMatchingRRSIGInAnswer(res, expected)
sock[i].settimeout(0.1)
try:
data = sock[i].recv(1)
self.assertTrue(False)
except socket.timeout as e:
print("ok")
for i in range(count):
sock[i].settimeout(2.0)
res = self.sendTCPQueryKeepOpen(sock[i], query)
self.assertMessageIsAuthenticated(res)
self.assertRRsetInAnswer(res, expected)
self.assertMatchingRRSIGInAnswer(res, expected)
sock[i].settimeout(0.1)
try:
data = sock[i].recv(1)
self.assertTrue(False)
except socket.timeout as e:
print("ok")
for i in range(count):
sock[i].close()
|
KWARC/mwetoolkit | bin/combine_freqs.py | Python | gpl-3.0 | 12,449 | 0.01952 | #!/usr/bin/python
# -*- coding:UTF-8 -*-
################################################################################
#
# Copyright 2010-2012 Carlos Ramisch, Vitor De Araujo
#
# combine_freqs.py is part of mwetoolkit
#
# mwetoolkit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mwetoolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mwetoolkit. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
"""
This script combines several frequency sources. For instance, if each n-gram
was counted in three different frequency sources (corpora or web), it is
possible to combine them in a single frequency with a given combination
heuristic. Three combination heuristics are implemented: uniform, inverse
and backoff.
For more information, call the script with no parameter and read the
usage instructions.
"""
import sys
import math
from libs.base.frequency import Frequency
from libs.base.corpus_size import CorpusSize
from libs.util import usage, read_options, treat_options_simplest, verbose
from libs import filetype
################################################################################
# GLOBALS
usage_string = """Usage:
python {program} OPTIONS <candidates.xml>
The <candidates.xml> file must be valid XML (dtd/mwetoolkit-candidates.dtd).
OPTIONS may be:
-c <comb> OR --combination <comb>
The name of the frequency combination heuristics that will be calculated. If
the option is not defined, the script calculates all available combinations.
Combination names should be separated by colon ":" and should be in the list
of supported combination heuristics below:
uniform -- Same uniform weight 1/n for all n frequency sources
inverse -- Weight inversely proportional to the corpus size of the freq.
source
backoff -- If main_freq is below automatically calculated threshold, use the
web frequencies. A web freq. contains "google" or "yahoo" in its name.
-o <name> OR --original <name>
The name of the frequency source from which the candidates were extracted
originally. This is only necessary if you are using backoff to combine the
counts. In this case, you MUST define the original count source and it must
be a valid name described through a <corpussize> element in the meta header.
{common_options}
"""
supported_combination = [ "uniform", "inverse", "backoff" ]
corpussize_dict = {}
combination = supported_combination
################################################################################
def backoff_threshold( corpus_size ):
"""
Based on the corpus size, calculates automatically a threshold below
which the original frequency will be discarded and the mean of web
frequencies will be used instead.
@param corpus_size Integer with the size (nb. of word tokens) of
original frequency source
@return A threshold value below which the original count is replaced by
backed-off count.
"""
return math.log( float( corpus_size ) / 100000.0, 2 )
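# Illustrative worked example (added for clarity): for a 100-million-token
# corpus the threshold is log2(1e8 / 1e5) = log2(1000) ~= 9.97, so an
# original count below roughly 10 triggers the web-frequency back-off.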
################################################################################
def web_freqs( freqs ) :
"""
Given a list of strings, returns a sub-list containing only those
strings that correspond to the names of Web-based counts. This is
totally hard-coded because there's no information in the freq element
that tells whether the count comes from a corpus or from the Web (this
should be easy to modify in the DTD and in the count script, though)
@param freqs A list of the names of all frequency sources in the file
@return A list containing a subset of the input list, corresponding to
the names of web frequencies.
"""
result = {}
for (name, freq) in freqs.items() :
if "yahoo" in name.lower() or "google" in name.lower() :
result[ name ] = freq
return result
################################################################################
def combine( method, freqs ):
"""
Generates a unique count using a given combination heuristic and a list
of original counts.
@param method A string with the name of the combination heuristic to
use. This name may be one of the following: "uniform", "inverse",
"backoff". All other values will be ignored.
@param freqs A list of integers containing the original wor | d counts. The
| list contains as many elements as there are frequency sources in the
candidates list.
@return A tuple containing (combined_count, backed_off). The former is a
float containing the combined count using a given method, the latter is
a boolean flag that indicates that the combined count was backed off.
"""
global corpussize_dict
global main_freq
# Weight of each corpus is its size
if method =="uniform" :
avg_count = float( sum( freqs.values() ) ) / len( freqs.values() )
return ( avg_count, False )
# Corpora have all the same weight, frequencies are 0..1
elif method == "inverse" :
result = 0.0
total_size = float( sum( corpussize_dict.values() ) )
for ( name, freq ) in freqs.items() :
weight = ( ( total_size - corpussize_dict[ name ] ) / total_size )
result += weight * freq
return ( result, False )
elif method == "backoff" :
for (name, freq ) in freqs.items() :
if name == main_freq :
if freq < backoff_threshold( corpussize_dict[ name ] ) :
backed_off = True
w_freqs = web_freqs( freqs )
# The minus is to signal that we backed off. It will be
# ignored since abs value is taken to calculate association
# measures. However, it is important that the association
# measures script knows that this is a back-off, since the
# value of N is different for "backed off" and "did not back
# off".
avg_web = - ( sum(w_freqs.values()) / len(w_freqs.values()))
return ( avg_web, backed_off )
else :
backed_off = False
return ( freq, backed_off )
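# Illustrative sketch (hypothetical frequency sources, added for clarity):
# assuming corpussize_dict = {"bnc": 1e8, "yahoo": 1e12}, the call
# combine( "uniform", {"bnc": 40, "yahoo": 52000} ) returns
# ((40 + 52000) / 2, False) == (26020.0, False); "backoff" would instead
# compare the main-source count against backoff_threshold first.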
################################################################################
class FreqCombinerHandler(filetype.ChainedInputHandler):
def before_file(self, fileobj, info={}):
if not self.chain:
self.chain = self.make_printer(info, None)
self.chain.before_file(fileobj, info)
def handle_meta(self, meta, info={}) :
"""
Adds new meta-features corresponding to the new frequency sources that
are being added to the corpus. The new corpus sizes are calculated based
on the combination heuristics.
@param meta The `Meta` header that is being read from the XML file.
"""
global corpussize_dict, combination
for corpus_size in meta.corpus_sizes :
corpussize_dict[ corpus_size.name ] = float(corpus_size.value)
for comb in combination :
if comb == "backoff" :
w_freqs = web_freqs( corpussize_dict )
combined = int( combine( "uniform", w_freqs )[ 0 ] )
meta.add_corpus_size( CorpusSize( comb, combined ) )
else :
combined = int( combine( comb, corpussize_dict )[ 0 ] )
meta.a |
anhstudios/swganh | data/scripts/templates/object/tangible/wearables/shirt/shared_shirt_s16.py | Python | mit | 478 | 0.031381 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS | MAY BE LOST IF DONE IMPROPERLY
#### PLEA | SE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/shirt/shared_shirt_s16.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","shirt_s16")
#### BEGIN MODIFICATIONS ####
result.max_condition = 1000
#### END MODIFICATIONS ####
return result
|
zvolsky/akce | controllers/keys.py | Python | agpl-3.0 | 1,007 | 0.004965 | # -*- coding: utf-8 -*-
@auth.requires_membership('admin')
def starts():
grid = SQLFORM.grid(db.typ_zacatku,
showbuttontext=False)
return dict(grid=grid)
@auth.requires_membership('admin')
def contacts():
grid = SQLFORM.grid(db.typ_kontaktu,
showbuttontext=False)
return dict(grid=grid)
@auth.req | uires_membership('admin')
def places():
grid = SQLFORM.grid(db | .typ_mista,
showbuttontext=False)
return dict(grid=grid)
@auth.requires_membership('admin')
def links():
grid = SQLFORM.grid(db.typ_odkaz,
showbuttontext=False)
return dict(grid=grid)
@auth.requires_membership('admin')
def uploads():
grid = SQLFORM.grid(db.typ_upload,
showbuttontext=False)
return dict(grid=grid)
@auth.requires_membership('admin')
def partitipations():
grid = SQLFORM.grid(db.typ_ucasti,
showbuttontext=False)
return dict(grid=grid)
|
liaozhida/liaozhida.github.io | _posts/pythonbak/Atest.py | Python | apache-2.0 | 1,140 | 0.073345 | # -*- coding: utf-8 -*-
import json
import os
import re
def jsonTokv():
file = open('zhihu_cookies', 'r')
try:
cookies = json.load(file)
# print len(cookies)
e | xcept ValueError,e:
print 'cache-cookie is None'
cookies = {}
cookiesStr = ''
for key in cookies:
cookiesStr += key+'='+cookies[key]+';'
print c | ookiesStr[0:-1]
return cookiesStr[0:-1]
def jsonDelete():
draftData = {
"do": "saveArticle",
"type": "1",
"title": "如何正确的发布md",
"text": "# 这是标题",
"weibo": "0",
"blogId": "0",
"aticleId": "",
"id": "",
"tags[]": "1040000000366352",
"url": ""
}
del draftData['do']
print draftData
def demo():
file = open('demo.md').read()
print file[0:1000]
print '----------------'
pattern = re.compile(r"---(\n(.{0,}))*---")
print re.sub(pattern,'tihuan',file[0:1000])
# print 'result is : ' + result
# print len(result)
# map = {}
# map['a'] = 1
# print map
#
# print 'test:'
# for line in os.listdir('../'):
# print line
# print os.path.abspath(os.path.join('/Users/zhidaliao/Desktop/zhida_blog/_posts/docker',line))
# jsonTokv()
# jsonDelete()
demo()
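# Illustrative note (added for clarity; an assumption about the intent): the
# pattern r"---(\n(.{0,}))*---" appears meant to strip a YAML front-matter
# block, and under that assumption a simpler equivalent is
#     re.sub(r"^---\n.*?\n---", 'tihuan', file, flags=re.DOTALL)
# which replaces the first front-matter block non-greedily.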
|
ashishthedev/gae-django-skeleton | src/project_name/settings/gae.py | Python | mit | 2,540 | 0.009449 | #!/usr/bin/env python
import os
# Load production settings when running on GAE or SETTINGS_MODE is prod
# else, load loc | al setting | s
if (os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine') or os.getenv('SETTINGS_MODE') == 'prod'):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_name.settings.production")
from production import *
########## DATABASE CONFIGURATION
# TODO: Enter your application id below. If you have signed up
if os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine'):
# Running on production App Engine, so use a Google Cloud SQL database.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '/cloudsql/your-application-id-here:cloud-sql-instance-name-here',
'NAME': 'database-name-here',
'USER': 'root',
}
}
elif os.getenv('SETTINGS_MODE') == 'prod':
# Running in development, but want to access the Google Cloud SQL instance
# in production.
DATABASES = {
'default': {
'ENGINE': 'google.appengine.ext.django.backends.rdbms',
'INSTANCE': 'your-application-id-here:cloud-sql-instance-name-here',
'NAME': 'database-name-here',
'USER': 'root',
}
}
########## END DATABASE CONFIGURATION
########## EMAIL CONFIGURATION
EMAIL_BACKEND = 'gae_django.mail.EmailBackend'
########## END EMAIL CONFIGURATION
########## HOST CONFIGURATION
# TODO: Enter your application id below. If you have signed up
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
ALLOWED_HOSTS = [
'your-application-id-here.appspot.com',
]
########## END HOST CONFIGURATION
else:
from local import *
########## STORAGE CONFIGURATION
# See: https://github.com/ckopanos/django-google-cloud-storage
DEFAULT_FILE_STORAGE = 'gae_django.storage.googleCloud.GoogleCloudStorage'
# TODO: Enter your bucket name below. If you have signed up
GOOGLE_CLOUD_STORAGE_BUCKET = '/name-of-your-bucket-here' # the name of the bucket you have created from the google cloud storage console
GOOGLE_CLOUD_STORAGE_URL = 'http://storage.googleapis.com/bucket' #whatever the url for accessing your cloud storgage bucket
GOOGLE_CLOUD_STORAGE_DEFAULT_CACHE_CONTROL = 'public, max-age: 7200' # default cache control headers for your files
########## END STORAGE CONFIGURATION |
UltrosBot/Ultros3K | src/ultros/networks/irc/connectors/plain.py | Python | artistic-2.0 | 310 | 0.003226 | # coding=utf-8
import asyncio
from ultros.networks.irc.connectors.base import BaseIRCConnector
__author__ = "Gareth Coles"
class PlainIRCConnector(BaseIRCConnector | ):
async def do_connect(self):
transport | , _ = await asyncio.get_event_loop().create_connection(lambda: self, self.host, self.port)
|
jtpaasch/armyguys | armyguys/aws/ecs/taskdefinition.py | Python | mit | 3,013 | 0 | # -*- coding: utf-8 -*-
"""Utilities for working with ECS task definitions."""
import json
import os
from .. import client as boto3client
def create(profile, contents=None, filepath=None):
"""Upload a task definition to ECS.
Args:
profile
A profile to connect to AWS with.
contents
The contents of the task definition you want to upload.
You must specify this OR a filepath.
filepath
The path to a task definition *.json file you want to upload.
You must specify this OR a filepath.
Returns:
The data returned by boto3.
"""
if contents:
data = contents
elif filepath:
norm_path = os.path.normpath(filepath).rstrip(os.path.sep)
with open(norm_path) as f:
data = json.load(f)
client = boto3client.get("ecs", profile)
params = {}
params["family"] = data.get("family")
params["contain | erDefinitions"] = data.get("containerDefinitions")
params["volumes"] = data.get("volumes")
if params["volumes"] is None:
params["volumes"] = []
return client.register_task_definition(**params)
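# Illustrative usage sketch (profile name and file path are hypothetical):
# >>> create("dev-profile", filepath="taskdefs/web.json")
# or, with inline contents:
# >>> create("dev-profile", contents={"family": "web",
# ... "containerDefinitions": []})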
def delete(profile, name):
"""Delete an ECS task definition.
Args:
profile
A profil | e to connect to AWS with.
name
The full name, i.e., family:revision.
Returns:
The data returned by boto3.
"""
client = boto3client.get("ecs", profile)
params = {}
params["taskDefinition"] = name
return client.deregister_task_definition(**params)
def get_arns(profile, family=None):
"""Get ECS task definition arns.
Args:
profile
A profile to connect to AWS with.
family
A family of task definitions to get.
Returns:
A list of data returned by boto3.
"""
client = boto3client.get("ecs", profile)
params = {}
if family:
params["familyPrefix"] = family
return client.list_task_definitions(**params)
def get_families(profile, family=None):
"""Get ECS task definition families.
Args:
profile
A profile to connect to AWS with.
family
A family of task definitions to get.
Returns:
A list of data returned by boto3.
"""
client = boto3client.get("ecs", profile)
params = {}
if family:
params["familyPrefix"] = family
return client.list_task_definition_families(**params)
def get(profile, task_definition):
"""Get an ECS task definition.
Args:
profile
A profile to connect to AWS with.
task_definition
A task definition to get, specified by its full name,
i.e., family:revision.
Returns:
The data returned by boto3.
"""
client = boto3client.get("ecs", profile)
params = {}
params["taskDefinition"] = task_definition
return client.describe_task_definition(**params)
|
spulec/moto | tests/test_dax/test_dax.py | Python | apache-2.0 | 19,374 | 0.001032 | """Unit tests for dax-supported APIs."""
import boto3
import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
from moto import mock_dax
from moto.core import ACCOUNT_ID
# See our Development Tips on writing tests for hints on how to write good tests:
# http://docs.getmoto.org/en/latest/docs/contributing/development_tips/tests.html
@mock_dax
def test_create_cluster_minimal():
client = boto3.client("dax", region_name="us-east-2")
iam_role_arn = f"arn:aws:iam::{ACCOUNT_ID}:role/aws-service-role/dax.amazonaws.com/AWSServiceRoleForDAX"
created_cluster = client.create_cluster(
ClusterName="daxcluster",
NodeType="dax.t3.small",
ReplicationFactor=3,
IamRoleArn=iam_role_arn,
)["Cluster"]
described_cluster = client.describe_clusters(ClusterNames=["daxcluster"])[
"Clusters"
][0]
for cluster in [created_cluster, described_cluster]:
cluster["ClusterName"].should.equal("daxcluster")
cluster["ClusterArn"].should.equal(
f"arn:aws:dax:us-east-2:{ACCOUNT_ID}:cache/daxcluster"
)
cluster["TotalNodes"].should.equal(3)
cluster["ActiveNodes"].should.equal(0)
cluster["NodeType"].should.equal("dax.t3.small")
cluster["Status"].should.equal("creating")
cluster["ClusterDiscoveryEndpoint"].should.equal({"Port": 8111})
cluster["PreferredMaintenanceWindow"].should.equal("thu:23:30-fri:00:30")
cluster["SubnetGroup"].should.equal("default")
cluster["SecurityGroups"].should.have.length_of(1)
cluster["IamRoleArn"].should.equal(iam_role_arn)
cluster.should.have.key("ParameterGroup")
cluster["ParameterGroup"].should.have.key("ParameterGroupName").equals(
"default.dax1.0"
)
cluster["SSEDescription"].should.equal({"Status": "DISABLED"})
cluster.should.have.key("ClusterEndpointEncryptionType").equals("NONE")
@mock_dax
def test_create_cluster_description():
client = boto3.client("dax", region_name="us-east-2")
iam_role_arn = f"arn:aws:iam::{ACCOUNT_ID}:role/aws-service-role/dax.amazonaws.com/AWSServiceRoleForDAX"
created_cluster = client.create_cluster(
ClusterName="daxcluster",
Description="my cluster",
NodeType="dax.t3.small",
ReplicationFactor=3,
IamRoleArn=iam_role_arn,
)["Cluster"]
described_cluster = client.describe_clusters(ClusterNames=["daxcluster"])[
"Clusters"
][0]
for cluster in [created_cluster, described_cluster]:
cluster["ClusterName"].should.equal("daxcluster")
cluster["Description"].should.equal("my cluster")
@mock_dax
def test_create_cluster_with_sse_enabled():
client = boto3.client("dax", region_name="us-east-2")
iam_role_arn = f"arn:aws:iam::{ACCOUNT_ID}:role/aws-service-role/dax.amazonaws.com/AWSServiceRoleForDAX"
created_cluster = client.create_cluster(
ClusterName="daxcluster",
NodeType="dax.t3.small",
ReplicationFactor=3,
IamRoleArn=iam_role_arn,
SSESpecification={"Enabled": True},
)["Cluster"]
described_cluster = client.describe_clusters(ClusterNames=["daxcluster"])[
"Clusters"
][0]
for cluster in [created_cluster, described_cluster]:
cluster["ClusterName"].should.equal("daxcluster")
cluster["SSEDescription"].should.equal({"Status": "ENABLED"})
@mock_dax
def test_create_cluster_invalid_arn():
client = boto3.client("dax", region_name="eu-west-1")
with pytest.raises(ClientError) as exc:
client.create_cluster(
ClusterName="1invalid",
NodeType="dax.t3.small",
ReplicationFactor=3,
IamRoleArn="n/a",
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterValueException")
err["Message"].should.equal("ARNs must start with 'arn:': n/a")
@mock_dax
def test_create_cluster_invalid_arn_no_partition():
client = boto3.client("dax", region_name="eu-west-1")
with pytest.raises(ClientError) as exc:
client.create_cluster(
ClusterName="1invalid",
NodeType="dax.t3.small",
ReplicationFactor=3,
IamRoleArn="arn:sth",
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterValueException")
err["Message"].should.equal("Second colon partition not found: arn:sth")
@mock_dax
def test_create_cluster_invalid_arn_no_vendor():
client = boto3.client("dax", region_name="eu-west-1")
with pytest.raises(ClientError) as exc:
client.create_cluster(
ClusterName="1invalid",
NodeType="dax.t3.small",
ReplicationFactor=3,
IamRoleArn="arn:sth:aws",
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterValueException")
err["Message"].should.equal("Third colon vendor not found: arn:sth:aws")
@mock_dax
def test_create_cluster_invalid_arn_no_region():
client = boto3.client("dax", region_name="eu-west-1")
with pytest.raises(ClientError) as exc:
client.create_cluster(
ClusterName="1invalid",
NodeType="dax.t3.small",
ReplicationFactor=3,
IamRol | eArn="arn:sth:aws:else",
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterValueException")
err["Message"].should.equal(
"Fourth colon (region/namespace delimiter) not found: arn:sth:aws:else"
)
@mock_dax
def test_create_cluster_invalid_arn_no_namespace():
client = boto3.client("dax", region_name="eu-west-1")
with pytest.raises(ClientError) as exc:
client.creat | e_cluster(
ClusterName="1invalid",
NodeType="dax.t3.small",
ReplicationFactor=3,
IamRoleArn="arn:sth:aws:else:eu-west-1",
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterValueException")
err["Message"].should.equal(
"Fifth colon (namespace/relative-id delimiter) not found: arn:sth:aws:else:eu-west-1"
)
@mock_dax
@pytest.mark.parametrize(
"name", ["1invalid", "iИvalid", "in_valid", "invalid-", "in--valid"]
)
def test_create_cluster_invalid_name(name):
client = boto3.client("dax", region_name="eu-west-1")
with pytest.raises(ClientError) as exc:
client.create_cluster(
ClusterName=name,
NodeType="dax.t3.small",
ReplicationFactor=3,
IamRoleArn="arn:aws:iam::486285699788:role/apigatewayrole",
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterValueException")
err["Message"].should.equal(
"Cluster ID specified is not a valid identifier. Identifiers must begin with a letter; must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens."
)
@mock_dax
@pytest.mark.parametrize(
"name", ["1invalid", "iИvalid", "in_valid", "invalid-", "in--valid"]
)
def test_describe_clusters_invalid_name(name):
client = boto3.client("dax", region_name="eu-west-1")
with pytest.raises(ClientError) as exc:
client.describe_clusters(ClusterNames=[name])
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterValueException")
err["Message"].should.equal(
"Cluster ID specified is not a valid identifier. Identifiers must begin with a letter; must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens."
)
@mock_dax
def test_delete_cluster_unknown():
client = boto3.client("dax", region_name="eu-west-1")
with pytest.raises(ClientError) as exc:
client.delete_cluster(ClusterName="unknown")
err = exc.value.response["Error"]
err["Code"].should.equals("ClusterNotFoundFault")
err["Message"].should.equal("Cluster not found.")
@mock_dax
def test_delete_cluster():
client = boto3.client("dax", region_name="eu-west-1")
iam_role_arn = f"arn:aws:iam::{ACCOUNT |
keras-team/keras | keras/integration_test/gradient_checkpoint_test.py | Python | apache-2.0 | 6,728 | 0.012931 | # Copyright 2020 The TensorFlow Author | s. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF | ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import gc
import tensorflow.compat.v2 as tf
from tensorflow.python.framework import test_util as tf_test_utils # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.platform import test as test_lib
layers = tf.keras.layers
optimizers = tf.keras.optimizers
def _get_big_cnn_model(img_dim, n_channels, num_partitions,
blocks_per_partition):
"""Creates a test model whose activations are significantly larger than model size."""
model = tf.keras.Sequential()
model.add(layers.Input(shape=(img_dim, img_dim, n_channels)))
for _ in range(num_partitions):
for _ in range(blocks_per_partition):
model.add(layers.Conv2D(10, 5, padding='same', activation=tf.nn.relu))
model.add(layers.MaxPooling2D((1, 1), padding='same'))
model.add(layers.Conv2D(40, 5, padding='same', activation=tf.nn.relu))
model.add(layers.MaxPooling2D((1, 1), padding='same'))
model.add(layers.Conv2D(20, 5, padding='same', activation=tf.nn.relu))
model.add(layers.MaxPooling2D((1, 1), padding='same'))
model.add(layers.Flatten())
model.add(layers.Dense(32, activation=tf.nn.relu))
model.add(layers.Dense(10))
return model
def _get_split_cnn_model(img_dim, n_channels, num_partitions,
blocks_per_partition):
"""Creates a test model that is split into `num_partitions` smaller models."""
models = [tf.keras.Sequential() for _ in range(num_partitions)]
models[0].add(layers.Input(shape=(img_dim, img_dim, n_channels)))
for i in range(num_partitions):
model = models[i]
if i > 0:
last_shape = models[i - 1].layers[-1].output_shape
model.add(layers.Input(shape=last_shape[1:]))
for _ in range(blocks_per_partition):
model.add(layers.Conv2D(10, 5, padding='same', activation=tf.nn.relu))
model.add(layers.MaxPooling2D((1, 1), padding='same'))
model.add(layers.Conv2D(40, 5, padding='same', activation=tf.nn.relu))
model.add(layers.MaxPooling2D((1, 1), padding='same'))
model.add(layers.Conv2D(20, 5, padding='same', activation=tf.nn.relu))
model.add(layers.MaxPooling2D((1, 1), padding='same'))
models[-1].add(layers.Flatten())
models[-1].add(layers.Dense(32, activation=tf.nn.relu))
models[-1].add(layers.Dense(10))
return models
def _compute_loss(logits, labels):
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
def _limit_gpu_memory():
"""Helper function to limit GPU memory for testing."""
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
tf.config.experimental.set_virtual_device_configuration(
gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])
return True
return False
def _get_dummy_data(img_dim, n_channels, batch_size):
inputs = tf.ones([batch_size, img_dim, img_dim, n_channels])
labels = tf.ones([batch_size], dtype=tf.int64)
return inputs, labels
def _train_no_recompute(n_steps):
"""Trains a single large model without gradient checkpointing."""
img_dim, n_channels, batch_size = 256, 1, 4
x, y = _get_dummy_data(img_dim, n_channels, batch_size)
model = _get_big_cnn_model(
img_dim, n_channels, num_partitions=3, blocks_per_partition=2)
optimizer = optimizers.SGD()
losses = []
tr_vars = model.trainable_variables
for _ in range(n_steps):
with tf.GradientTape() as tape:
logits = model(x)
loss = _compute_loss(logits, y)
losses.append(loss)
grads = tape.gradient(loss, tr_vars) # tr_vars
optimizer.apply_gradients(zip(grads, tr_vars))
del grads
return losses
def _train_with_recompute(n_steps):
"""Trains a single large model with gradient checkpointing using tf.recompute_grad."""
img_dim, n_channels, batch_size = 256, 1, 4
x, y = _get_dummy_data(img_dim, n_channels, batch_size)
# This model is the same model as _get_big_cnn_model but split into 3 parts.
models = _get_split_cnn_model(
img_dim, n_channels, num_partitions=3, blocks_per_partition=2)
model1, model2, model3 = models
# Apply gradient checkpointing to the submodels using tf.recompute_grad.
model1_re = tf.recompute_grad(model1)
model2_re = tf.recompute_grad(model2)
model3_re = tf.recompute_grad(model3)
optimizer = optimizers.SGD()
tr_vars = (
model1.trainable_variables + model2.trainable_variables +
model3.trainable_variables)
losses = []
for _ in range(n_steps):
with tf.GradientTape() as tape:
logits1 = model1_re(x)
logits2 = model2_re(logits1)
logits3 = model3_re(logits2)
loss = _compute_loss(logits3, y)
losses.append(loss)
grads = tape.gradient(loss, tr_vars) # tr_vars
optimizer.apply_gradients(zip(grads, tr_vars))
del grads
return losses
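# Illustrative note (added for clarity): tf.recompute_grad wraps a callable so
# its forward activations are discarded and recomputed during the backward
# pass, trading extra compute for lower peak memory; that is why
# _train_with_recompute fits within the 1 GiB virtual GPU configured by
# _limit_gpu_memory while _train_no_recompute is expected to raise
# ResourceExhaustedError.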
@tf_test_utils.with_eager_op_as_function
class GradientCheckpointTest(tf.test.TestCase):
def test_raises_oom_exception(self):
if not _limit_gpu_memory():
self.skipTest('No virtual GPUs found')
with self.assertRaises(Exception) as context:
_train_no_recompute(1)
self.assertIsInstance(context.exception, tf.errors.ResourceExhaustedError)
@tf_test_utils.disable_xla(
'xla does not support searching for memory-limited solvers.')
def test_does_not_raise_oom_exception(self):
if not _limit_gpu_memory():
self.skipTest('No virtual GPUs found')
if test_lib.is_built_with_rocm():
self.skipTest(
'ROCm MIOpen does not support searching for memory-limited'
'solvers yet so skip the subtest which would result in OOM.')
n_step = 2
losses = _train_with_recompute(n_step)
self.assertLen(losses, n_step)
def tearDown(self):
super(GradientCheckpointTest, self).tearDown()
# Make sure all the models created in keras has been deleted and cleared
# from the global keras grpah, also do a force GC to recycle the GPU memory.
tf.keras.backend.clear_session()
gc.collect()
if __name__ == '__main__':
tf.test.main()
|
webjunkie/python-social-auth | social/apps/django_app/default/migrations/0003_alter_email_max_length.py | Python | bsd-3-clause | 548 | 0 | # -*- coding: utf-8 -*-
fro | m __future__ import unicode_literals
from django.conf import settings
from django.db import models, migrations
from social.utils import setting_name
EMAIL_LENGTH = getattr(settings, setting_name('EMAIL_LENGTH'), 254)
class Migration(migrations.Migration):
dependencies = [
('default', '0002_add_relat | ed_name'),
]
operations = [
migrations.AlterField(
model_name='code',
name='email',
field=models.EmailField(max_length=EMAIL_LENGTH),
),
]
|
ecell/ecell3 | ecell/frontend/model-editor/ecell/ui/model_editor/NestedListEditor.py | Python | lgpl-3.0 | 4,695 | 0.017891 | #::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2016 Keio University
# Copyright (C) 2008-2016 RIKEN
# Copyright (C) 2005-2009 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
#
#'Design: Gabor Bereczki <gabor@e-cell.org>',
#'Design and application Framework: Koichi Takahashi <shafi@e-cell.org>',
#'Programming: Gabor Bereczki' at
# E-CELL Project, Lab. for Bioinformatics, Keio University.
#
im | port os
import os.path
import sys
import gtk
impo | rt gobject
from ecell.ui.model_editor.Utils import *
from ecell.ui.model_editor.Constants import *
from ecell.ui.model_editor.ModelEditor import *
from ecell.ui.model_editor.ViewComponent import *
class BadNestedList( Exception ):
def __init__( self, badString ):
self.args = "%s\n cannot be parsed as nestedlist!"%badString
class NestedListEditor(ViewComponent):
#######################
# GENERAL CASES #
#######################
def __init__( self, aParentWindow, pointOfAttach ):
self.theParentWindow = aParentWindow
# call superclass
ViewComponent.__init__( self, pointOfAttach, 'attachment_box' )
self.theNestedList = copyValue( self.theParentWindow.thePropertyValue )
self.theTextView = self['textview']
self.textBuffer = gtk.TextBuffer()
self.theTextView.set_buffer( self.textBuffer )
self.textBuffer.set_text( self.__nestedListToString( self.theNestedList,0 ) )
def getValue( self ):
aText = self.textBuffer.get_text( self.textBuffer.get_start_iter(), self.textBuffer.get_end_iter())
try:
aValue= self.__stringToNestedList( aText)
except BadNestedList:
self.theParentWindow.theModelEditor.printMessage( ''.join(sys.exc_value), ME_ERROR )
aValue = None
return aValue
def __nestedListToString( self, aNestedList, level = 1 ):
if type(aNestedList ) == type(''):
return aNestedList
stringList = []
for aSubList in aNestedList:
stringList.append( self.__nestedListToString( aSubList ) )
if level == 0:
separator = '\n,'
else:
separator = ', '
return '( ' + separator.join( stringList ) + ' ) '
def __stringToNestedList( self, aString ):
# should return a nestedlist if string format is OK
# should return None if string format is not OK, should display an error message in this case.
aString=aString.strip()
# decide whether list or string
if aString.__contains__(',') or aString.__contains__('(') or aString.__contains__(')'):
#must be list
if not (aString.startswith('(') and aString.endswith(')') ):
raise BadNestedList( aString )
stringList = self.__split(aString[1:len(aString)-1].strip())
parsedList = map( self.__stringToNestedList, stringList )
if len(parsedList) == 1 and type( parsedList[0]) != type(parsedList ):
return stringList[0]
return parsedList
else:
return aString
def __split( self, aString ):
openPara = 0
returnList = []
actualWord = ''
for aChar in aString:
if aChar == ',' and openPara == 0:
returnList.append( actualWord )
actualWord = ''
elif aChar == '(':
openPara +=1
actualWord += aChar
elif aChar == ')':
openPara -=1
actualWord += aChar
else:
actualWord += aChar
if openPara!=0:
raise BadNestedList( aString )
returnList.append( actualWord )
return returnList
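# Illustrative note (added for clarity): the parser above turns the text
# "( A, ( B, C ) )" into ['A', ['B', 'C']] and raises BadNestedList on
# unbalanced input such as "( A, ( B )".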
|
peiwei/zulip | zerver/views/__init__.py | Python | apache-2.0 | 58,738 | 0.006742 | from __future__ import absolute_import
from typing import Any
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib.auth import authenticate, login, get_backends
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.shortcuts import redirect
from django.template import RequestContext, loader
from django.utils.timezone import now
from django.utils.cache import patch_cache_control
from django.core.exceptions import ValidationError
from django.core import validators
from django.contrib.auth.views import login as django_login_page, \
logout_then_login as django_logout_then_login
from django.forms.models import model_to_dict
from django.core.mail import send_mail
from django.middleware.csrf import get_token
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmAlias, \
RealmFilter, \
PreregistrationUser, get_client, MitUser, UserActivity, PushDeviceToken, \
get_stream, UserPresence, get_recipi | ent, \
split_email_to_domain, resolve_email_to_domain, email_to_username, get_realm, \
completely_open, get_unique_open_realm, remote_user_to_email, email_allowed_for_realm
from zerver.lib.actions import do_change_password, do_change_full_name, d | o_change_is_admin, \
do_activate_user, do_create_user, \
internal_send_message, update_user_presence, do_events_register, \
get_status_dict, do_change_enable_offline_email_notifications, \
do_change_enable_digest_emails, do_set_realm_name, do_set_realm_restricted_to_domain, \
do_set_realm_invite_required, do_set_realm_invite_by_admins_only, \
do_set_realm_create_stream_by_admins_only, get_default_subs, \
user_email_is_unique, do_invite_users, do_refer_friend, compute_mit_user_fullname, \
do_set_muted_topics, clear_followup_emails_queue, do_update_pointer, realm_user_count
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.forms import RegistrationForm, HomepageForm, ToSForm, \
CreateUserForm, is_inactive, OurAuthenticationForm
from django.views.decorators.csrf import csrf_exempt
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.lib import bugdown
from zerver.lib.validator import check_string, check_list, check_bool
from zerver.decorator import require_post, authenticated_json_post_view, \
has_request_variables, authenticated_json_view, to_non_negative_int, \
JsonableError, get_user_profile_by_email, REQ, require_realm_admin, \
zulip_login_required
from zerver.lib.avatar import avatar_url
from zerver.lib.upload import upload_message_image_through_web_client, \
get_signed_upload_url, get_realm_for_filename
from zerver.lib.response import json_success, json_error
from zerver.lib.utils import statsd, generate_random_token
from zproject.backends import password_auth_enabled, dev_auth_enabled
from confirmation.models import Confirmation
import requests
import subprocess
import calendar
import datetime
import ujson
import simplejson
import re
from six.moves import urllib
import base64
import time
import logging
import jwt
import hashlib
import hmac
from zproject.jinja2 import render_to_response
from zerver.lib.rest import rest_dispatch as _rest_dispatch
rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs)))
def name_changes_disabled(realm):
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
@require_post
def accounts_register(request):
key = request.POST['key']
confirmation = Confirmation.objects.get(confirmation_key=key)
prereg_user = confirmation.content_object
email = prereg_user.email
mit_beta_user = isinstance(confirmation.content_object, MitUser)
try:
existing_user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
existing_user_profile = None
validators.validate_email(email)
unique_open_realm = get_unique_open_realm()
if unique_open_realm:
realm = unique_open_realm
domain = realm.domain
elif not mit_beta_user and prereg_user.referred_by:
# If someone invited you, you are joining their realm regardless
# of your e-mail address.
#
# MitUsers can't be referred and don't have a referred_by field.
realm = prereg_user.referred_by.realm
domain = realm.domain
if not email_allowed_for_realm(email, realm):
return render_to_response("zerver/closed_realm.html", {"closed_domain_name": realm.name})
elif not mit_beta_user and prereg_user.realm:
# You have a realm set, even though nobody referred you. This
# happens if you sign up through a special URL for an open
# realm.
domain = prereg_user.realm.domain
realm = get_realm(domain)
else:
domain = resolve_email_to_domain(email)
realm = get_realm(domain)
if realm and realm.deactivated:
# The user is trying to register for a deactivated realm. Advise them to
# contact support.
return render_to_response("zerver/deactivated.html",
{"deactivated_domain_name": realm.name,
"zulip_administrator": settings.ZULIP_ADMINISTRATOR})
try:
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
            # Mirror dummy users must be inactive in order to be activated
is_inactive(email)
else:
# Other users should not already exist at all.
user_email_is_unique(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' + urllib.parse.quote_plus(email))
name_validated = False
full_name = None
if request.POST.get('from_confirmation'):
try:
del request.session['authenticated_full_name']
except KeyError:
pass
if domain == "mit.edu":
hesiod_name = compute_mit_user_fullname(email)
form = RegistrationForm(
initial={'full_name': hesiod_name if "@" not in hesiod_name else ""})
name_validated = True
elif settings.POPULATE_PROFILE_VIA_LDAP:
for backend in get_backends():
if isinstance(backend, LDAPBackend):
ldap_attrs = _LDAPUser(backend, backend.django_to_ldap_username(email)).attrs
try:
request.session['authenticated_full_name'] = ldap_attrs[settings.AUTH_LDAP_USER_ATTR_MAP['full_name']][0]
name_validated = True
# We don't use initial= here, because if the form is
# complete (that is, no additional fields need to be
# filled out by the user) we want the form to validate,
# so they can be directly registered without having to
# go through this interstitial.
form = RegistrationForm(
{'full_name': request.session['authenticated_full_name']})
# FIXME: This will result in the user getting
# validation errors if they have to enter a password.
# Not relevant for ONLY_SSO, though.
break
except TypeError:
# Let the user fill out a name and/or try another backend
form = RegistrationForm()
elif 'full_name' in request.POST:
form = RegistrationForm(
initial={'full_name': request.POST.get('full_name')}
)
else:
form = RegistrationForm()
else:
postdata = request.POST.copy()
if name_changes_disabled(realm):
# If we populate profile information via LDAP and we have a
# verified name from you on file, use that. Otherwise, fall
|
citrix-openstack-build/os-brick | os_brick/tests/test_exception.py | Python | apache-2.0 | 2,260 | 0 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s | oftware
# d | istributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from os_brick import exception
from os_brick.tests import base
class BrickExceptionTestCase(base.TestCase):
def test_default_error_msg(self):
class FakeBrickException(exception.BrickException):
message = "default message"
exc = FakeBrickException()
self.assertEqual(six.text_type(exc), 'default message')
def test_error_msg(self):
self.assertEqual(six.text_type(exception.BrickException('test')),
'test')
def test_default_error_msg_with_kwargs(self):
class FakeBrickException(exception.BrickException):
message = "default message: %(code)s"
exc = FakeBrickException(code=500)
self.assertEqual(six.text_type(exc), 'default message: 500')
def test_error_msg_exception_with_kwargs(self):
class FakeBrickException(exception.BrickException):
message = "default message: %(mispelled_code)s"
exc = FakeBrickException(code=500)
self.assertEqual(six.text_type(exc),
'default message: %(mispelled_code)s')
def test_default_error_code(self):
class FakeBrickException(exception.BrickException):
code = 404
exc = FakeBrickException()
self.assertEqual(exc.kwargs['code'], 404)
def test_error_code_from_kwarg(self):
class FakeBrickException(exception.BrickException):
code = 500
exc = FakeBrickException(code=404)
self.assertEqual(exc.kwargs['code'], 404)
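# Minimal sketch of the formatting contract these tests exercise (a
# hypothetical stand-in; the real os_brick.exception.BrickException also
# handles codes and logging):
class _SketchBrickException(Exception):
    message = "default message: %(code)s"

    def __init__(self, **kwargs):
        try:
            msg = self.message % kwargs
        except KeyError:
            # mirrors test_error_msg_exception_with_kwargs: a missing key
            # leaves the template un-interpolated instead of raising
            msg = self.message
        super(_SketchBrickException, self).__init__(msg)

assert six.text_type(_SketchBrickException(code=500)) == 'default message: 500'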
|
bitcoinfees/bitcoin-feemodel | feemodel/tests/test_txrate.py | Python | mit | 6,229 | 0 | from __future__ import division
import os
import unittest
import threading
import logging
from random import expovariate, random
from math import log
from feemodel.tests.config import (test_memblock_dbfile as dbfile, txref,
tmpdatadir_context)
from feemodel.txmempool import MemBlock, MemEntry
from feemodel.estimate import RectEstimator, ExpEstimator
from feemodel.simul.simul import SimMempool
logging.basicConfig(level=logging.DEBUG)
class RectEstimatorTest(unittest.TestCase):
def setUp(self):
self.blockrange = (333931, 333954)
def test_basic(self):
print("Starting RectEstimator test")
tr = RectEstimator(maxsamplesize=10000)
tr.start(self.blockrange, dbfile=dbfile)
print(repr(tr))
uniquetxs = set([(tx.feerate, tx.size) for tx in tr.txsample])
print("unique ratio is {}".format(len(uniquetxs) / len(tr.txsample)))
print(tr)
def test_limit_sample(self):
maxsamplesize = 1000
tr = RectEstimator(maxsamplesize=maxsamplesize)
tr.start(self.blockrange, dbfile=dbfile)
print(repr(tr))
print(tr)
def test_stop(self):
tr = RectEstimator(maxsamplesize=1000)
stopflag = threading.Event()
threading.Timer(0.01, stopflag.set).start()
self.assertRaises(StopIteration, tr.start, self.blockrange,
stopflag=stopflag, dbfile=dbfile)
class ExpEstimatorTest(unittest.TestCase):
def setUp(self):
self.blockrange = (333931, 333954)
def test_basic(self):
print("Starting ExpEstimator test")
tr = ExpEstimator(3600)
tr.start(self.blockrange[1]-1, dbfile=dbfile)
print(repr(tr))
uniquetxs = set([(tx.feerate, tx.size) for tx in tr.txsample])
print("unique ratio is {}".format(len(uniquetxs) / len(tr.txsample)))
print(tr)
def test_stop(self):
tr = ExpEstimator(3600)
stopflag = threading.Event()
threading.Timer(0.01, stopflag.set).start()
with self.assertRaises(StopIteration):
tr.start(self.blockrange[1]-1, stopflag=stopflag, dbfile=dbfile)
class SamplingTest(unittest.TestCase):
'''Generate and re-estimate.'''
def test_A(self):
TESTFEERATES = range(0, 55000, 5000)
# _dum, txref_rates = txref.get_byterates(feerates=FEERATES)
refbyteratefn = txref.get_byteratefn()
with tmpdatadir_context() as datadir:
# RectEstimator
self.gen_blockrange = (0, 100)
self.tmpdbfile = os.path.join(datadir, '_tmp.db')
self.populate_testdb()
| tr = RectEstimator(maxsamplesize=100000)
print("Starting estimation from generated...")
tr.start(self.gen_blockrange, dbfile=self.tmpdbfile)
print("Rect estimation from generated:")
print("===============================")
print("Test:")
| print(repr(tr))
print(tr)
print("Ref:")
print(repr(txref))
print(txref)
# _dum, byterates = tr.get_byterates(feerates=FEERATES)
testbyteratefn = tr.get_byteratefn()
# for test, target in zip(byterates, txref_rates):
for feerate in TESTFEERATES:
test = testbyteratefn(feerate)
ref = refbyteratefn(feerate)
diff = abs(log(test) - log(ref))
self.assertLess(diff, 0.2)
print("Diff is {}".format(diff))
diff = abs(log(tr.txrate) - log(txref.txrate))
print("txrate log diff is {}".format(diff))
self.assertLess(diff, 0.1)
# ExpEstimator
tr = ExpEstimator(86400)
print("Starting estimation from generated...")
tr.start(self.gen_blockrange[-1]-1, dbfile=self.tmpdbfile)
print("Exp estimation from generated:")
print("===============================")
print("Test:")
print(repr(tr))
print(tr)
print("Ref:")
print(repr(txref))
print(txref)
# _dum, byterates = tr.get_byterates(feerates=FEERATES)
# for test, target in zip(byterates, txref_rates):
testbyteratefn = tr.get_byteratefn()
for feerate in TESTFEERATES:
test = testbyteratefn(feerate)
ref = refbyteratefn(feerate)
diff = abs(log(test) - log(ref))
self.assertLess(diff, 0.2)
print("Diff is {}".format(diff))
diff = abs(log(tr.txrate) - log(txref.txrate))
print("txrate log diff is {}".format(diff))
self.assertLess(diff, 0.1)
def populate_testdb(self):
t = 0
mempool = SimMempool({})
tx_emitter = txref.get_emitter(mempool)
print("txref is {}".format(txref))
for height in range(*self.gen_blockrange):
blockinterval = expovariate(1/600)
t += blockinterval
tx_emitter(blockinterval)
mempool_entries = mempool.get_entries()
entries = {}
for txid, entry in mempool_entries.items():
# Dummy fields
mementry = MemEntry()
mementry.startingpriority = 0
mementry.currentpriority = 0
mementry.fee = entry.feerate*entry.size
mementry.feerate = entry.feerate
mementry.leadtime = 0
mementry.isconflict = False
mementry.inblock = False
# Relevant fields
mementry.time = t - random()*blockinterval
mementry.height = height
entries[str(height)+txid] = mementry
mementry.size = entry.size
b = MemBlock()
b.height = height - 1
b.blockheight = height
b.time = t
b.blocksize = sum([
entry.size for entry in mempool_entries.values()])
b.entries = entries
b.write(self.tmpdbfile, 2000)
mempool.reset()
if __name__ == '__main__':
unittest.main()
|
sutartmelson/girder | girder/models/setting.py | Python | apache-2.0 | 15,828 | 0.001516 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific languag | e governing permissions and
# limitations under the License.
###############################################################################
from collections import OrderedDict
import cherrypy
import pymongo
import six
from ..constants import GIRDER_ROUTE_ID, GIRDER_STATIC_ROUTE_ID, SettingDefault, Sett | ingKey
from .model_base import Model, ValidationException
from girder import logprint
from girder.utility import config, plugin_utilities, setting_utilities
from girder.utility.model_importer import ModelImporter
from bson.objectid import ObjectId
class Setting(Model):
"""
This model represents server-wide configuration settings as key/value pairs.
"""
def initialize(self):
self.name = 'setting'
# We had been asking for an index on key, like so:
# self.ensureIndices(['key'])
# We really want the index to be unique, which could be done:
# self.ensureIndices([('key', {'unique': True})])
# We can't do it here, as we have to update and correct older installs,
# so this is handled in the reconnect method.
def reconnect(self):
"""
Reconnect to the database and rebuild indices if necessary. If a
unique index on key does not exist, make one, first discarding any
extant index on key and removing duplicate keys if necessary.
"""
super(Setting, self).reconnect()
try:
indices = self.collection.index_information()
except pymongo.errors.OperationFailure:
indices = []
hasUniqueKeyIndex = False
presentKeyIndices = []
for index in indices:
if indices[index]['key'][0][0] == 'key':
if indices[index].get('unique'):
hasUniqueKeyIndex = True
break
presentKeyIndices.append(index)
if not hasUniqueKeyIndex:
for index in presentKeyIndices:
self.collection.drop_index(index)
duplicates = self.collection.aggregate([{
'$group': {'_id': '$key',
'key': {'$first': '$key'},
'ids': {'$addToSet': '$_id'},
'count': {'$sum': 1}}}, {
'$match': {'count': {'$gt': 1}}}])
for duplicate in duplicates:
logprint.warning(
'Removing duplicate setting with key %s.' % (
duplicate['key']))
# Remove all of the duplicates. Keep the item with the lowest
# id in Mongo.
for duplicateId in sorted(duplicate['ids'])[1:]:
self.collection.delete_one({'_id': duplicateId})
self.collection.create_index('key', unique=True)
def validate(self, doc):
"""
This method is in charge of validating that the setting key is a valid
key, and that for that key, the provided value is valid. It first
allows plugins to validate the setting, but if none of them can, it
assumes it is a core setting and does the validation here.
"""
key = doc['key']
validator = setting_utilities.getValidator(key)
if validator:
validator(doc)
else:
raise ValidationException('Invalid setting key "%s".' % key, 'key')
return doc
def get(self, key, default='__default__'):
"""
Retrieve a setting by its key.
:param key: The key identifying the setting.
:type key: str
:param default: If no such setting exists, returns this value instead.
:returns: The value, or the default value if the key is not found.
"""
setting = self.findOne({'key': key})
if setting is None:
            if default == '__default__':
default = self.getDefault(key)
return default
else:
return setting['value']
def set(self, key, value):
"""
Save a setting. If a setting for this key already exists, this will
replace the existing value.
:param key: The key identifying the setting.
:type key: str
:param value: The object to store for this setting.
:returns: The document representing the saved Setting.
"""
setting = self.findOne({'key': key})
if setting is None:
setting = {
'key': key,
'value': value
}
else:
setting['value'] = value
return self.save(setting)
def unset(self, key):
"""
Remove the setting for this key. If no such setting exists, this is
a no-op.
:param key: The key identifying the setting to be removed.
:type key: str
"""
for setting in self.find({'key': key}):
self.remove(setting)
def getDefault(self, key):
"""
Retrieve the system default for a value.
:param key: The key identifying the setting.
:type key: str
:returns: The default value if the key is present in both SettingKey
and referenced in SettingDefault; otherwise None.
"""
if key in SettingDefault.defaults:
return SettingDefault.defaults[key]
else:
fn = setting_utilities.getDefaultFunction(key)
if callable(fn):
return fn()
return None
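    # Usage sketch (hypothetical key names): the model behaves as a validated,
    # persistent key/value store:
    #
    #     setting = Setting()
    #     setting.set('core.brand_name', 'Girder')
    #     setting.get('core.brand_name')            # -> 'Girder'
    #     setting.get('no.such.key', default=42)    # -> 42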
@staticmethod
@setting_utilities.validator(SettingKey.SECURE_COOKIE)
def validateSecureCookie(doc):
if not isinstance(doc['value'], bool):
raise ValidationException('Secure cookie option must be boolean.', 'value')
@staticmethod
@setting_utilities.default(SettingKey.SECURE_COOKIE)
def defaultSecureCookie():
return config.getConfig()['server']['mode'] == 'production'
@staticmethod
@setting_utilities.validator(SettingKey.PLUGINS_ENABLED)
def validateCorePluginsEnabled(doc):
"""
Ensures that the set of plugins passed in is a list of valid plugin
names. Removes any invalid plugin names, removes duplicates, and adds
all transitive dependencies to the enabled list.
"""
if not isinstance(doc['value'], list):
raise ValidationException('Plugins enabled setting must be a list.', 'value')
# Add all transitive dependencies and store in toposorted order
doc['value'] = list(plugin_utilities.getToposortedPlugins(doc['value']))
@staticmethod
@setting_utilities.validator(SettingKey.ADD_TO_GROUP_POLICY)
def validateCoreAddToGroupPolicy(doc):
doc['value'] = doc['value'].lower()
if doc['value'] not in ('never', 'noadmin', 'nomod', 'yesadmin', 'yesmod', ''):
raise ValidationException(
'Add to group policy must be one of "never", "noadmin", '
'"nomod", "yesadmin", or "yesmod".', 'value')
@staticmethod
@setting_utilities.validator(SettingKey.COLLECTION_CREATE_POLICY)
def validateCoreCollectionCreatePolicy(doc):
value = doc['value']
if not isinstance(value, dict):
raise ValidationException('Collection creation policy must be a JSON object.')
for i, groupId in enumerate(value.get('groups', ())):
ModelImporter.model('group').load(groupId, force=True, exc=True)
value['groups'][i] = ObjectId(value['groups'][i])
for i, userId in enumerate(value.get('users', ())) |
rembo10/headphones | lib/beets/__init__.py | Python | gpl-3.0 | 1,380 | 0 | # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (th | e
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, | and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import confuse
from sys import stderr
__version__ = '1.6.0'
__author__ = 'Adrian Sampson <adrian@radbox.org>'
class IncludeLazyConfig(confuse.LazyConfig):
"""A version of Confuse's LazyConfig that also merges in data from
YAML files specified in an `include` setting.
"""
def read(self, user=True, defaults=True):
super().read(user, defaults)
try:
for view in self['include']:
self.set_file(view.as_filename())
except confuse.NotFoundError:
pass
except confuse.ConfigReadError as err:
stderr.write("configuration `import` failed: {}"
.format(err.reason))
config = IncludeLazyConfig('beets', __name__)
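# Hedged usage sketch: with a user config.yaml containing an `include` list,
# e.g.
#
#     include:
#         - secrets.yaml
#
# read() merges each listed file into this configuration; the file name above
# is invented, and the lookup below assumes a key defined in one of them:
#
#     config.read()
#     token = config['acoustid']['apikey'].get(str)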
|
mbi/django-rosetta | rosetta/storage.py | Python | mit | 4,395 | 0.00182 | import hashlib
import importlib
import time
from django.conf import settings
from django.core.cache import caches
from django.core.exceptions import ImproperlyConfigured
from .conf import settings as rosetta_settings
cache = caches[rosetta_settings.ROSETTA_CACHE_NAME]
class BaseRosettaStorage(object):
def __init__(self, request):
self.request = request
def get(self, key, default=None):
raise NotImplementedError
def set(self, key, val):
raise NotImplementedError
def has(self, key):
raise NotImplementedError
def delete(self, key):
raise NotImplementedError
class DummyRosettaStorage(BaseRosettaStorage):
def get(self, key, default=None):
return default
def set(self, key, val):
pass
def has(self, key):
return False
def delete(self, key):
pass
class SessionRosettaStorage(BaseRosettaStorage):
def __init__(self, request):
super(SessionRosettaStorage, self).__init__(request)
if (
'signed_cookies' in settings.SESSION_ENGINE
and 'pickle' not in settings.SESSION_SERIALIZER.lower()
):
raise ImproperlyConfigured(
"Sorry, but django-rosetta doesn't support the `signed_cookies` SESSION_ENGINE, because rosetta specific session files cannot be serialized."
)
def get(self, key, default=None):
if key in self.request.session:
return self.request.session[key]
return default
def set(self, key, val):
self.request.session[key] = val
def has(self, key):
return key in self.request.session
def delete(self, key):
del self.request.session[key]
class CacheRosettaStorage(BaseRosettaStorage):
# unlike the session storage backend, cache is shared among all users
    # so we need a per-user key prefix, which we store in the session
def __init__(self, request):
super(CacheRosettaStorage, self).__init__(request)
if 'rosetta_cache_storage_key_prefix' in self.request.session:
self._key_prefix = self.request.session['rosetta_cache_storage_key_prefix']
else:
self._key_prefix = hashlib.new(
'sha1', str(time.time()).encode('utf8')
).hexdigest()
self.request.session['rosetta_cache_storage_key_prefix'] = self._key_prefix |
if self.request.session['rosetta_cache_storage_key_prefix' | ] != self._key_prefix:
raise ImproperlyConfigured(
"You can't use the CacheRosettaStorage because your Django Session storage doesn't seem to be working. The CacheRosettaStorage relies on the Django Session storage to avoid conflicts."
)
# Make sure we're not using DummyCache
if (
'dummycache'
in settings.CACHES[rosetta_settings.ROSETTA_CACHE_NAME]['BACKEND'].lower()
):
raise ImproperlyConfigured(
"You can't use the CacheRosettaStorage if your cache isn't correctly set up (you are using the DummyCache cache backend)."
)
# Make sure the cache actually works
try:
self.set('rosetta_cache_test', 'rosetta')
if not self.get('rosetta_cache_test') == 'rosetta':
raise ImproperlyConfigured(
"You can't use the CacheRosettaStorage if your cache isn't correctly set up, please double check your Django DATABASES setting and that the cache server is responding."
)
finally:
self.delete('rosetta_cache_test')
def get(self, key, default=None):
# print ('get', self._key_prefix + key)
return cache.get(self._key_prefix + key, default)
def set(self, key, val):
# print ('set', self._key_prefix + key)
cache.set(self._key_prefix + key, val, 86400)
def has(self, key):
# print ('has', self._key_prefix + key)
return (self._key_prefix + key) in cache
def delete(self, key):
# print ('del', self._key_prefix + key)
cache.delete(self._key_prefix + key)
def get_storage(request):
from rosetta.conf import settings
storage_module, storage_class = settings.STORAGE_CLASS.rsplit('.', 1)
storage_module = importlib.import_module(storage_module)
return getattr(storage_module, storage_class)(request)
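# Hedged usage sketch (inside a view; the key name is invented): the backend
# selected by settings.STORAGE_CLASS is used as a small per-user store:
#
#     storage = get_storage(request)
#     storage.set('rosetta_last_file', '/path/to/django.po')
#     if storage.has('rosetta_last_file'):
#         path = storage.get('rosetta_last_file')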
|
jricardo27/travelhelper | travelhelper/apps/core/templatetags/__init__.py | Python | bsd-3-clause | 93 | 0 | """
Template Tag | s go in this directory
Load modules in templates with {% loa | d badger %}
"""
|
oghm2/hackdayoxford | cellcounter/main/management/commands/loadcsv.py | Python | mit | 1,153 | 0.026886 | from django.core.management.base import BaseCommand, CommandError
from cellcounter.main.models import CellImage, SimilarLookingGroup, CellType
import csv
class dialect(csv.Dialect):
pass
class Command(BaseCommand):
args = '<csvfile1 csvfile2 ...>'
help = 'Loads images and descriptions from specified csv file(s)'
def handle(self, *args, **options):
for fi | lename in args:
file_ = csv.DictReader(open(filename), dialect="excel-tab")
for line in file_:
try:
celltype = CellType.objects.get(readable_name = line["CellType"] | )
except:
print "Cell Type not found:" + line["CellType"]
ci = CellImage(title = line["Title"],
description = line["Description"],
file = line["Filename"],
thumbnail_left = line["X"],
thumbnail_top = line["Y"],
thumbnail_width = line["Pixels"],
celltype = celltype)
ci.save()
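# Hedged input sketch: the command reads excel-tab (TSV) files whose header
# row supplies the DictReader keys used above; <TAB> stands for a literal tab
# and the sample values are invented:
#
#     Title<TAB>Description<TAB>Filename<TAB>X<TAB>Y<TAB>Pixels<TAB>CellType
#     Blast<TAB>An example cell<TAB>blast.png<TAB>10<TAB>20<TAB>64<TAB>Neutrophil
#
# Run it with: python manage.py loadcsv cells.tsv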
|
sdispater/orator | tests/support/test_collection.py | Python | mit | 8,036 | 0.000622 | # -*- coding: utf-8 -*-
from .. import OratorTestCase
from orator.support.collection import Collection
class CollectionTestCase(OratorTestCase):
def test_first_returns_first_item_in_collection(self):
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.first())
def test_last_returns_last_item_in_collection(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c.last())
def test_pop_removes_and_returns_last_item_or_specified_index(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c.pop())
self.assertEqual("foo", c.last())
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.pop(0))
self.assertEqual("bar", c.first())
def test_shift_removes_and_returns_first_item(self):
c = Collection(["foo", "bar"])
self.assertEqual("foo", c.shift())
self.assertEqual("bar", c.first())
def test_empty_collection_is_empty(self):
c = Collection()
c2 = Collection([])
self.assertTrue(c.is_empty())
self.assertTrue(c2.is_empty())
def test_collection_is_constructed(self):
c = Collection("foo")
self.assertEqual(["foo"], c.all())
c = Collection(2)
self.assertEqual([2], c.all())
c = Collection(False)
self.assertEqual([False], c.all())
c = Collection(None)
self.assertEqual([], c.all())
c = Collection()
self.assertEqual([], c.all())
def test_offset_access(self):
c = Collection(["foo", "bar"])
self.assertEqual("bar", c[1])
c[1] = "baz"
self.assertEqual("baz", c[1])
del c[0]
self.assertEqual("baz", c[0])
def test_forget(self):
c = Collection(["foo", "bar", "boom"])
c.forget(0)
self.assertEqual("bar", c[0])
c.forget(0, 1)
self.assertTrue(c.is_empty())
def test_get_avg_items_from_collection(self):
c = Collection([{"foo": 10}, {"foo": 20}])
self.assertEqual(15, c.avg("foo"))
c = Collection([1, 2, 3, 4, 5])
self.assertEqual(3, c.avg())
c = Collection()
self.assertIsNone(c.avg())
def test_collapse(self):
obj1 = object()
obj2 = object()
c = Collection([[obj1], [obj2]])
self.assertEqual([obj1, obj2], c.collapse().all())
def test_collapse_with_nested_collection(self):
c = Collection([Collection([1, 2, 3]), Collection([4, 5, 6])])
self.assertEqual([1, 2, 3, 4, 5, 6], c.collapse().all())
def test_contains(self):
c = Collection([1, 3, 5])
self.assertTrue(c.contains(1))
self.assertFalse(c.contains(2))
self.assertTrue(c.contains(lambda x: x < 5))
self.assertFalse(c.contains(lambda x: x > 5))
self.assertIn(3, c)
c = Collection([{"v": 1}, {"v": 3}, {"v": 5}])
self.assertTrue(c.contains("v", 1))
self.assertFalse(c.contains("v", 2))
obj1 = type("lamdbaobject", (object,), {})()
obj1.v = 1
obj2 = type("lamdbaobject", (object,), {})()
obj2.v = 3
obj3 = type("lamdbaobject", (object,), {})()
obj3.v = 5
c = Collection([{"v": 1}, {"v": 3}, {"v": 5}])
self.assertTrue(c.contains("v", 1))
self.assertFalse(c.contains("v", 2))
def test_countable(self):
c = Collection(["foo", "bar"])
self.assertEqual(2, c.count())
self.assertEqual(2, len(c))
def test_diff(self):
c = Collection(["foo", "bar"])
self.assertEqual(["foo"], c.diff(Collection(["bar", "baz"])).all())
def test_each(self):
original = ["foo", "bar", "baz"]
c = Collection(original)
result = []
c.each(lambda x: result.append(x))
self.assertEqual(result, original)
self.assertEqual(original, c.all())
def test_every(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 3, 5], c.every(2).all())
self.assertEqual([2, 4, 6], c.every(2, 1).all())
def test_filter(self):
c = Collection([{"id": 1, "name": "hello"}, {"id": 2, "name": "world"}])
self.assertEqual(
[{"id": 2, "name": "world"}], c.filter(lambda item: item["id"] == 2).all()
)
c = Collection(["", "hello", "", "world"])
self.assertEqual(["hello", "world"], c.filter().all())
def test_where(self):
c = Collection([{"v": 1}, {"v": 3}, {"v": 2}, {"v": 3}, {"v": 4}])
self.assertEqual([{"v": 3}, {"v": 3}], c.where("v", 3).all())
def test_implode(self):
obj1 = type("lamdbaobject", (object,), {})()
obj1.name = "john"
obj1.email = "foo"
c = Collection(
[{"name": "john", "email": "foo"}, {"name": "jane", "email": "bar"}]
)
self.assertEqual("foobar", c.implode("email"))
self.assertEqual("foo,bar", c.implode("email", ","))
c = Collection(["foo", "bar"])
self.assertEqual("foobar", c.implode(""))
self.assertEqual("foo,bar", c.implode(","))
def test_lists(self):
obj1 = type("lamdbaobject", (object,), {})()
obj1.name = "john"
obj1.email = "foo"
c = Collection([obj1, {"name": "jane", "email": "bar"}] | )
self.asser | tEqual({"john": "foo", "jane": "bar"}, c.lists("email", "name"))
self.assertEqual(["foo", "bar"], c.pluck("email").all())
def test_map(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([3, 4, 5, 6, 7], c.map(lambda x: x + 2).all())
def test_merge(self):
c = Collection([1, 2, 3])
c.merge([4, 5, 6])
self.assertEqual([1, 2, 3, 4, 5, 6], c.all())
c = Collection(Collection([1, 2, 3]))
c.merge([4, 5, 6])
self.assertEqual([1, 2, 3, 4, 5, 6], c.all())
def test_for_page(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([4, 5, 6], c.for_page(2, 3).all())
self.assertEqual([5, 6], c.for_page(2, 4).all())
def test_prepend(self):
c = Collection([4, 5, 6])
c.prepend(3)
self.assertEqual([3, 4, 5, 6], c.all())
def test_append(self):
c = Collection([3, 4, 5])
c.append(6)
self.assertEqual([3, 4, 5, 6], c.all())
def test_pull(self):
c = Collection([1, 2, 3, 4])
c.pull(2)
self.assertEqual([1, 2, 4], c.all())
def test_put(self):
c = Collection([1, 2, 4])
c.put(2, 3)
self.assertEqual([1, 2, 3], c.all())
def test_reject(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 2, 3], c.reject(lambda x: x > 3).all())
def test_reverse(self):
c = Collection([1, 2, 3, 4])
self.assertEqual([4, 3, 2, 1], c.reverse().all())
def test_sort(self):
c = Collection([5, 3, 1, 2, 4])
sorted = c.sort(lambda x: x)
self.assertEqual([1, 2, 3, 4, 5], sorted.all())
def test_take(self):
c = Collection([1, 2, 3, 4, 5, 6])
self.assertEqual([1, 2, 3], c.take(3).all())
self.assertEqual([4, 5, 6], c.take(-3).all())
def test_transform(self):
c = Collection([1, 2, 3, 4])
c.transform(lambda x: x + 2)
self.assertEqual([3, 4, 5, 6], c.all())
def test_zip(self):
c = Collection([1, 2, 3])
self.assertEqual([(1, 4), (2, 5), (3, 6)], c.zip([4, 5, 6]).all())
def test_only(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([2, 4], c.only(1, 3).all())
def test_without(self):
c = Collection([1, 2, 3, 4, 5])
self.assertEqual([1, 3, 5], c.without(1, 3).all())
self.assertEqual([1, 2, 3, 4, 5], c.all())
def test_flatten(self):
c = Collection({"foo": [5, 6], "bar": 7, "baz": {"boom": [1, 2, 3, 4]}})
self.assertEqual([1, 2, 3, 4, 5, 6, 7], c.flatten().sort().all())
c = Collection([1, [2, 3], 4])
self.assertEqual([1, 2, 3, 4], c.flatten().all())
|
aureooms/checkio | elementary/02-index-power.py | Python | agpl-3.0 | 49 | 0.102041 | inde | x_power=lambda a,n:a[n]**n if n<len | (a)else-1
|
spiderbit/canta-ng | event/keyboard_event.py | Python | gpl-3.0 | 3,140 | 0.003822 | #! /usr/bin/python -O
# -*- coding: utf-8 -*-
#
# CANTA - A free entertaining educational software for singing
# Copyright (C) 2007 S. Huchler, A. Kattner, F. Lopez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import time
class KeyboardEvent():
def __init__(self, widget_properties = None, theme_mgr=None):
# self.parent_widget = widget_properties['root_widget']
# self.shift = False
# self.ctrl = False
# self.alt = False
self.observed_events = []
def add_connection(self, type, action, args = None):
self.observed_events.append({'type': type, 'action': action, 'args': args })
def reset(self):
self.observed_events=[]
def begin_round(self):
# soya.Body.begin_round(self)
# # go through the events:
# for event in soya.process_event():
# # key pressed:
# if event[0] == soya.sdlconst.KEYDOWN:
# # [ARROW UP] - no action:
# for observed_event in self.observed_events:
# if event[1] == observed_event['type']:
# if observed_event['args'] is not None:
# observed_event['action'](observed_event['args'])
# else:
# observed_event['action']()
# break
# else:
# if event[1] == soya.sdlconst.K_RSHIFT \
# or event[1] == soya.sdlconst.K_LSHIFT:
# self.shift = True
# elif event[1] == soya.sdlconst.K_RCTRL \
# or event[1] == soya.sdlconst.K_LCTRL:
# self.ctrl = True
# elif event[1] == soya.sdlconst.K_RALT \
# or event[1] == soya.sdlconst.K_LALT:
# self.alt = True
# elif event[0] == soya.sdlconst.KEYUP:
# if event[1] == soya.sdlconst.K_RSHIFT \
# or even | t[1] == soya.sdl | const.K_LSHIFT:
# self.shift = False
# elif event[1] == soya.sdlconst.K_RCTRL \
# or event[1] == soya.sdlconst.K_LCTRL:
# self.ctrl = False
# elif event[1] == soya.sdlconst.K_RALT \
# or event[1] == soya.sdlconst.K_LALT:
# self.alt = False
print ("begin_round in keyboard_event")
|
takeflight/wagtailvideos | wagtailvideos/fields.py | Python | bsd-3-clause | 2,114 | 0.001419 | from django.conf import settings
from django.core.exceptions import ValidationError
from django.forms.fi | elds import FileField
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
class WagtailVideoField(FileField):
def __init__(self, *args, **kwargs):
super(WagtailVideoField, self).__init__(*args, **kwargs)
# Get max upload size from settings
self.max_upload_size = getattr(settings, 'WAGTAILVIDEOS_MAX_UPLOAD_SIZE', 1024 * 1024 * 1024)
| max_upload_size_text = filesizeformat(self.max_upload_size)
# Help text
if self.max_upload_size is not None:
self.help_text = _(
"Maximum filesize: %(max_upload_size)s."
) % {
'max_upload_size': max_upload_size_text,
}
# Error messages
self.error_messages['invalid_video_format'] = _(
"Not a valid video. Content type was %s."
)
self.error_messages['file_too_large'] = _(
"This file is too big (%%s). Maximum filesize %s."
) % max_upload_size_text
self.error_messages['file_too_large_unknown_size'] = _(
"This file is too big. Maximum filesize %s."
) % max_upload_size_text
def check_video_file_format(self, f):
if not f.content_type.startswith('video'):
raise ValidationError(self.error_messages['invalid_video_format'] % f.content_type)
def check_video_file_size(self, f):
# Upload size checking can be disabled by setting max upload size to None
if self.max_upload_size is None:
return
# Check the filesize
if f.size > self.max_upload_size:
raise ValidationError(self.error_messages['file_too_large'] % (
filesizeformat(f.size),
), code='file_too_large')
def to_python(self, data):
f = super(WagtailVideoField, self).to_python(data)
if f is not None:
self.check_video_file_size(f)
self.check_video_file_format(f)
return f
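# Hedged usage sketch: the field is normally declared on a Django form (the
# form and field names below are hypothetical):
#
#     class VideoUploadForm(forms.Form):
#         video = WagtailVideoField()
#
# to_python() then rejects non-video content types and anything larger than
# WAGTAILVIDEOS_MAX_UPLOAD_SIZE, using the error messages configured above.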
|
richardliaw/ray | streaming/python/tests/test_operator.py | Python | apache-2.0 | 1,294 | 0 | from ray.streaming import function
from ray.streaming import operator
from ray.streaming.operator import OperatorType
from ray.streaming.runtime impor | t gateway_client
def test_create_operator_with_func():
map_func = function.SimpleMapFunction(lambda x: x)
map_operator = operator.create_operator_with_func(map | _func)
assert type(map_operator) is operator.MapOperator
class MapFunc(function.MapFunction):
def map(self, value):
return str(value)
class EmptyOperator(operator.StreamOperator):
def __init__(self):
super().__init__(function.EmptyFunction())
def operator_type(self) -> OperatorType:
return OperatorType.ONE_INPUT
def test_load_operator():
# function_bytes, module_name, class_name,
descriptor_func_bytes = gateway_client.serialize(
[None, __name__, MapFunc.__name__, "MapFunction"])
descriptor_op_bytes = gateway_client.serialize(
[descriptor_func_bytes, "", ""])
map_operator = operator.load_operator(descriptor_op_bytes)
assert type(map_operator) is operator.MapOperator
descriptor_op_bytes = gateway_client.serialize(
[None, __name__, EmptyOperator.__name__])
test_operator = operator.load_operator(descriptor_op_bytes)
assert isinstance(test_operator, EmptyOperator)
|
bluecube/pysystemfan | pysystemfan/status_server.py | Python | mit | 2,693 | 0.003713 | from . import config_params
from . import util
import http.server
import threading
import json
import logging
logger = logging.getLogger(__name__)
_not_set = object()
class StatusServer(config_params.Configurable):
_params = [
("port", _not_set, "Port where to serve the status page. Default is to not run a server."),
("bind", "127.0.0.1", "Address to bind to"),
("status_path", "/status.json", "Path of the status file on the server")
]
def __init__(self, parent, params):
self.process_params(params)
self._data = {} # Storage for the exported data that are being processed (inactive yet)
self._active_data = {} # Storage for the exported data that are being served
def __enter__(self):
if self.port is not _not_set:
self.start()
return self
def __exit__(self, *args):
if self.port is not _not_set:
self.stop()
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
self._data[key] = value
def update(self):
self._active_data = self._data
def start(self):
path = self.status_path
instance = self # Local copy for handler
address = (self.bind, self.port)
class Handler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
try:
if self.path == path:
document = json.dumps(instance._active_data, indent=2).encode("utf-8")
self.send_response(200)
self.send_header("Content-type", "application/json")
self.send_header("Content-length", len(document))
self.end_headers()
self.wfile.write(document)
else:
self.send_error(404)
except Exception as e:
logger.exception("Exception in handler")
def log_error(self, msg, *args):
logger.warning("%s: " + msg, self.client_address[0], *args)
def log_message(self, msg, *args):
logger.debug("%s: " + msg, self.client_address[0], *args)
self._server = http.server.HTTPServer(address, Handler)
self._thread = threading.Thread(target=self._server.serve_forever,
name="Status HTTP")
logger.info("Starting server at %s", a | ddress)
self._thread.start()
def stop(self):
logger.debug("Waiting for server to shut down")
self._server.shutdown()
self._thread.join()
logger.inf | o("Server stopped")
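# Hedged usage sketch (parent and params follow the Configurable pattern used
# above; the port and payload are invented):
#
#     with StatusServer(None, {"port": 8080}) as status:
#         status["fans"] = [{"name": "cpu", "rpm": 900}]
#         status.update()  # snapshot becomes visible at /status.json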
|
maferelo/saleor | saleor/product/migrations/0018_auto_20161212_0725.py | Python | bsd-3-clause | 582 | 0.001718 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-12 13:25
from __future__ import unicode_literals
from django.db import migrations
from django.utils.text import slugify
def create_slugs(apps, schema_editor):
Value = apps.get_model("product", "AttributeChoiceValue")
for | value in Value.objects.all():
value.slug = slugify(value.display)
value.save()
class Migration(migrations.Migration):
dependencies = [("product", "0017_attributechoicevalue_slug")]
opera | tions = [migrations.RunPython(create_slugs, migrations.RunPython.noop)]
|
larsyencken/cjktools | cjktools/resources/zhuyin_table.py | Python | bsd-3-clause | 2,155 | 0.000464 | # -*- coding: utf-8 -*-
#
# zhuyin_table.py
# cjktools
#
"""
An interface to the zhuyin <-> pinyin table.
"""
from functools import partial
from . import cjkdata
from cjktools.common import get_stream_context, stream_codec
def _default_stream():
return open(cjkdata.get_resource('tables/zhuyin_pinyin_conv_table'))
_get_stream_context = partial(get_stream_context, _default_stream)
def parse_lines(istream):
istream = stream_codec(istream)
for line in istream:
if not line.startswith('#'):
yield line.rstrip().split()
def zhuyin_to_pinyin_table(istream=None):
""" Returns a dictionary mapping zhuyin to pinyin. """
with _get_stream_context(istream) as stream:
table = {}
for zhuyin, pinyin in parse_lines(stream):
table[zhuyin] = pinyin
return table
def pinyin_to_zhuyin_table(istream=None):
""" Returns a dictionary mapping zhuyin to pinyin. """
with _get_stream_con | text(istream) as istream:
table = {}
for zhuyin, pinyin in parse_lines(istream):
table[pinyin] = zhuyin
return table
def get_all_pinyin(istream=None):
| """ Returns a list of all pinyin """
with _get_stream_context(istream) as istream:
all_pinyin = ['r']
for zhuyin, pinyin in parse_lines(istream):
all_pinyin.append(pinyin)
return all_pinyin
def pinyin_regex_pattern(istream=None):
""" Returns a pinyin regex pattern, with optional tone number. """
all_pinyin = get_all_pinyin(istream)
# Sort from longest to shortest, so as to make maximum matches whenever
# possible.
all_pinyin = sorted(all_pinyin, key=len, reverse=True)
# Build a generic pattern for a single pinyin with an optional tone.
pattern = '(%s)([0-5]?)' % '|'.join(all_pinyin)
return pattern
def zhuyin_regex_pattern(istream=None):
""" Returns a zhuyin regex pattern. """
with _get_stream_context(istream) as istream:
        all_zhuyin = []
        for zhuyin, pinyin in parse_lines(istream):
            all_zhuyin.append(zhuyin)
    pattern = '(%s)[0-4]?' % '|'.join(all_zhuyin)
return pattern
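# Hedged usage sketch: pinyin_regex_pattern() yields an alternation with an
# optional tone group, so a compiled matcher splits syllable and tone (this
# assumes 'hao' is present in the bundled conversion table):
#
#     import re
#     matcher = re.compile(pinyin_regex_pattern())
#     syllable, tone = matcher.match('hao3').groups()  # ('hao', '3')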
|
jgeewax/gcloud-python | scripts/verify_included_modules.py | Python | apache-2.0 | 6,286 | 0 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check if all public modules are included in our docs."""
from __future__ import print_function
import argparse
import os
import sys
import warnings
from sphinx.ext.intersphinx import fetch_inventory
from script_utils import PROJECT_ROOT
DOCS_DIR = os.path.join(PROJECT_ROOT, 'docs')
IGNORED_PREFIXES = ('test_', '_')
IGNORED_MODULES = frozenset([
'google.cloud',
'google.cloud.bigquery',
'google.cloud.bigtable',
'google.cloud.dns',
'google.cloud.error_reporting',
'google.cloud.language',
'google.cloud.logging',
'google.cloud.logging.handlers',
'google.cloud.logging.handlers.transports',
'google.cloud.monitoring',
'google.cloud.pubsub',
'google.cloud.resource_manager',
'google.cloud.speech',
'google.cloud.storage',
'google.cloud.streaming',
'google.cloud.streaming.buffered_stream',
'google.cloud.streaming.exceptions',
'google.cloud.streaming.http_wrapper',
'google.cloud.streaming.stream_slice',
'google.cloud.streaming.transfer',
'google.cloud.streaming.util',
'google.cloud.translate',
'google.cloud.vision',
'google.cloud.vision.fixtures',
])
PACKAGES = (
'bigquery',
'bigtable',
'core',
'datastore',
'dns',
'error_reporting',
'language',
'logging',
'monitoring',
'pubsub',
'resource_manager',
'runtimeconfig',
'speech',
'storage',
'translate',
'vision',
)
class SphinxApp(object):
"""Mock app to interact with Sphinx helpers."""
warn = warnings.warn
srcdir = DOCS_DIR
def is_valid_module(filename):
"""Determines if a filename is a valid Python module.
    Assumes it is just the end of a path (i.e. it does not contain
    ``os.path.sep``).
:type filename: str
:param filename: The name of a file.
:rtype: bool
:returns: Flag indicating if the filename is valid.
"""
if not filename.endswith('.py'):
return False
if filename == '__init__.py':
return True
for prefix in IGNORED_PREFIXES:
if filename.startswith(prefix):
return False
return True
def get_public_modules(path, base_package=None):
"""Get list of all public modules relative to a path.
:type path: str
:param path: The path containing the python modules.
:type base_package: str
:param base_package: (Optional) A package to prepend in
front of the path.
:rtype: list
:returns: List of all modules found.
"""
result = []
for subdir, _, files in os.walk(path):
# Skip folders that start with _.
if any([part.startswith('_')
for part in subdir.split(os.path.sep)]):
continue
_, rel_dir = subdir.split(path)
rel_dir = rel_dir.lstrip(os.path.sep)
for filename in files:
if is_valid_module(filename):
mod_name, _ = os.path.splitext(filename)
rel_path = os.path.join(rel_dir, mod_name)
if base_package is not None:
rel_path = os.path.join(base_package, rel_path)
# Turn into a | Python module rather than a file path.
rel_path = rel_path.replace(os.path.sep, '.')
if mod_name == '__init__':
result.append(rel_path[:-len('.__init__')])
else:
result.append(rel_path)
return result
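# Worked sketch of the traversal above (paths are hypothetical): given
#
#     /pkg/google/cloud/__init__.py
#     /pkg/google/cloud/storage.py
#     /pkg/google/cloud/_helpers.py      <- skipped, leading underscore
#
# get_public_modules('/pkg/google/cloud', base_package='google.cloud')
# returns ['google.cloud', 'google.cloud.storage'].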
def verify_modules(build_root='_build'):
"""Verify modules included.
:type build_root: str
:param build_root: The root of the directory where docs are built into.
Defaul | ts to ``_build``.
"""
object_inventory_relpath = os.path.join(build_root, 'html', 'objects.inv')
mock_uri = ''
inventory = fetch_inventory(SphinxApp, mock_uri,
object_inventory_relpath)
sphinx_mods = set(inventory['py:module'].keys())
public_mods = set()
for package in PACKAGES:
library_dir = os.path.join(PROJECT_ROOT, package, 'google', 'cloud')
package_mods = get_public_modules(library_dir,
base_package='google.cloud')
public_mods.update(package_mods)
if not sphinx_mods <= public_mods:
unexpected_mods = sphinx_mods - public_mods
message = ['Unexpected error. There were modules referenced by '
'Sphinx that are not among the public modules.']
message.extend(['- %s' % (mod,) for mod in unexpected_mods])
print('\n'.join(message), file=sys.stderr)
sys.exit(1)
undocumented_mods = public_mods - sphinx_mods
# Remove ignored modules.
undocumented_mods -= IGNORED_MODULES
if undocumented_mods:
message_parts = ['Found undocumented public modules:']
message_parts.extend(['- ' + mod_name
for mod_name in sorted(undocumented_mods)])
print('\n'.join(message_parts), file=sys.stderr)
sys.exit(1)
def get_parser():
"""Get simple ``argparse`` parser to determine package.
:rtype: :class:`argparse.ArgumentParser`
:returns: The parser for this script.
"""
description = ('Run check that all google-cloud '
'modules are included in docs.')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--build-root', dest='build_root',
help='The root directory where docs are located.')
return parser
def main():
"""Main script to verify modules included."""
parser = get_parser()
args = parser.parse_args()
verify_modules(build_root=args.build_root)
if __name__ == '__main__':
main()
|
casawa/mdtraj | mdtraj/utils/unit/standard_dimensions.py | Python | lgpl-2.1 | 2,307 | 0.002167 | #!/bin/env python
"""
Module simtk.unit.standard_dimensions
Definition of principal dimensions: mass, length, time, etc.
This is part of the OpenMM molecular simulation toolkit originating from
Simbios, the NIH National Center for Physics-Based Simulation of
Biological Structures at Stanford, funded under the NIH Roadmap for
Medical Research, grant U54 GM072970. See https://simtk.org.
Portions copyright (c) 2012 Stanford University and the Authors.
Authors: Christopher M. Bruns
Contributors: Peter Eastman
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AU | THORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = "Christopher M. Bruns"
__version__ = "0.6"
from .basedimension import BaseDimension
##################
### DIMENSIONS ###
#### | ##############
mass_dimension = BaseDimension('mass')
length_dimension = BaseDimension('length')
time_dimension = BaseDimension('time')
temperature_dimension = BaseDimension('temperature')
amount_dimension = BaseDimension('amount')
charge_dimension = BaseDimension('charge')
luminous_intensity_dimension = BaseDimension('luminous intensity')
angle_dimension = BaseDimension('angle')
information_dimension = BaseDimension('information')
# run module directly for testing
if __name__=='__main__':
# Test the examples in the docstrings
import doctest, sys
doctest.testmod(sys.modules[__name__])
|
pinac0099/dynamic-bus-scheduling | tests/mongodb_database_connection_test.py | Python | mit | 29,881 | 0.002577 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
- LICENCE
The MIT License (MIT)
Copyright (c) 2016 Eleftherios Anagnostopoulos for Ericsson AB (EU FP7 CityPulse Project)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
- DESCRIPTION OF DOCUMENTS
-- MongoDB Database Documents:
address_document: {
'_id', 'name', 'node_id', 'point': {'longitude', 'latitude'}
}
bus_line_document: {
'_id', 'bus_line_id', 'bus_stops': [{'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}]
}
bus_stop_document: {
'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}
}
bus_stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[edge_object_id]]
}
bus_vehicle_document: {
'_id', 'bus_vehicle_id', 'maximum_capacity',
'routes': [{'starting_datetime', 'ending_datetime', 'timetable_id'}]
}
detailed_bus | _stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'poin | t': {'longitude', 'latitude'}},
'waypoints': [[edge_document]]
}
edge_document: {
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}
node_document: {
'_id', 'osm_id', 'tags', 'point': {'longitude', 'latitude'}
}
point_document: {
'_id', 'osm_id', 'point': {'longitude', 'latitude'}
}
timetable_document: {
'_id', 'timetable_id', 'bus_line_id', 'bus_vehicle_id',
'timetable_entries': [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime', 'number_of_onboarding_passengers',
'number_of_deboarding_passengers', 'number_of_current_passengers',
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}],
'travel_requests': [{
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}]
}
traffic_event_document: {
'_id', 'event_id', 'event_type', 'event_level', 'point': {'longitude', 'latitude'}, 'datetime'
}
travel_request_document: {
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}
way_document: {
'_id', 'osm_id', 'tags', 'references'
}
-- Route Generator Responses:
get_route_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}
get_route_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}]
get_waypoints_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}
get_waypoints_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}]
"""
import time
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from src.mongodb_database.mongodb_database_connection import MongodbDatabaseConnection
from src.common.logger import log
from src.common.parameters import mongodb_host, mongodb_port
__author__ = 'Eleftherios Anagnostopoulos'
__email__ = 'eanagnostopoulos@hotmail.com'
__credits__ = [
'Azadeh Bararsani (Senior Researcher at Ericsson AB) - email: azadeh.bararsani@ericsson.com'
'Aneta Vulgarakis Feljan (Senior Researcher at Ericsson AB) - email: aneta.vulgarakis@ericsson.com'
]
class MongodbDatabaseConnectionTester(object):
def __init__(self):
log(module_name='mongodb_database_connection_test', log_type='INFO',
log_message='initialize_mongodb_database_connection: starting')
self.start_time = time.time()
self.mongodb_database_connection = MongodbDatabaseConnection(host=mongodb_host, port=mongodb_port)
self.elapsed_time = time.time() - self.start_time
log(module_name='mongodb_database_connection_test', log_type='INFO',
log_message='initialize_mongodb_database_connection: finished - elapsed_time = ' +
str(self.elapsed_time) + ' sec')
def clear_all_collections(self):
log(module_name='mongodb_database_connection_test', log_type='INFO',
log_message='clear_all_collections: starting')
self.start_time = time.time()
self.mongodb_database_connection.clear_all_collections()
self.elapsed_time = time.time() - self.start_time
log(module_name='mongodb_database_connection_test', log_type='INFO',
log_message='clear_all_collections: finished - elapsed_time = ' +
str(self.elapsed_time) + ' sec')
def clear_address_documents_collection(self):
log(module_name='mongodb_database_connection_test', log_type='INFO',
log_message='clear_address_documents_collection: starting')
self.start_time = time.time()
self.mongodb_database_connection.clear_address_documents_coll |
projectatomic/osbs-client | osbs/api.py | Python | bsd-3-clause | 57,765 | 0.002043 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals, absolute_import
from collections import namedtuple
import json
import logging
import os
import os.path
import stat
import sys
import warnings
import getpass
from functools import wraps
from contextlib import contextmanager
from types import GeneratorType
from osbs.build.build_requestv2 import (
BaseBuildRequest,
BuildRequestV2,
SourceBuildRequest,
)
from osbs.build.user_params import (
load_user_params_from_json,
BuildUserParams,
SourceContainerUserParams
)
from osbs.build.plugins_configuration import (
PluginsConfiguration,
SourceContainerPluginsConfiguration,
)
from osbs.build.build_response import BuildResponse
from osbs.build.pod_response import PodResponse
from osbs.build.config_map_response import ConfigMapResponse
from osbs.constants import (BUILD_RUNNING_STATES, WORKER_OUTER_TEMPLATE,
WORKER_INNER_TEMPLATE, WORKER_CUSTOMIZE_CONF,
ORCHESTRATOR_OUTER_TEMPLATE, ORCHESTRATOR_INNER_TEMPLATE,
ORCHESTRATOR_CUSTOMIZE_CONF, BUILD_TYPE_WORKER,
BUILD_TYPE_ORCHESTRATOR, BUILD_FINISHED_STATES,
DEFAULT_ARRANGEMENT_VERSION, REACTOR_CONFIG_ARRANGEMENT_VERSION,
FILTER_KEY, RELEASE_LABEL_FORMAT, VERSION_LABEL_FORBIDDEN_CHARS,
ORCHESTRATOR_SOURCES_OUTER_TEMPLATE,
USER_PARAMS_KIND_IMAGE_BUILDS,
USER_PARAMS_KIND_SOURCE_CONTAINER_BUILDS,
)
from osbs.core import Openshift
from osbs.exceptions import (OsbsException, OsbsValidationException, OsbsResponseException,
OsbsOrchestratorNotEnabled)
from osbs.utils.labels import Labels
# import utils in this way, so that we can mock standalone functions with flexmock
from osbs import utils
from osbs.utils import (retry_on_conflict, graceful_chain_get, RegistryURI, ImageName,
stringify_values)
from six.moves import http_client, input
# Decorator for API methods.
def osbsapi(func):
@wraps(func)
def catch_exceptions(*args, **kwargs):
if kwargs.pop("namespace", None):
warnings.warn("OSBS.%s: the 'namespace' argument is no longer supported" %
func.__name__)
try:
return func(*args, **kwargs)
except OsbsException:
# Re-raise OsbsExceptions
raise
except Exception as ex:
            # Propagate flexmock errors immediately (used in test cases)
if getattr(ex, '__module__', None) == 'flexmock':
| raise
# Convert anything else to OsbsException
# Python 3 has implicit exception chaining and enhanced
# reporting, so you get the original traceback as well as
# the one or | iginating here.
# For Python 2, let's do that explicitly.
raise OsbsException(cause=ex, traceback=sys.exc_info()[2])
return catch_exceptions
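# Illustrative sketch (not part of osbs-client; names are hypothetical): the
# same wrap-and-convert pattern as @osbsapi, shown standalone with a stand-in
# public error type.
#
# import functools
#
# class ApiError(Exception):
#     pass
#
# def api_method(func):
#     @functools.wraps(func)
#     def wrapper(*args, **kwargs):
#         try:
#             return func(*args, **kwargs)
#         except ApiError:
#             raise                    # already the public error type
#         except Exception as ex:
#             raise ApiError(str(ex))  # normalize everything else
#     return wrapper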
_REQUIRED_PARAM = object()
logger = logging.getLogger(__name__)
LogEntry = namedtuple('LogEntry', ['platform', 'line'])
def validate_arrangement_version(arrangement_version):
"""Validate if the arrangement_version is supported
Shows a warning when version is deprecated
:param int|None arrangement_version: version to be validated
:raises ValueError: when version is not supported
"""
if arrangement_version is None:
return
if arrangement_version <= 5:
raise ValueError('arrangement_version <= 5 is no longer supported')
class OSBS(object):
"""
Note: all API methods return osbs.http.Response object. This is, due to historical
reasons, untrue for list_builds and get_user, which return list of BuildResponse objects
and dict respectively.
"""
_GIT_LABEL_KEYS = ('git-repo-name', 'git-branch', 'git-full-repo')
_OLD_LABEL_KEYS = ('git-repo-name', 'git-branch')
@osbsapi
def __init__(self, openshift_configuration, build_configuration):
""" """
self.os_conf = openshift_configuration
self.build_conf = build_configuration
self.os = Openshift(openshift_api_url=self.os_conf.get_openshift_api_uri(),
openshift_oauth_url=self.os_conf.get_openshift_oauth_api_uri(),
k8s_api_url=self.os_conf.get_k8s_api_uri(),
verbose=self.os_conf.get_verbosity(),
username=self.os_conf.get_username(),
password=self.os_conf.get_password(),
use_kerberos=self.os_conf.get_use_kerberos(),
client_cert=self.os_conf.get_client_cert(),
client_key=self.os_conf.get_client_key(),
kerberos_keytab=self.os_conf.get_kerberos_keytab(),
kerberos_principal=self.os_conf.get_kerberos_principal(),
kerberos_ccache=self.os_conf.get_kerberos_ccache(),
use_auth=self.os_conf.get_use_auth(),
verify_ssl=self.os_conf.get_verify_ssl(),
token=self.os_conf.get_oauth2_token(),
namespace=self.os_conf.get_namespace())
self._bm = None
@osbsapi
def list_builds(self, field_selector=None, koji_task_id=None, running=None,
labels=None):
"""
List builds with matching fields
:param field_selector: str, field selector for Builds
        :param koji_task_id: str, only list builds for Koji Task ID
        :param running: bool, only list builds that have not yet finished
        :param labels: only list builds with matching labels
        :return: BuildResponse list
"""
if running:
running_fs = ",".join(["status!={status}".format(status=status.capitalize())
for status in BUILD_FINISHED_STATES])
if not field_selector:
field_selector = running_fs
else:
field_selector = ','.join([field_selector, running_fs])
response = self.os.list_builds(field_selector=field_selector,
koji_task_id=koji_task_id, labels=labels)
serialized_response = response.json()
build_list = []
for build in serialized_response["items"]:
build_list.append(BuildResponse(build, self))
return build_list
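    # Example of the field selector assembled above when running=True
    # (assuming BUILD_FINISHED_STATES holds e.g. 'complete', 'failed',
    # 'cancelled'); every finished state is excluded:
    #
    #   "status!=Complete,status!=Failed,status!=Cancelled"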
def watch_builds(self, field_selector=None):
kwargs = {}
if field_selector is not None:
kwargs['fieldSelector'] = field_selector
for changetype, obj in self.os.watch_resource("builds", **kwargs):
yield changetype, obj
@osbsapi
def get_build(self, build_id):
response = self.os.get_build(build_id)
build_response = BuildResponse(response.json(), self)
return build_response
@osbsapi
def cancel_build(self, build_id):
response = self.os.cancel_build(build_id)
build_response = BuildResponse(response.json(), self)
return build_response
@osbsapi
def get_pod_for_build(self, build_id):
"""
:return: PodResponse object for pod relating to the build
"""
pods = self.os.list_pods(label='openshift.io/build.name=%s' % build_id)
serialized_response = pods.json()
pod_list = [PodResponse(pod) for pod in serialized_response["items"]]
if not pod_list:
raise OsbsException("No pod for build")
elif len(pod_list) != 1:
raise OsbsException("Only one pod expected but %d returned" % len(pod_list))
return pod_list[0]
def _set_build_request_resource_limits(self, build_request):
"""Apply configured resource limits to build_request"""
assert isinstance(build_request, BaseBuildRequest)
cpu_limit = self.build_conf.get_cpu_limit()
memory_li |
DESHRAJ/fjord | fjord/analytics/tests/test_views.py | Python | bsd-3-clause | 16,139 | 0 | import json
import logging
from datetime import date, datetime, timedelta
from elasticsearch.exceptions import ConnectionError
from nose.tools import eq_
from pyquery import PyQuery
from django.contrib.auth.models import Group
from django.http import QueryDict
from fjord.analytics import views
from fjord.base.tests import LocalizingClient, ProfileFactory, reverse
from fjord.feedback.tests import ResponseFactory, ProductFactory
from fjord | .search.tests import ElasticTestCase
logger = logging.getLogger(__name__)
class TestDashboardView(ElasticTestCase):
client_class = LocalizingClient
de | f setUp(self):
super(TestDashboardView, self).setUp()
# Set up some sample data
# 4 happy, 3 sad.
# 2 Windows XP, 2 Linux, 1 OS X, 2 Windows 7
now = datetime.now()
# The dashboard by default shows the last week of data, so
# these need to be relative to today. The alternative is that
# every test gives an explicit date range, and that is
# annoying and verbose.
items = [
# happy, platform, locale, description, created
(True, '', 'en-US', 'apple', now - timedelta(days=6)),
(True, 'Windows 7', 'es', 'banana', now - timedelta(days=5)),
(True, 'Linux', 'en-US', 'orange', now - timedelta(days=4)),
(True, 'Linux', 'en-US', 'apple', now - timedelta(days=3)),
(False, 'Windows XP', 'en-US', 'banana', now - timedelta(days=2)),
(False, 'Windows 7', 'en-US', 'orange', now - timedelta(days=1)),
(False, 'Linux', 'es', u'\u2713 apple', now - timedelta(days=0)),
]
for happy, platform, locale, description, created in items:
# We don't need to keep this around, just need to create it.
ResponseFactory(happy=happy, platform=platform, locale=locale,
description=description, created=created)
self.refresh()
def test_front_page(self):
url = reverse('dashboard')
r = self.client.get(url)
eq_(200, r.status_code)
self.assertTemplateUsed(r, 'analytics/dashboard.html')
pq = PyQuery(r.content)
# Make sure that each opinion is shown and that the count is correct.
eq_(pq('.block.count strong').text(), '7')
eq_(len(pq('li.opinion')), 7)
def test_hidden_products_dont_show_up(self):
# Create a hidden product and one response for it
prod = ProductFactory(
display_name=u'HiddenProduct', db_name='HiddenProduct',
on_dashboard=False)
ResponseFactory(product=prod.db_name)
self.refresh()
url = reverse('dashboard')
resp = self.client.get(url)
eq_(resp.status_code, 200)
assert 'HiddenProduct' not in resp.content
def test_cant_see_old_responses(self):
# Make sure we can't see responses from > 180 days ago
cutoff = datetime.today() - timedelta(days=180)
ResponseFactory(description='Young enough--Party!',
created=cutoff + timedelta(days=1))
ResponseFactory(description='Too old--Get off my lawn!',
created=cutoff - timedelta(days=1))
self.refresh()
url = reverse('dashboard')
resp = self.client.get(url, {
'date_start': cutoff.strftime('%Y-%m-%d')}
)
assert 'Young enough--Party!' in resp.content
assert 'Too old--Get off my lawn!' not in resp.content
def test_dashboard_atom_links(self):
"""Test dashboard atom links are correct"""
r = self.client.get(reverse('dashboard'))
eq_(200, r.status_code)
assert '/en-US/?format=atom' in r.content
r = self.client.get(
reverse('dashboard'),
{'happy': 1})
eq_(200, r.status_code)
pq = PyQuery(r.content)
pq = pq('link[type="application/atom+xml"]')
qs = QueryDict(pq[0].attrib['href'].split('?')[1])
eq_(qs['happy'], u'1')
eq_(qs['format'], u'atom')
r = self.client.get(
reverse('dashboard'),
{'product': 'Firefox', 'version': '20.0'})
eq_(200, r.status_code)
pq = PyQuery(r.content)
pq = pq('link[type="application/atom+xml"]')
qs = QueryDict(pq[0].attrib['href'].split('?')[1])
eq_(qs['product'], u'Firefox')
eq_(qs['version'], u'20.0')
def test_truncated_description_on_dashboard(self):
# Create a description that's 500 characters long (which is
# the truncation length) plus a string that's easy to assert
# non-existence of.
desc = ('0' * 500) + 'OMGou812'
ResponseFactory(description=desc)
self.refresh()
url = reverse('dashboard')
r = self.client.get(url)
assert 'OMGou812' not in r.content
def test_search(self):
url = reverse('dashboard')
# Happy
r = self.client.get(url, {'happy': 1})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 4)
# Sad
r = self.client.get(url, {'happy': 0})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 3)
# Locale
r = self.client.get(url, {'locale': 'es'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 2)
# Platform and happy
r = self.client.get(url, {'happy': 1, 'platform': 'Linux'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 2)
# Product
r = self.client.get(url, {'product': 'Firefox'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 7)
# Product
r = self.client.get(url, {'product': 'Firefox for Android'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 0)
# Product version
r = self.client.get(
url, {'product': 'Firefox', 'version': '17.0'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 7)
# Product version
r = self.client.get(
url, {'product': 'Firefox', 'version': '18.0'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 0)
# Empty search
r = self.client.get(url, {'platform': 'Atari'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 0)
def test_empty_and_unknown(self):
url = reverse('dashboard')
# Empty value should work
r = self.client.get(url, {'platform': ''})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 1)
# "Unknown" value should also work
r = self.client.get(url, {'platform': 'Unknown'})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 1)
def test_version_noop(self):
"""version has no effect if product isn't set"""
url = reverse('dashboard')
# Filter on product and version--both filters affect the
# results
r = self.client.get(
url, {'product': 'Firefox', 'version': '18.0'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 0)
# Filter on version--filter has no effect on results
r = self.client.get(
url, {'version': '18.0'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 7)
def test_text_search(self):
url = reverse('dashboard')
# Text search
r = self.client.get(url, {'q': 'apple'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 3)
# Text and filter
r = self.client.get(url, {'q': 'apple', 'happy': 1, 'locale': 'en-US'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 2)
def test_text_search_unicode(self):
"""Unicode in the search field shouldn't kick up errors"""
url = reverse('dashboard')
# Text search
r = self.client.get(url, {'q': u'\u2713'})
eq_(r.status_code, 200)
def test_search_format_json(self):
"""JSON output works"""
url = reverse('dashboard')
# Text search
r = self.client.get(url, {'q': u'apple |
daureg/illalla | twitter_helper.py | Python | mit | 8,052 | 0.000994 | #! /usr/bin/python2
# vim: set fileencoding=utf-8
"""Functions used in twitter scrapper main code."""
import functools
from timeit import default_timer as clock
from time import sleep
import utils as u
import cities
import pytz
import ujson
import logging
from datetime import datetime, timedelta
import re
CHECKIN_URL = re.compile(r'([0-9a-f]{24})\?s=([0-9A-Za-z_-]{27})')
from collections import namedtuple
import locale
locale.setlocale(locale.LC_ALL, 'C') # to parse date
UTC_DATE = '%a %b %d %X +0000 %Y'
FullCheckIn = namedtuple('FullCheckIn', ['id', 'lid', 'uid', 'city', 'loc',
'time', 'tid', 'tuid', 'msg'])
def parse_tweet(tweet):
"""Return a CheckIn from `tweet` or None if it is not located in a valid
city"""
loc = u.get_nested(tweet, | 'coordinates')
city = None
if not loc:
# In that case, we would have to follow the link to know whether the
# checkin falls within our cities but tha | t's too costly so we drop it
# (and introduce a bias toward open sharing users I guess)
return None
lon, lat = loc['coordinates']
city = find_town(lat, lon, CITIES_TREE)
if not (city and city in cities.SHORT_KEY):
return None
tid = u.get_nested(tweet, 'id_str')
urls = u.get_nested(tweet, ['entities', 'urls'], [])
# short url of the checkin that need to be expand, either using bitly API
# or by VenueIdCrawler. Once we get the full URL, we still need to request
# 4SQ (500 per hours) to get info.
is_foursquare_url = lambda u: '4sq.com' in u or 'swarmapp.com' in u
fsq_urls = [url['expanded_url'] for url in urls
if is_foursquare_url(url['expanded_url'])]
if not fsq_urls:
return None
lid = str(fsq_urls[0])
uid = u.get_nested(tweet, ['user', 'id_str'])
msg = u.get_nested(tweet, 'text')
try:
time = datetime.strptime(tweet['created_at'], UTC_DATE)
time = cities.utc_to_local(city, time)
except ValueError:
print('time: {}'.format(tweet['created_at']))
return None
return FullCheckIn('', lid, '', city, loc, time, tid, uid, msg)
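# Example of the CHECKIN_URL pattern defined above (ids are made up): it
# captures the 24-hex checkin id and the 27-character signature from a
# resolved foursquare URL.
#
# m = CHECKIN_URL.search('4f2a8b1ce4b0deadbeef1234?s=AbCdEfGhIjKlMnOpQrStUvWxYz1')
# if m:
#     checkin_id, signature = m.group(1), m.group(2)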
def import_json():
"""Return a json module (first trying ujson then simplejson and finally
json from standard library)."""
try:
import ujson as json
except ImportError:
# try:
# import simplejson as json
# except ImportError:
# import json
        # I cannot make the other two work with utf-8
raise
return json
def log_exception(log, default=None, reraise=False):
"""If `func` raises an exception, log it to `log`. By default, assume it's
not critical and thus resume execution by returning `default`, except if
`reraise` is True."""
def actual_decorator(func):
"""Real decorator, with no argument"""
@functools.wraps(func)
def wrapper(*args, **kwds):
"""Wrapper"""
try:
return func(*args, **kwds)
except (KeyboardInterrupt, SystemExit):
raise
except:
log.exception("")
if reraise:
raise
return default
return wrapper
return actual_decorator
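# Sketch of intended use (logger and function names are hypothetical): any
# unexpected error inside fetch_page is logged and [] is returned instead of
# propagating, while KeyboardInterrupt/SystemExit still escape.
#
# scraper_log = logging.getLogger('scraper')
#
# @log_exception(scraper_log, default=[])
# def fetch_page(url):
#     return download(url)  # may raise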
class Failures(object):
"""Keep track of Failures."""
def __init__(self, initial_waiting_time):
"""`initial_waiting_time` is in minutes."""
self.total_failures = 0
self.last_failure = clock()
self.initial_waiting_time = float(initial_waiting_time)*60.0
self.reset()
def reset(self):
"""Restore initial state with no recent failure."""
self.recent_failures = 0
self.waiting_time = self.initial_waiting_time
def fail(self):
"""Register a new failure and return a reasonable time to wait"""
if self.has_failed_recently():
# Hopefully the golden ration will bring us luck next time
self.waiting_time *= 1.618
else:
self.reset()
self.total_failures += 1
self.recent_failures += 1
self.last_failure = clock()
return self.waiting_time
def has_failed_recently(self, small=3600):
"""Has it failed in the last `small` seconds?"""
return self.total_failures > 0 and clock() - self.last_failure < small
def do_sleep(self):
"""Indeed perform waiting."""
sleep(self.waiting_time)
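# Sketch of the intended back-off loop (fetch_batch is hypothetical): each
# consecutive failure stretches the wait by ~1.618x until a success calls
# reset() and restores the initial waiting time.
#
# failures = Failures(initial_waiting_time=2)  # minutes
# while True:
#     try:
#         fetch_batch()
#         failures.reset()
#     except IOError:
#         failures.fail()
#         failures.do_sleep()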
def parse_json_checkin(json, url=None):
"""Return salient info about a Foursquare checkin `json` that can be
either JSON text or already parsed as a dictionary."""
if not json:
return None
if not isinstance(json, dict):
try:
checkin = ujson.loads(json)
except (TypeError, ValueError) as not_json:
print(not_json, json, url)
return None
else:
checkin = json['checkin']
uid = u.get_nested(checkin, ['user', 'id'])
vid = u.get_nested(checkin, ['venue', 'id'])
time = u.get_nested(checkin, 'createdAt')
offset = u.get_nested(checkin, 'timeZoneOffset', 0)
if None in [uid, vid, time]:
return None
time = datetime.fromtimestamp(time, tz=pytz.utc)
# by doing this, the date is no more UTC. So why not put the correct
# timezone? Because in that case, pymongo will convert to UTC at
# insertion. Yet I want local time, but without doing the conversion
# when the result comes back from the DB.
time += timedelta(minutes=offset)
return int(uid), str(vid), time
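# Worked example of the offset trick above (made-up values): a checkin with
# createdAt=1400000000 and timeZoneOffset=-240 (UTC-4).
#
# >>> t = datetime.fromtimestamp(1400000000, tz=pytz.utc)  # 2014-05-13 16:53:20 UTC
# >>> t + timedelta(minutes=-240)   # 12:53:20 local wall-clock time,
# ...                               # tzinfo deliberately left as UTC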
def save_checkins_json(complete, prefix='tweets'):
"""Save `complete` as JSON in a file."""
now = datetime.now().strftime('%Y%m%d_%H%M%S')
filename = '{}_{}.json'.format(prefix, now)
msg = 'Save {} tweets in {}.'.format(len(complete), filename)
try:
for idx, checkin in enumerate(complete):
fmt_time = checkin['time'].strftime('%Y-%m-%dT%H:%M:%SZ')
complete[idx]['time'] = {'$date': fmt_time}
with open(filename, 'w') as out:
out.write(ujson.dumps(complete, ensure_ascii=False).replace('\/',
'/'))
logging.info(msg)
except (KeyboardInterrupt, SystemExit):
raise
except:
msg = "Fail to save {} tweets.".format(len(complete))
logging.exception(msg)
Point = namedtuple('Point', ['x', 'y'])
Node = namedtuple('Node', ['val', 'left', 'right'])
from numpy import median
def build_tree(bboxes, depth=0, max_depth=2):
if depth >= max_depth:
return bboxes
split_val = median([b.bottom[1] for b in bboxes])
left, right = [], []
for b in bboxes:
if b.bottom[1] > split_val:
right.append(b)
else:
left.append(b)
return Node(split_val,
build_tree(left, depth+1), build_tree(right, depth+1))
def find_town(x, y, tree, depth=0):
if isinstance(tree, list):
for city in tree:
if city.contains(x, y):
return city.name
return None
if y > tree.val:
return find_town(x, y, tree.right, depth+1)
else:
return find_town(x, y, tree.left, depth+1)
class Bbox():
bottom = None
top = None
center = None
name = None
def __init__(self, bbox, name):
self.bottom = Point(*bbox[:2])
self.top = Point(*bbox[2:])
self.name = name
def contains(self, x, y):
return self.bottom.x <= x <= self.top.x and\
self.bottom.y <= y <= self.top.y
def __repr__(self):
return '{}: {:.2f}, {:.2f}'.format(self.name, self.bottom.x,
self.bottom.y)
def obtain_tree():
all_cities = cities.US + cities.EU
cities_names = [cities.short_name(c) for c in cities.NAMES]
bboxes = [Bbox(city, name) for city, name in zip(all_cities,
cities_names)]
return build_tree(bboxes)
CITIES_TREE = obtain_tree()
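# Sketch of a lookup against the tree built above (coordinates are made up;
# the result depends on what cities.NAMES actually contains and on the x/y
# ordering the cities module uses):
#
# town = find_town(48.85, 2.35, CITIES_TREE)
# print(town)  # a short city name, or None if outside every bounding box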
|
SuperNovaPOLIUSP/supernova | aeSupernova/login/migrations/0001_initial.py | Python | agpl-3.0 | 1,436 | 0.002089 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Log',
fields=[
('idlog', models.AutoField(serialize=False, primary_key=True, db_column=b'idLog')),
('action', models.TextField()),
('time', models.DateTimeField(db_column=b'time')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
| options={
'db_table': b'user_log',
'managed': True,
},
bases=(models.Model,),
),
migrations.CreateModel(
| name='Session',
fields=[
('idsession', models.AutoField(serialize=False, primary_key=True, db_column=b'idSession')),
('start', models.DateTimeField(db_column=b'start')),
('end', models.DateTimeField(null=True, db_column=b'end', blank=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': b'login_session',
'managed': True,
},
bases=(models.Model,),
),
]
|
mbedmicro/pyOCD | test/json_lists_test.py | Python | apache-2.0 | 7,561 | 0.003174 | # pyOCD debugger
# Copyright (c) 2006-2015 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse, os, sys
from time import sleep, time
from random import randrange
import math
import argparse
import subprocess
import json
import traceback
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
from pyocd import __version__
from pyocd.core.helpers import ConnectHelper
from pyocd.utility.conversion import float32_to_u32
from test_util import Test, TestResult
import logging
from random import randrange
class JsonListsTestResult(TestResult):
def __init__(self):
super(JsonListsTestResult, self).__init__(None, None, None)
        self.name = "json_lists"
class JsonListsTest(Test):
def __init__(self):
super(JsonListsTest, self).__init__("Json Lists Test", json_lists_test)
def print_perf_info(self, result_list, output_file=None):
pass
def run(self, board):
try:
result = self.test_function(board.unique_id)
except Exception as e:
result = JsonListsTestResult()
result.passed = False
print("Exception %s when testing board %s" % (e, board.unique_id))
traceback.print_exc(file=sys.stdout)
result.board = board
result.test = self
return result
def json_lists_test(board_id, testing_standalone=False):
test_count = 0
test_pass_count = 0
def validate_basic_keys(data, minor_version=0):
did_pass = True
print('pyocd_version', end=' ')
p = 'pyocd_version' in data
if p:
p = data['pyocd_version'] == __version__
if p:
print("PASSED")
else:
did_pass = False
print("FAILED")
print('version', end=' ')
p = 'version' in data
if p:
v = data['version']
p = 'major' in v and 'minor' in v
if p:
p = v['major'] == 1 and v['minor'] == minor_version
if p:
print("PASSED")
else:
did_pass = False
print("FAILED")
print('status', end=' ')
p = 'status' in data
if p:
p = data['status'] == 0
if p:
print("PASSED")
else:
did_pass = False
print("FAILED")
return did_pass
def validate_boards(data):
did_pass = True
print('boards', end=' ')
p = 'boards' in data and type(data['boards']) is list
if p:
b = data['boards']
if p:
print("PASSED")
else:
did_pass = False
print("FAILED")
# Only if we're running this test standalone do we want to compare against the list
# of boards returned by ConnectHelper.get_sessions_for_all_connected_probes(). When running in the full
# automated test suite, there could be other test jobs running concurrently that have
# exclusive access to the boards they are testing. Thus, those boards will not show up
# in the return list and this test will fail.
if testing_standalone:
try:
all_sessions = ConnectHelper.get_sessions_for_all_connected_probes(blocking=False)
all_mbeds = [x.board for x in all_sessions]
p = len(all_mbeds) == len(b)
matching_boards = 0
if p:
for mbed in all_mbeds:
for brd in b:
if mbed.unique_id == brd['unique_id']:
matching_boards += 1
p = 'info' in brd and 'target' in brd and 'board_name' in brd
if not p:
break
if not p:
break
p = matching_boards == len(all_mbeds)
if p:
print("PASSED")
else:
did_pass = False
print("FAILED")
except Exception as e:
print("FAILED")
traceback.print_exc(file=sys.stdout)
did_pass = False
else:
# Check for required keys in all board info dicts.
p = True
for brd in b:
p = ('unique_id' in brd and
'info' in brd and
'target' in brd and
'board_name' in brd)
if not p:
break
if p:
print("PASSED")
else:
did_pass = False
print("FAILED")
return did_pass
def validate_targets(data):
did_pass = True
print('targets', end=' ')
p = 'targets' in data and type(data['targets']) is list
if p:
targets = data['targets']
for t in targets:
p = 'name' in t and 'part_number' in t
if not p:
break
if | p:
print("PASSED")
else:
did_pass = False
print("FAILED")
return | did_pass
result = JsonListsTestResult()
print("\n\n----- TESTING PROBES LIST -----")
out = subprocess.check_output(['pyocd', 'json', '--probes'])
data = json.loads(out)
test_count += 2
if validate_basic_keys(data):
test_pass_count += 1
if validate_boards(data):
test_pass_count += 1
print("\n\n----- TESTING TARGETS LIST -----")
out = subprocess.check_output(['pyocd', 'json', '--targets'])
data = json.loads(out)
test_count += 2
if validate_basic_keys(data, minor_version=2):
test_pass_count += 1
if validate_targets(data):
test_pass_count += 1
    # Doesn't actually verify returned boards, simply makes sure it doesn't crash.
print("\n\n----- TESTING BOARDS LIST -----")
out = subprocess.check_output(['pyocd', 'json', '--boards'])
data = json.loads(out)
test_count += 1
if validate_basic_keys(data, minor_version=1):
test_pass_count += 1
# Doesn't actually verify returned features and options, simply makes sure it doesn't crash.
print("\n\n----- TESTING FEATURES LIST -----")
out = subprocess.check_output(['pyocd', 'json', '--features'])
data = json.loads(out)
test_count += 1
if validate_basic_keys(data, minor_version=1):
test_pass_count += 1
result.passed = test_count == test_pass_count
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='pyocd json output test')
parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
args = parser.parse_args()
level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(level=level)
json_lists_test(None, testing_standalone=True)
|
astromme/classify-handwritten-characters | predict.py | Python | mit | 1,285 | 0.003113 | import tensorflow as tf
import numpy as np
import sys
from libgnt.character_index import character_index
from utils.show_tf_image import show_tf_image
from utils.array_top_n_indexes import array_top_n_indexes
import os
def main():
if len(sys.argv) < 3:
print("usage: predict.py MODEL PNG_FILE")
sys.exit()
_, model_filename, png_filename = sys.argv
model = tf.keras.models.load_model(model_filename)
image = tf.keras.utils.load_img(png_filename, color_mode='rgb')
input_arr = tf.keras.utils.img_to_array(image)
print(input_arr.shape)
input_arr = 255 - input_arr
input_arr = tf.cast(input_arr, tf.float32)
input_arr = input_arr / 255
input_arr = tf.image.resize_with_pad(input_arr, 48, 48)
predictions = model(np.array([input_arr]), training=False)
for char_index in array_top_n_indexes(predictions[0], 5):
print(f'c: {character_index[char_index]}, v: {predictions[0][char_index]}')
# predicted_char_num = np.argmax(predictions, axis = 1)
top_5_predictions = [character_index[c] for c in array_top_n_indexes(pr | edictions[0], 5)]
print(top_5_predictions)
# show_tf_image(input_arr, f'"{os.path.basename(png_filename)}" predictions: {top_5_predictions}')
if __name__ == "__main__":
| main()
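# array_top_n_indexes is imported from utils above; a minimal equivalent
# (an assumption -- the real helper may differ) would be:
#
# def array_top_n_indexes(arr, n):
#     return np.argsort(arr)[-n:][::-1]  # indexes of the n largest values, descending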
|
grindylow/tut2 | doc/conf.py | Python | gpl-3.0 | 5,052 | 0.000198 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# TUT2 documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 28 20:04:59 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'TUT2'
copyright = '2018, Martin Grill'
author = 'Martin Grill'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a th | eme
# further. For a list of op | tions available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TUT2doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TUT2.tex', 'TUT2 Documentation',
'Martin Grill', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tut2', 'TUT2 Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TUT2', 'TUT2 Documentation',
author, 'TUT2', 'One line description of project.',
'Miscellaneous'),
]
|
HeavenMin/PlantImageRecognition | deepLearning/verifyResult.py | Python | apache-2.0 | 1,141 | 0.004382 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR : MIN
PURPOSE : verify the accuracy of the model
VERSION : 0.1
DATE : 3.2017
"""
__author__ = 'Min'
import tensorflow as tf, sys
# path of the graph model
graph | Path = sys.argv[1]
# path of the model labels
labelPath = sys.argv[2]
# path of the image need to be identified
imagePath = sys.argv[3]
# read in the image
imageData = tf.gfile.FastGFile(imagePath, 'rb').read()
# loads label file
labelLines = [line.rstrip() for line | in tf.gfile.GFile(labelPath)]
with tf.gfile.FastGFile(graphPath, 'rb') as f:
graphDef = tf.GraphDef()
graphDef.ParseFromString(f.read())
_ = tf.import_graph_def(graphDef, name = '')
with tf.Session() as sess:
softmaxTensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmaxTensor, \
{'DecodeJpeg/contents:0': imageData})
# sort the prediction results
topK = predictions[0].argsort()[-len(predictions[0]):][::-1]
for nodeID in topK:
className = labelLines[nodeID]
ratio = predictions[0][nodeID]
print('%s (ratio = %.5f)' % (className, ratio))
|
Haabb/pwnfork | pwn/shellcode/misc/fork.py | Python | mit | 910 | 0.00989 | from pwn.internal.shellcode_helper import *
@shellcode_reqs(arch=['i386', 'amd64'], os=['linux', 'freebsd'])
def fork(parent, child = None, os = None, arch = None):
"""Fork this shit."""
if arch == 'i386':
if os in ['linux', 'freebsd']:
return _fork_i386(parent, child)
elif arch == 'amd64':
if | os in ['linux', 'freebsd']:
return _fork_amd64(parent, child)
bug('OS/arch combination (%s, %s) was not supported for fork' % (os, arch))
def _fork_amd64(parent, child):
code = """
push SYS_fork
pop rax
syscall
test rax, rax
jne %s
""" % parent
if child is not None:
code += 'jmp %s\n' % child
return code
def _fork_i386(parent, child):
| code = """
push SYS_fork
pop eax
int 0x80
test eax, eax
jne %s
""" % parent
if child is not None:
code += 'jmp %s\n' % child
return code
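# Sketch of intended use (label names are hypothetical): emit assembly that
# continues at 'parent_label' in the parent process and jumps to
# 'child_label' in the child once the fork syscall returns.
#
# asm = fork('parent_label', child='child_label', os='linux', arch='i386')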
|
lamenezes/agendi | core/urls.py | Python | apache-2.0 | 542 | 0 | from django.core.urlresolvers import reverse_lazy
from django.conf.urls import url
from django.contrib.auth import views
from core.views import HomeView, UserAuthView, UserCreateView
urlpatterns = [
url(r'^$',
HomeV | iew.as_view(),
name='home'),
url(r'^login/$',
UserAuthView.as_view(),
name='login'),
url(r'^logout/$',
views.logout,
{'next_page': reverse | _lazy('core:home')},
name='logout'),
url(r'^signup/$',
UserCreateView.as_view(),
name='signup'),
]
|
eduNEXT/edunext-platform | import_shims/lms/bulk_email/apps.py | Python | agpl-3.0 | 368 | 0.008152 | """Deprecated import support. | Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('bulk_email.apps', 'lms.djangoapps.bulk_email.apps')
from lms.djangoapps.bulk_email.apps import *
| |
roshchupkin/VBM | scripts/python/nii2np.py | Python | gpl-2.0 | 6,005 | 0.024147 | import sys
from timer import Timer
import os
import pandas as pd
import nipy
import numpy as np
import re
import argparse
def get_images_list(path, regexp, number_images=None):
im_list=[]
dir_list=os.listdir(path)
    if regexp == "NO":
        return dir_list
reg=re.compile(regexp)
im_list=[i for i in dir_list for m in [reg.search(i)] if m]
    if number_images is not None:
        if len(im_list) != int(number_images):
            raise Exception("the given number of images has to match the number of images in the directory!")
return im_list
def delete_arrays(path_4d, region_code):
'''delete temporal arrays '''
p=1
while True:
if os.path.isfile( os.path.join(path_4d, str(region_code) +'_'+str(p) + ".npy" ) ):
os.remove(os.path.join(path_4d, str(region_code) +'_'+str(p) + ".npy" ))
p+=1
else:
break
def convert_array_for_regression(path_4d, region_code, split_size=1000):
    ''' Merge the per-part region arrays into one and split the result into
    chunks of (number of images in study) x (split_size voxels). '''
regression_data=[]
p=1
while True:
try:
regression_d | ata.append(np.load( os.path.join(path_4d, str(region_code) +'_'+str(p) + ".npy" ) ) )
print str(region_code) +'_' +str(p) + ".npy"
p+=1
except:
break
reg | ression_data=np.concatenate(regression_data)
print "Region {}, regression data size {}, will be split by {} voxels chunks ".format(region_code,regression_data.shape, split_size)
sample_size, number_voxels=regression_data.shape
d=number_voxels/split_size
r=number_voxels-d*split_size
if d!=0:
l=[range(split_size*i,split_size*(i+1)) for i in range(0,d) ]
for i,j in enumerate(l): # TODO start from 0, maybe change to 1
save_np=regression_data[:,j]
np.save(os.path.join(path_4d, 'reg' + str(region_code) + "_" + str(i)) , save_np )
if r!=0:
save_np=regression_data[:,d*split_size:d*split_size+r]
np.save(os.path.join(path_4d, 'reg' + str(region_code) + "_" + str(i+1)) , save_np )
else:
np.save(os.path.join(path_4d, 'reg' + str(region_code) + "_" + str(0)) , regression_data )
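# Worked example of the split arithmetic above (made-up sizes): with
# number_voxels=2350 and split_size=1000, d=2 and r=350, so the region is
# written as reg<code>_0 and reg<code>_1 (1000 voxels each, all images) plus
# reg<code>_2 holding the remaining 350 voxels.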
def save_4d_data(Hammer_atlas, image_path, path_4d, image_names):
    '''Produce nparrays of (images in study) x (voxels in region);
    used only if the number of images is below 1000.
    '''
region_codes=np.unique(Hammer_atlas._data)
region_codes=region_codes[region_codes!=0]
    region_coordinates = {i: np.where(Hammer_atlas._data == i) for i in region_codes}
data_4d={i:[] for i in region_codes}
for im in image_names:
print im
try:
images_data=nipy.load_image(os.path.join(image_path, im ))._data
for k in data_4d:
                data_4d[k].append(images_data[region_coordinates[k]])
except:
raise ValueError("Error during reading image {}".format(str(im)))
for c in region_codes:
c=int(c)
np_4d=np.array(data_4d[c])
print np_4d.shape
np.save(os.path.join(path_4d, str(c) +"_" + str(1)) , np_4d )
convert_array_for_regression(path_4d, c)
delete_arrays(path_4d, c)
def save_4d_data_region(logs_dir, atlas, image_path, path_4d, region_code, regexp='NO'):
image_names=get_images_list(image_path,regexp)
df=pd.DataFrame(image_names)
df.to_csv(os.path.join(logs_dir, str(region_code)+ '.csv'))
if len(image_names)<1000:
        if int(region_code) != 0:
            print 'FORCE MULTI JOBS SUBMISSION (NOT EFFICIENT)'
        else:
save_4d_data(atlas, image_path, path_4d, image_names)
return 0
data_4d=[]
part=1
coordinate=np.where(atlas._data==int(region_code) )
if coordinate[0].shape[0]==0:
raise ValueError('Region code {} does not exist'.format(region_code))
count=0
for im in image_names:
# reading all images and dump nparrays by voxels in region by 1000 images
try:
images_data=nipy.load_image(os.path.join(image_path, im ))._data
count+=1
data=images_data[coordinate]
data_4d.append(data)
if count==1000:
np_4d=np.array(data_4d)
np.save(os.path.join(path_4d, str(region_code) + "_" + str(part)) , np_4d )
data_4d=[]
np_4d=None
part+=1
count=0
except:
print ("Error during reading image {}".format(str(im)))
if count!=0:
np_4d=np.array(data_4d)
np.save(os.path.join(path_4d, str(region_code) +"_" + str(part)) , np_4d )
convert_array_for_regression(path_4d, region_code)
delete_arrays(path_4d, region_code)
def experiment_save_4d(logs_dir, atlas_path,image_path, path_4d, region_code , reg):
atlas=nipy.load_image(atlas_path)
save_4d_data_region(logs_dir, atlas, image_path, path_4d, region_code , regexp=reg)
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Convert nifti images to nparray files')
parser.add_argument("-o",required=True, type=str, help="path to save result folder")
parser.add_argument("-i",required=True, type=str, help="path to nifti images")
parser.add_argument("-atlas",required=True, type=str, help="path to Atlas images to use to define voxel chunks")
parser.add_argument("-code",required=True,type=int, help="Atlas chunk code")
parser.add_argument("-regexp",type=str,default='NO', help="REGEXP to select images")
parser.add_argument("-logs",type=str,required=True, help="path to save logs")
args = parser.parse_args()
print args
with Timer() as t:
experiment_save_4d(args.logs, args.atlas, args.i, args.o, args.code, args.regexp)
print "save data for analysis %s s" %(t.secs)
|
youtube/cobalt | third_party/llvm-project/lldb/scripts/utilsOsType.py | Python | bsd-3-clause | 3,130 | 0.003514 | """ Utility module to determine the OS Python running on
--------------------------------------------------------------------------
File: utilsOsType.py
Overview: Pyth | on module to supply functions and an enumeration to
help determine the platform type, bit size and OS currently
being used.
--------------------------------------------------------------------------
"""
# Python modules:
import sys # Provide system information
# Third party modules:
# In-house modules:
# Instantiations:
# Enumerations:
#-----------------------------------------------------------------------------
# Details: Class to imp | lement a 'C' style enumeration type.
# Gotchas: None.
# Authors: Illya Rudkin 28/11/2013.
# Changes: None.
#--
if sys.version_info.major >= 3:
from enum import Enum
class EnumOsType(Enum):
Unknown = 0
Darwin = 1
FreeBSD = 2
Linux = 3
NetBSD = 4
        OpenBSD = 5
        Windows = 6
        kFreeBSD = 7
else:
class EnumOsType(object):
values = ["Unknown",
"Darwin",
"FreeBSD",
"Linux",
"NetBSD",
"OpenBSD",
"Windows",
"kFreeBSD"]
class __metaclass__(type):
#++----------------------------------------------------------------
# Details: Fn acts as an enumeration.
# Args: vName - (R) Enumeration to match.
# Returns: Int - Matching enumeration/index.
# Throws: None.
#--
def __getattr__(cls, vName):
return cls.values.index(vName)
#++---------------------------------------------------------------------------
# Details: Reverse fast lookup of the values list.
# Args: vI - (R) Index / enumeration.
# Returns: Str - text description matching enumeration.
# Throws: None.
#--
def name_of(cls, vI):
return EnumOsType.values[vI]
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#++---------------------------------------------------------------------------
# Details: Determine what operating system is currently running on.
# Args: None.
# Returns: EnumOsType - The OS type being used ATM.
# Throws: None.
#--
def determine_os_type():
eOSType = EnumOsType.Unknown
strOS = sys.platform
if strOS == "darwin":
eOSType = EnumOsType.Darwin
elif strOS.startswith("freebsd"):
eOSType = EnumOsType.FreeBSD
elif strOS.startswith("linux"):
eOSType = EnumOsType.Linux
elif strOS.startswith("netbsd"):
eOSType = EnumOsType.NetBSD
elif strOS.startswith("openbsd"):
eOSType = EnumOsType.OpenBSD
elif strOS == "win32":
eOSType = EnumOsType.Windows
elif strOS.startswith("gnukfreebsd"):
eOSType = EnumOsType.kFreeBSD
return eOSType
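# Sketch of typical use:
#
# eOSType = determine_os_type()
# if eOSType == EnumOsType.Windows:
#     pass  # take the Windows-specific path
#
# Under Python 2 the textual name comes from EnumOsType.name_of(eOSType);
# under Python 3, Enum members expose .name directly.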
|
kslundberg/pants | src/python/pants/backend/jvm/tasks/jvm_compile/jvm_compile_global_strategy.py | Python | apache-2.0 | 28,551 | 0.009422 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
import shutil
import uuid
from collections import defaultdict
from twitter.common.collections import OrderedSet
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.backend.jvm.tasks.jvm_compile.compile_context import CompileContext
from pants.backend.jvm.tasks.jvm_compile.jvm_compile_strategy import JvmCompileStrategy
from pants.backend.jvm.tasks.jvm_compile.resource_mapping import ResourceMapping
from pants.base.build_environment import get_buildroot, get_scm
from pants.base.exceptions import TaskError
from pants.base.target import Target
from pants.base.worker_pool import Work
from pants.option.custom_types import list_option
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir
class JvmCompileGlobalStrategy(JvmCompileStrategy):
"""A strategy for JVM compilation that uses a global classpath and analysis."""
class InternalTargetPartitioningError(Exception):
"""Error partitioning targets by jvm platform settings."""
@classmethod
def register_options(cls, register, compile_task_name, supports_concurrent_execution):
register('--changed-targets-heuristic-limit', advanced=True, type=int, default=0,
help='If non-zero, and we have fewer than this number of locally-changed targets, '
'partition them separately, to preserve stability when compiling repeatedly.')
def __init__(self, context, options, workdir, analysis_tools, compile_task_name,
sources_predicate):
super(JvmCompileGlobalStrategy, self).__init__(context, options, workdir, analysis_tools,
compile_task_name, sources_predicate)
# Various working directories.
# NB: These are grandfathered in with non-strategy-specific names, but to prevent
# collisions within the buildcache, strategies should use strategy-specific subdirectories.
self._analysis_dir = os.path.join(workdir, 'analysis')
self._classes_dir = os.path.join(workdir, 'classes')
self._analysis_file = os.path.join(self._analysis_dir, 'global_analysis.valid')
self._invalid_analysis_file = os.path.join(self._analysis_dir, 'global_analysis.invalid')
self._target_sources_dir = os.path.join(workdir, 'target_sources')
# The rough number of source files to build in each compiler pass.
self._partition_size_hint = options.partition_size_hint
# Computed lazily as needed.
self._upstream_class_to_path = None
# If non-zero, and we have fewer than this number of locally-changed targets,
# then we partition them separately, to preserve stability in the face of repeated
# compilations.
self._changed_targets_heuristic_limit = options.changed_targets_heuristic_limit
# Sources (relative to buildroot) present in the last analysis that have since been deleted.
# Populated in prepare_compile().
self._deleted_sources = None
self._upstream_class_to_path = None
def name(self):
return 'global'
def compile_context(self, target):
"""Returns the default/stable compile context for the given target.
Temporary compile contexts are private to the strategy.
"""
return CompileContext(target,
self._analysis_file,
self._classes_dir,
self._sources_for_target(target))
def move(self, src, dst):
if self.delete_scratch:
shutil.move(src, dst)
else:
shutil.copy(src, dst)
def pre_compile(self):
super(JvmCompileGlobalStrategy, self).pre_compile()
# Only create these working dirs during execution phase, otherwise, they
# would be wiped out by clean-all goal/task if it's specified.
safe_mkdir(self._target_sources_dir)
safe_mkdir(self._analysis_dir)
safe_mkdir(self._classes_dir)
# Look for invalid analysis files.
for f in (self._invalid_analysis_file, self._analysis_file):
self.validate_analysis(f)
def prepare_compile(self, cache_manager, all_targets, relevant_targets):
super(JvmCompileGlobalStrategy, self).prepare_compile(cache_manager, all_targets,
relevant_targets)
# Update the classpath for us and for downstream tasks.
compile_classpaths = self.context.products.get_data('compile_classpath')
for conf in self._confs:
compile_classpaths.add_for_targets(all_targets, [(conf, self._classes_dir)])
# Split the global analysis file into valid and invalid parts.
invalidation_check = cache_manager.check(relevant_targets)
if invalidation_check.invalid_vts:
# The analysis for invalid and deleted sources is no longer valid.
invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
invalid_sources_by_target = {}
for tgt in invalid_targets:
invalid_sources_by_target[tgt] = self._sources_for_target(tgt)
invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
self._deleted_sources = self._compute_deleted_sources()
tmpdir = os.path.join(self.analysis_tmpdir, str(uuid.uuid4()))
os.mkdir(tmpdir)
valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
with self.context.new_workunit(name='prepare-analysis'):
self._analysis_tools.split_to_paths(self._analysis_file,
[(invalid_sources + self._deleted_sources, newly_invalid_analysis_tmp)],
valid_analysis_tmp)
if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file):
self._analysis_tools.merge_from_paths(
[self._invalid_analysis_file, newly_invalid_analysis_tmp], invalid_analysis_tmp)
else:
invalid_analysis_tmp = newly_invalid_analysis_tmp
# Now it's OK to overwrite the main analysis files with the new state.
self.move(valid_analysis_tmp, self._analysis_file)
self.move(invalid_analysis_tmp, self._invalid_analysis_file)
else:
self._deleted_sources = []
def invalidation_hints(self, relevant_targets):
# If needed, find targets that we've changed locally (as opposed to
# changes synced in from the SCM).
# TODO(benjy): Should locally_changed_targets be available in all Tasks?
locally_changed_targets = None
if self._changed_targets_heuristic_limit:
locally_changed_targets = self._find_locally_changed_targets(relevant_targets)
if (locally_changed_targets and
len(locally_changed_targets) > self._changed_targets_heuristic_limit):
locally_changed_targets = None
return (self._partition_size_hint, locally_changed_targets)
def ordered_compile_settings_and_targets(self, relevant_targets):
"""Groups the targets into ordered chunks, dependencies before dependees.
Each chunk is of the form (compile_sett | ing, targets). Attempts to create as few chunks as
possible, under the constraint that targets with | different compile settings cannot be in the
same chunk, and dependencies must be in the same chunk or an earlier chunk than their
dependees.
Detects impossible combinations/dependency relationships with respect to the java target and
    source level, raising errors as necessary (see targets_to_compile and
infer_and_validate_java_target_levels).
:return: a list of tuples of the form (compile_settings, list of targets)
"""
relevant_targets = set(relevant_targets)
def get_platform(target):
return getattr(target, 'platform', None)
# NB(g |
funa1g/TextAnalyzer | analyze.py | Python | apache-2.0 | 636 | 0.001572 | import arg | parse
from lib import TextAnalyzer
def main(file_path: str):
"""execute TextAnalyzer
"""
analyzer = TextAnalyzer()
analyzer.read(file_path)
analyzer.execute()
if __name__ == "__main__":
# execute only if run as a script
parser = argparse.ArgumentParser(description='Analyzing text')
parser.add_argument('text_file_pa | th', metavar='f', type=str,
help='analyzing text file path')
parser.add_argument('-T', dest='text', metavar='T', type=str,
required=False, help='analyzing text')
args = parser.parse_args()
main(args.text_file_path)
|
MungoRae/home-assistant | tests/components/climate/test_generic_thermostat.py | Python | apache-2.0 | 30,816 | 0 | """The tests for the generic_thermostat."""
import asyncio
import datetime
import pytz
import unittest
from unittest import mock
import homeassistant.core as ha
from homeassistant.core import callback
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_OFF,
TEMP_CELSIUS,
)
from homeassistant.util.unit_system import METRIC_SYSTEM
from homeassistant.components import climate
from tests.common import assert_setup_component, get_test_home_assistant
ENTITY = 'climate.test'
ENT_SENSOR = 'sensor.test'
ENT_SWITCH = 'switch.test'
MIN_TEMP = 3.0
MAX_TEMP = 65.0
TARGET_TEMP = 42.0
TOLERANCE = 0.5
class TestSetupClimateGenericThermostat(unittest.TestCase):
"""Test the Generic thermostat with custom config."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_missing_conf(self):
"""Test set up heat_control with missing config values."""
config = {
'name': 'test',
'target_sensor': ENT_SENSOR
}
with assert_setup_component(0):
setup_component(self.hass, 'climate', {
'climate': config})
def test_valid_conf(self):
"""Test set up genreic_thermostat with valid config values."""
self.assertTrue(setup_component(self.hass, 'climate',
{'climate': {
'platform': 'generic_thermostat',
'name': 'test',
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR}}))
def test_setup_with_sensor(self):
"""Test set up heat_control with sensor to trigger update at init."""
self.hass.states.set(ENT_SENSOR, 22.0, {
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS
})
assert setup_component(self.hass, climate.DOMAIN, {'climate': {
'platform': 'generic_thermostat',
'name': 'test',
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR
}})
state = self.hass.states.get(ENTITY)
self.assertEqual(
TEMP_CELSIUS, state.attributes.get('unit_of_measurement'))
self.assertEqual(22.0, state.attributes.get('current_temperature'))
class TestClimateGenericThermostat(unittest.TestCase):
"""Test the Generic thermostat."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.units = METRIC_SYSTEM
assert setup_component(self.hass, climate.DOMAIN, {'climate': {
'platform': 'generic_thermostat',
'name': 'test',
'tolerance': 2,
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR
}})
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_defaults_to_unknown(self):
"""Test the setting of defaults to unknown."""
self.assertEqual('idle', self.hass.states.get(ENTITY).state)
def test_default_setup_params(self):
"""Test the setup with default parameters."""
state = self.hass.states.get(ENTITY)
self.assertEqual(7, state.attributes.get('min_temp'))
self.assertEqual(35, state.attributes.get('max_temp'))
self.assertEqual(None, state.attributes.get('temperature'))
def test_get_operation_modes(self):
"""Test that the operation list returns the correct modes."""
state = self.hass.states.get(ENTITY)
modes = state.attributes.get('operation_list')
self.assertEqual([climate.STATE_AUTO, STATE_OFF], modes)
def test_set_target_temp(self):
"""Test the setting of the target temperature."""
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY)
self.assertEqual(30.0, state.attributes.get('temperature'))
def test_sensor_bad_unit(self):
"""Test sensor that have bad unit."""
state = self.hass.states.get(ENTITY)
temp = state.attributes.get('current_temperature')
unit = state.attributes.get('unit_of_measurement')
self._setup_sensor(22.0, unit='bad_unit')
self.hass.block_till_done()
state = self.hass.states.get(ENTITY)
self.assertEqual(unit, state.attributes.get('unit_of_measurement'))
self.assertEqual(temp, state.attributes.get('current_temperature'))
def test_sensor_bad_value(self):
"""Test sensor that have None as state."""
state = self.hass.states.get(ENTITY)
temp = state.attributes.get('current_temperature')
unit = state.attributes.get('unit_of_measurement')
self._setup_sensor(None)
self.hass.block_till_done()
state = self.hass.states.get(ENTITY)
self.assertEqual(unit, state.attributes.get('unit_of_measurement'))
self.assertEqual(temp, state.attributes.get('current_temperature'))
def test_set_target_temp_heater_on(self):
"""Test if target temperature turn heater on."""
self._setup_switch(False)
self._setup_sensor(25)
self.hass.block_till_done()
climate.set_temperature(self.hass, | 30)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = | self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_set_target_temp_heater_off(self):
"""Test if target temperature turn heater off."""
self._setup_switch(True)
self._setup_sensor(30)
self.hass.block_till_done()
climate.set_temperature(self.hass, 25)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_temp_change_heater_on_within_tolerance(self):
"""Test if temperature change doesn't turn on within tolerance."""
self._setup_switch(False)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(29)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_temp_change_heater_on_outside_tolerance(self):
"""Test if temperature change turn heater on outside tolerance."""
self._setup_switch(False)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(25)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_temp_change_heater_off_within_tolerance(self):
"""Test if temperature change doesn't turn off within tolerance."""
self._setup_switch(True)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(31)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_temp_change_heater_off_outside_tolerance(self):
"""Test if temperature change turn heater off outside tolerance."""
self._setup_switch(True)
climate.set_temperature(self.hass, 30)
self.hass.block_till_done()
self._setup_sensor(35)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqua |
patflick/tsppi | src/bpscore_benchmark.py | Python | mit | 3,882 | 0.000773 | #!/usr/bin/env python3
#
# This script executes different GO BPScore algorithms
# in order to compare their run times.
# for timing
import time
# for the data connection
import pappi.sql
from pappi.data_config import *
# import the GO association loading function
from pappi.go.utils import load_go_associations_sql
# import similarity scorer to be benchmarked
from pappi.go.fast_similarity import GoFastSimilarity
from pappi.go.fastSemSim_similarity import GoFastSemSimSimilarity
from pappi.go.prebuf_similarity import GoPreBufSimilarity
from pappi.go.gene_prebuf_similarity import GoGenePreBufSimilarity
class BPScore_Benchmarker:
def __init__(self):
# get database connection
self.con = pappi.sql.get_conn(DATABASE)
self.genes = self.get_benchmark_genes(self.con)
self.scorers = []
self.init_time = []
self.run_times = dict() # dict {number of genes -> list of run times}
def init_scorers(self):
        # initialize all the scorers (and save the initialization time)
start = time.time()
self.scorers.append(GoFastSimilarity(GO_OBO_FILE, self.con, True))
self.init_time.append(time.time() - start)
start = time.time()
self.scorers.append(GoFastSemSimSimilarity(GO_OBO_FILE, GO_ASSOC_FILE,
self.con))
self.init_time.append(time.time() - start)
start = time.time()
self.scorers.append(GoPreBufSimilarity(GO_OBO_FILE, GO_SCORE_FILE,
GO_SCORE_MAP_FILE, self.con, True))
self.init_time.append(time.time() - start)
start = time.time()
self.scorers.append(GoGenePreBufSimilarity(GO_OBO_FILE, GO_SCORE_FILE,
GO_SCORE_MAP_FILE,
GO_BPSCORE_FILE,
GO_BPSCORE_MA | P_FILE, self.con,
True))
self.init_time.ap | pend(time.time() - start)
def benchmark_scorers(self, nGenes):
# get a set of genes with the given size
benchmark_genes = set(self.genes[0:nGenes])
# score the gene set with all scorers
score_time = []
for scorer in self.scorers:
start = time.time()
score = scorer.gene_set_score(benchmark_genes)
score_time.append(time.time() - start)
# save run time to class table
self.run_times[nGenes] = score_time
def get_benchmark_genes(self, sql_conn):
# load Gene->GO-Term associations to get a set of genes to be used in
# the benchmark
assoc = load_go_associations_sql(sql_conn)
# use a list for fast/efficient range access
genes = list(assoc.keys())
return genes
def run_benchmark(self):
for n in range(10, 1001, 10):
print("benchmarking for n = " + str(n) + " genes...")
self.benchmark_scorers(n)
def print_timings(self):
print("scored by " + str(len(self.scorers)) + " scorers")
print()
print("n\t" + "\t".join(self.scorers[i].__class__.__name__
for i in range(0, len(self.scorers))))
print("init\t" + "\t".join(str(self.init_time[i])
for i in range(0, len(self.scorers))))
for n in sorted(self.run_times.keys()):
score_time = self.run_times[n]
print(str(n) + "\t" + "\t".join(str(s) for s in score_time))
# the main benchmark:
if __name__ == '__main__':
print("loading benchmarking class...")
benchmarker = BPScore_Benchmarker()
print("benchmark init times...")
benchmarker.init_scorers()
print("benchmark scoring...")
benchmarker.run_benchmark()
# print the actual timing results
benchmarker.print_timings()
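    # Usage sketch (assumes pappi's DATABASE and the GO_* paths in
    # data_config are valid): `python3 bpscore_benchmark.py > timings.tsv`
    # captures the run; the final table is tab-separated, one column per
    # scorer and one row per gene-set size.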
|
czechmark/neurioToCSV | neurioToCSV.py | Python | gpl-2.0 | 4,657 | 0.018037 | #!/usr/bin/env python
"""
Copyright [2016] [Mark Petschek mark@petschek.com]
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#set up the imports
import sys
import neurio
import my_keys
import json
import dateutil.parser
import dateutil.tz  # tzlocal()/tzutc() are used below; don't rely on parser importing tz
import getopt
import datetime
import subprocess
import pprint
import requests
sys.path.append(".")
sys.path.append("..")
def gen_headers(token):
"""Utility method adding authentication token to requests."""
headers = {
"Authorization": " ".join(["Bearer", token])
}
return headers
def get_user_information(client,tp):
"""Gets the current user information, including sensor ID
Args:
None
Returns:
dictionary object containing information about the current user
"""
url = "https://api.neur.io/v1/users/current"
headers = gen_headers(tp.get_token())
headers["Content-Type"] = "application/json"
r = requests.get(url, headers=headers)
return r.json()
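# For reference, main() below unpacks a payload of (illustratively) this
# shape: {"locations": [{"sensors": [{"sensorId": "0x0000000000000000"}]}]}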
def main(argv):
    entireDay = False
    stdOutput = True
    getSensorId = False
ltz = dateutil.tz.tzlocal()
UTCtz = dateutil.tz.tzutc()
try:
opts, args = getopt.getopt(argv,"sht:o:",["hoursBack"])
except getopt.GetoptError:
        print 'neurioToCSV -sh -t dHrs -o fileName'
sys.exit(2)
for opt, arg in opts:
if opt in ("-s"):
getSensorId=True
if opt in ("-o"):
ofile = arg
            stdOutput = False
if opt in ("-t"):
dHrs = int(arg)
            entireDay = True
if opt in ("-h"):
            print 'neurioToCSV -sh -t dHrs -o fileName'
            print '-s print sensor id'
            print '-h print help info'
            print '-o fileName - write the CSV output to fileName instead of stdout'
            print '-t dHrs - dHrs = number of hours in the past to fetch the Neurio data'
sys.exit(0)
# get the Neurio token
tp = neurio.TokenProvider(key=my_keys.key,
secret=my_keys.secret)
nc = neurio.Client(token_provider=tp)
#read the sensor Id
if getSensorId:
user_info = get_user_information(nc,tp)
locations = user_info.get("locations")
sensors = locations[0].get("sensors")
sensorId = sensors[0].get("sensorId")
print "Sensor Id = " + sensorId.encode("utf-8")
sys.exit(0)
#do the file stuff
if stdOutput:
fO=sys.stdout
else:
try:
fO = open(ofile,'w')
except IOError:
print "Could not open file for writing"
sys.exit(-1)
    # Neurio doesn't allow more than a day's worth of data in a single request,
    # so limit the data to either now - 1 day or input offset time + 1 day
| if entireDay:
stime = datetime.datetime.now() - datetime.timedelta(hours=dHrs)
etime = stime + datetime.timedelta(days=1)
| else:
stime = datetime.datetime.now() - datetime.timedelta(days=1)
etime = stime+datetime.timedelta(days=1)
    # Neurio uses UTC, so we need to convert localtime to UTC and
    # format the strings that Neurio expects
stime = stime.replace(tzinfo=ltz)
etime = etime.replace(tzinfo=ltz)
stimeString = stime.astimezone(UTCtz).strftime("%Y-%m-%dT%H:%M:%S")
etimeString = etime.astimezone(UTCtz).strftime("%Y-%m-%dT%H:%M:%S")
#read the data from neurio
stats = nc.get_samples_stats(my_keys.sensor_id,stimeString,"minutes",etimeString,5)
#print "time,consuptionEnergy(Watt Sec),generationEnergy(Watt Sec)"
fO.write( 'time,consuptionEnergy(Watt Sec),generationEnergy(Watt Sec)\n')
for item in stats:
#read the time
time = dateutil.parser.parse(item.get("start")).astimezone(ltz)
fO.write (str(time) + ',' + str(item.get('consumptionEnergy')) + ',' + str(item.get('generationEnergy')) + '\n')
if __name__ == '__main__':
main(sys.argv[1:])
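# Usage sketch (Python 2; assumes my_keys.py defines key, secret and
# sensor_id for your Neurio account):
#   python neurioToCSV.py -t 12 -o today.csv   # last 12 hours, written to a file
#   python neurioToCSV.py -s                   # print the sensor id and exit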
|
ray-project/ray | rllib/train.py | Python | apache-2.0 | 9,901 | 0.000202 | #!/usr/bin/env python
import argparse
import os
from pathlib import Path
import yaml
import ray
from ray.tune.config_parser import make_parser
from ray.tune.progress_reporter import CLIReporter, JupyterNotebookReporter
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.resources import resources_to_json
from ray.tune.tune import run_experiments
from ray.tune.schedulers import create_scheduler
from ray.rllib.utils.deprecation import deprecation_warning
from ray.rllib.utils.framework import try_import_tf, try_import_torch
try:
class_name = get_ipython().__class__.__name__
    IS_NOTEBOOK = "Terminal" not in class_name
except NameError:
IS_NOTEBOOK = False
# Try to import both backends for flag checking/warnings.
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
EXAMPLE_USAGE = """
Training example via RLlib CLI:
rllib train --run DQN --env CartPole-v0
Grid search example via RLlib CLI:
rllib train -f tuned_examples/cartpole-grid-search-example.yaml
Grid search example via executable:
./train.py -f tuned_examples/cartpole-grid-search-example.yaml
Note that -f overrides all other trial-specific command-line options.
"""
def create_parser(parser_creator=None):
parser = make_parser(
parser_creator=parser_creator,
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Train a reinforcement learning agent.",
epilog=EXAMPLE_USAGE,
)
# See also the base parser definition in ray/tune/config_parser.py
parser.add_argument(
"--ray-address",
default=None,
type=str,
help="Connect to an existing Ray cluster at this address instead "
"of starting a new one.",
)
parser.add_argument(
"--ray-ui", action="store_true", help="Whether to enable the Ray web UI."
)
# Deprecated: Use --ray-ui, instead.
parser.add_argument(
"--no-ray-ui",
action="store_true",
help="Deprecated! Ray UI is disabled by default now. "
"Use `--ray-ui` to enable.",
)
parser.add_argument(
"--local-mode",
action="store_true",
help="Run ray in local mode for easier debugging.",
)
parser.add_argument(
"--ray-num-cpus",
default=None,
type=int,
help="--num-cpus to use if starting a new cluster.",
)
parser.add_argument(
"--ray-num-gpus",
default=None,
type=int,
help="--num-gpus to use if starting a new cluster.",
)
parser.add_argument(
"--ray-num-nodes",
default=None,
type=int,
help="Emulate multiple cluster nodes for debugging.",
)
parser.add_argument(
"--ray-object-store-memory",
default=None,
type=int,
help="--object-store-memory to use if starting a new cluster.",
)
parser.add_argument(
"--experiment-name",
default="default",
type=str,
help="Name of the subdirectory under `local_dir` to put results in.",
)
parser.add_argument(
"--local-dir",
default=DEFAULT_RESULTS_DIR,
type=str,
help="Local dir to save training results to. Defaults to '{}'.".format(
DEFAULT_RESULTS_DIR
),
)
parser.add_argument(
"--upload-dir",
default="",
type=str,
help="Optional URI to sync training results to (e.g. s3://bucket).",
)
# This will override any framework setting found in a yaml file.
parser.add_argument(
"--framework",
choices=["tf", "tf2", "tfe", "torch"],
default=None,
help="The DL fr | amework specifier.",
)
parser.add_argument(
"-v", action="store_true", help="Whether to use INFO level logging."
)
parser.add_argument(
"-vv", action=" | store_true", help="Whether to use DEBUG level logging."
)
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume previous Tune experiments.",
)
parser.add_argument(
"--trace",
action="store_true",
help="Whether to attempt to enable tracing for eager mode.",
)
parser.add_argument(
"--env", default=None, type=str, help="The gym environment to use."
)
parser.add_argument(
"-f",
"--config-file",
default=None,
type=str,
help="If specified, use config options from this file. Note that this "
"overrides any trial-specific options set via flags above.",
)
# Obsolete: Use --framework=torch|tf2|tfe instead!
parser.add_argument(
"--torch",
action="store_true",
help="Whether to use PyTorch (instead of tf) as the DL framework.",
)
parser.add_argument(
"--eager",
action="store_true",
help="Whether to attempt to enable TF eager execution.",
)
return parser
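# A programmatic sketch (illustrative; the yaml path is hypothetical):
#   parser = create_parser()
#   args = parser.parse_args(["-f", "tuned_examples/cartpole-ppo.yaml"])
#   run(args, parser)  # run() is defined below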
def run(args, parser):
if args.config_file:
with open(args.config_file) as f:
experiments = yaml.safe_load(f)
else:
# Note: keep this in sync with tune/config_parser.py
experiments = {
args.experiment_name: { # i.e. log to ~/ray_results/default
"run": args.run,
"checkpoint_freq": args.checkpoint_freq,
"checkpoint_at_end": args.checkpoint_at_end,
"keep_checkpoints_num": args.keep_checkpoints_num,
"checkpoint_score_attr": args.checkpoint_score_attr,
"local_dir": args.local_dir,
"resources_per_trial": (
args.resources_per_trial
and resources_to_json(args.resources_per_trial)
),
"stop": args.stop,
"config": dict(args.config, env=args.env),
"restore": args.restore,
"num_samples": args.num_samples,
"sync_config": {
"upload_dir": args.upload_dir,
},
}
}
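    # For reference, a config file passed via -f mirrors the dict built
    # above. A minimal, hypothetical yaml example:
    #   cartpole-ppo:
    #     run: PPO
    #     env: CartPole-v0
    #     stop:
    #       episode_reward_mean: 150
    #     config:
    #       framework: torch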
# Ray UI.
if args.no_ray_ui:
deprecation_warning(old="--no-ray-ui", new="--ray-ui", error=False)
args.ray_ui = False
verbose = 1
for exp in experiments.values():
# Bazel makes it hard to find files specified in `args` (and `data`).
# Look for them here.
# NOTE: Some of our yaml files don't have a `config` section.
input_ = exp.get("config", {}).get("input")
if input_ and input_ != "sampler":
# This script runs in the ray/rllib dir.
rllib_dir = Path(__file__).parent
def patch_path(path):
if isinstance(path, list):
return [patch_path(i) for i in path]
elif isinstance(path, dict):
return {patch_path(k): patch_path(v) for k, v in path.items()}
elif isinstance(path, str):
if os.path.exists(path):
return path
else:
abs_path = str(rllib_dir.absolute().joinpath(path))
return abs_path if os.path.exists(abs_path) else path
else:
return path
exp["config"]["input"] = patch_path(input_)
if not exp.get("run"):
parser.error("the following arguments are required: --run")
if not exp.get("env") and not exp.get("config", {}).get("env"):
parser.error("the following arguments are required: --env")
if args.torch:
deprecation_warning("--torch", "--framework=torch")
exp["config"]["framework"] = "torch"
elif args.eager:
deprecation_warning("--eager", "--framework=[tf2|tfe]")
exp["config"]["framework"] = "tfe"
elif args.framework is not None:
exp["config"]["framework"] = args.framework
if args.trace:
if exp["config"]["framework"] not in ["tf2", "tfe"]:
raise ValueError("Must enable --eager to enable tracing.")
exp["config"]["eager_tracing"] = True
if ar |
lkash/test | tests/test-perf2.py | Python | bsd-3-clause | 590 | 0.00339 | #!/usr/bin/env python
import time
import unittest
import dpkt
class TestPerf(unittest.TestCase):
rounds = 10000
def setUp(self):
self.start = time.time()
def tearDown(self):
print self.rounds / (time.time() - self.start), 'rounds/s'
def test_pack(self):
for i in xrange(self.rounds):
str | (dpkt.ip.IP())
print 'pack:',
def test_unpack(self):
buf = st | r(dpkt.ip.IP())
for i in xrange(self.rounds):
dpkt.ip.IP(buf)
print 'unpack:',
if __name__ == '__main__':
unittest.main()
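# A rough command-line equivalent of test_unpack using timeit (sketch, py2):
#   python2 -m timeit -s "import dpkt; buf = str(dpkt.ip.IP())" "dpkt.ip.IP(buf)"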
|
bitmazk/cmsplugin-video-gallery | manage.py | Python | mit | 294 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault | ('DJANGO_SETTINGS_MODULE',
'video_gallery.tests.south_settings')
from django.core.management import execute_from_command_line
execute_from_com | mand_line(sys.argv)
|
github-borat/cinder | cinder/volume/drivers/netapp/eseries/client.py | Python | apache-2.0 | 14,901 | 0.000067 | # Copyright (c) 2014 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client classes for web services.
"""
import json
import requests
import six.moves.urllib.parse as urlparse
from cinder import exception
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class WebserviceClient(object):
"""Base client for e-series web services."""
def __init__(self, scheme, host, port, service_path, username,
password, **kwargs):
self._validate_params(scheme, host, port)
self._create_endpoint(scheme, host, port, service_path)
self._username = username
self._password = password
self._init_connection()
def _validate_params(self, scheme, host, port):
"""Does some basic validation for web service params."""
if host is None or port is None or scheme is None:
msg = _("One of the required inputs from host, port"
" or scheme not found.")
raise exception.InvalidInput(reason=msg)
if scheme not in ('http', 'https'):
raise exception.InvalidInput(reason=_("Invalid transport type."))
def _create_endpoint(self, scheme, host, port, service_path):
"""Creates end point url for the service."""
netloc = '%s:%s' % (host, port)
self._endpoint = urlparse.urlunparse((scheme, netloc, service_path,
None, None, None))
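        # e.g. ('https', 'array.example.com:8443', '/devmgr/v2') yields
        # 'https://array.example.com:8443/devmgr/v2' (values illustrative).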
def _init_connection(self):
"""Do client specific set up for session and connection pooling."""
self.conn = requests.Session()
if self._username and self._password:
self.conn.auth = (self._username, self._password)
def invoke_service(self, method='GET', url=None, params=None, data=None,
headers=None, timeout=None, verify=False):
url = url or self._endpoint
try:
response = self.conn.request(method, url, params, data,
headers=headers, timeout=timeout,
verify=verify)
        # Catch unexpected error conditions here so that only known
        # exceptions propagate back to the caller.
except Exception as e:
LOG.exception(_("Unexpected error while invoking web service."
" Error - %s."), e)
raise exception.NetAppDriverException(
_("Invoking web service failed."))
self._eval_response(response)
return response
def _eval_response(self, response):
"""Evaluates response before passing result to invoker."""
pass
class RestClient(WebserviceClient):
"""REST client specific to e-series storage service."""
def __init__(self, scheme, host, port, service_path, username,
password, **kwargs):
super(RestClient, self).__init__(scheme, host, port, service_path,
username, password, **kwargs)
kwargs = kwargs or {}
self._system_id = kwargs.get('system_id')
self._content_type = kwargs.get('content_type') or 'json'
def set_system_id(self, system_id):
"""Set the storage system id."""
self._system_id = system_id
def get_system_id(self):
"""Get the storage system id."""
return getattr(self, '_system_id', None)
def _get_resource_url(self, path, use_system=True, **kwargs):
"""Creates end point url for rest service."""
kwargs = kwargs or {}
if use_system:
if not self._system_id:
raise exception.NotFound(_('Storage system id not set.'))
kwargs['system-id'] = self._system_id
path = path.format(**kwargs)
if not self._endpoin | t.endswith('/'):
self._endpoint = '%s/' % self._endpoint
return urlparse.urljoin(self._endpoint, path.lstrip('/'))
def _invoke(self, method, path, data=None, use_system=True,
timeout=None, verify=False, **kwargs):
"""Invokes end point for resource on path."""
params = {'m': method, 'p': path, 'd': data, 'sys': use_system,
't': timeout, | 'v': verify, 'k': kwargs}
LOG.debug("Invoking rest with method: %(m)s, path: %(p)s,"
" data: %(d)s, use_system: %(sys)s, timeout: %(t)s,"
" verify: %(v)s, kwargs: %(k)s." % (params))
url = self._get_resource_url(path, use_system, **kwargs)
if self._content_type == 'json':
headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
data = json.dumps(data) if data else None
res = self.invoke_service(method, url, data=data,
headers=headers,
timeout=timeout, verify=verify)
return res.json() if res.text else None
else:
raise exception.NetAppDriverException(
_("Content type not supported."))
def _eval_response(self, response):
"""Evaluates response before passing result to invoker."""
super(RestClient, self)._eval_response(response)
status_code = int(response.status_code)
# codes >= 300 are not ok and to be treated as errors
if status_code >= 300:
# Response code 422 returns error code and message
if status_code == 422:
msg = _("Response error - %s.") % response.text
else:
msg = _("Response error code - %s.") % status_code
raise exception.NetAppDriverException(msg)
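    # Usage sketch (endpoint, credentials and pool name are illustrative):
    #   client = RestClient('https', 'array.example.com', 8443, '/devmgr/v2',
    #                       'admin', 'secret', system_id='1')
    #   client.create_volume('pool0', 'vol0', 10)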
def create_volume(self, pool, label, size, unit='gb', seg_size=0):
"""Creates volume on array."""
path = "/storage-systems/{system-id}/volumes"
data = {'poolId': pool, 'name': label, 'sizeUnit': unit,
'size': int(size), 'segSize': seg_size}
return self._invoke('POST', path, data)
def delete_volume(self, object_id):
"""Deletes given volume from array."""
path = "/storage-systems/{system-id}/volumes/{object-id}"
return self._invoke('DELETE', path, **{'object-id': object_id})
def list_volumes(self):
"""Lists all volumes in storage array."""
path = "/storage-systems/{system-id}/volumes"
return self._invoke('GET', path)
def list_volume(self, object_id):
"""List given volume from array."""
path = "/storage-systems/{system-id}/volumes/{object-id}"
return self._invoke('GET', path, **{'object-id': object_id})
def update_volume(self, object_id, label):
"""Renames given volume in array."""
path = "/storage-systems/{system-id}/volumes/{object-id}"
data = {'name': label}
return self._invoke('POST', path, data, **{'object-id': object_id})
def get_volume_mappings(self):
"""Creates volume mapping on array."""
path = "/storage-systems/{system-id}/volume-mappings"
return self._invoke('GET', path)
def create_volume_mapping(self, object_id, target_id, lun):
"""Creates volume mapping on array."""
path = "/storage-systems/{system-id}/volume-mappings"
data = {'mappableObjectId': object_id, 'targetId': target_id,
'lun': lun}
return self._invoke('POST', path, data)
def delete_volume_mapping(self, map_object_id):
"""Deletes given volume mapping from array."""
pat |
evernote/pootle | pootle/apps/pootle_app/management/commands/test_checks.py | Python | gpl-2.0 | 3,548 | 0.001691 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Evernote Corporation
#
# This file is part of Pootle.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import logging
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
from optparse import make_option
from translate.filters.checks import FilterFa | ilure
from django.core.management.base import NoArgsCommand, CommandError
from pootle_misc.checks import ENChecker, get_qualitychecks
from pootle_store.models import Unit
class Command(NoArgsCommand):
help = "Tests qua | lity checks against string pairs."
shared_option_list = (
make_option('--check', action='append', dest='checks',
help='Check name to check for'),
make_option('--source', dest='source', help='Source string'),
make_option('--unit', dest='unit', help='Unit id'),
make_option('--target', dest='target',
help='Translation string'),
)
option_list = NoArgsCommand.option_list + shared_option_list
def handle_noargs(self, **options):
# adjust debug level to the verbosity option
verbosity = int(options.get('verbosity', 1))
debug_levels = {
0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG
}
debug_level = debug_levels.get(verbosity, logging.DEBUG)
logging.getLogger().setLevel(debug_level)
self.name = self.__class__.__module__.split('.')[-1]
source = options.get('source', '')
target = options.get('target', '')
unit_id = options.get('unit', '')
checks = options.get('checks', [])
        # Exactly one of (--source/--target) or --unit must be provided.
        if bool(source and target) == bool(unit_id):
raise CommandError("Either --unit or a pair of --source "
"and --target must be provided.")
if unit_id:
try:
unit = Unit.objects.get(id=unit_id)
source = unit.source
target = unit.target
except Unit.DoesNotExist, e:
raise CommandError(e.message)
checker = ENChecker()
if not checks:
checks = get_qualitychecks().keys()
error_checks = []
for check in checks:
filtermessage = ''
try:
test = getattr(checker, check)
filterresult = test(source, target)
except FilterFailure, e:
filterresult = False
filtermessage = unicode(e)
message = "%s - %s" % (filterresult, check)
if filtermessage:
message += ": %s" % filtermessage
logging.info(message)
if not filterresult:
error_checks.append(check)
if error_checks:
self.stdout.write('Failing checks: %s' % ', '.join(error_checks))
else:
self.stdout.write('No errors found.')
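# Usage sketch (run inside a configured Pootle environment; the invocation
# and values are illustrative):
#   pootle test_checks --source "Hello %s" --target "Bonjour %s" --check printf
#   pootle test_checks --unit 12345 --check endpunc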
|