Dataset schema (each row below: repo_name | path | language | license | size | score, followed by the file content with the prefix, middle, and suffix cells joined):

| column | type | notes |
|---|---|---|
| repo_name | string | lengths 5–100 |
| path | string | lengths 4–231 |
| language | string | 1 class |
| license | string | 15 classes |
| size | int64 | 6–947k |
| score | float64 | 0–0.34 |
| prefix | string | lengths 0–8.16k |
| middle | string | lengths 3–512 |
| suffix | string | lengths 0–8.17k |
bjuvensjo/scripts | vang/bitbucket/has_branch.py | Python | apache-2.0 | 1,977 | 0
#!/usr/bin/env python3
import argparse
from sys import argv
from vang.bitbucket.get_branches import get_branches
from vang.bitbucket.utils import get_repo_specs
def has_branch(repo_specs, branch):
for spec in repo_specs:
branches = [
b['displayId'] for spec, bs in get_branches((spec, ), branch)
for b in bs
]
yield spec, branch in branches
def main(branch,
only_has=True,
only_not_has=False,
dirs=None,
repos=None,
projects=None):
specs = get_repo_specs(dirs, repos, projects)
for spec, has in has_branch(specs, branch):
if only_has:
if has:
print(f'{spec[0]}/{spec[1]}')
elif only_not_has:
if not has:
print(f'{spec[0]}/{spec[1]}')
else:
print(f'{spec[0]}/{spec[1]}, {branch}: {has}')
def parse_args(args):
parser = argparse.ArgumentParser(
description='Check repository branches in Bitbucket')
parser.add_argument('branch', help='The branch to check')
filter_group = parser.add_mutually_exclusive_group()
filter_group.add_argument(
'-o',
'--only_has',
action='store_true',
        help='Print only repos that have the branch.')
filter_group.add_argument(
'-n',
'--only_not_has',
action='store_true',
        help='Print only repos that do not have the branch.')
group = parser.add_mutually_exclusive_group()
group.add_argument(
'-d',
'--dirs',
nargs='*',
default=['.'],
help='Git directories to extract repo information from')
group.add_argument(
'-r', '--repos', nargs='*', help='Repos, e.g. key1/repo1 key2/repo2')
group.add_argument(
'-p', '--projects', nargs='*', help='Projects, e.g. key1 key2')
return parser.parse_args(args)
if __name__ == '__main__': # pragma: no cover
main(**parse_args(argv[1:]).__dict__)
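A hedged usage sketch for the script above (assumes the vang package is installed and its Bitbucket configuration is in place; the branch name is illustrative):

```python
# List every repo under the current directory that has a "develop" branch.
from vang.bitbucket.has_branch import main, parse_args

args = parse_args(['develop', '--only_has'])
main(**args.__dict__)
```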

madflow/qnap-radicale | shared/lib/daemon/pidfile.py | Python | gpl-2.0 | 2,139 | 0
# -*- coding: utf-8 -*-
# daemon/pidfile.py
# Part of ‘python-daemon’, an implementation of PEP 3143.
#
# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Apache License, version 2.0 as published by the
# Apache Software Foundation.
# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import (absolute_import, unicode_literals)
from lockfile.pidlockfile import PIDLockFile
class TimeoutPIDLockFile(PIDLockFile, object):
""" Lockfile with default timeout, implemented as a Unix PID file.
This uses the ``PIDLockFile`` implementation, with the
following changes:
* The `acquire_timeout` parameter to the initialiser will be
used as the default `timeout` parameter for the `acquire`
method.
"""
def __init__(self, path, acquire_timeout=None, *args, **kwargs):
""" Set up the parameters of a TimeoutPIDLockFile.
:param path: Filesystem path to the PID file.
:param acquire_timeout: Value to use by default for the
`acquire` call.
:return: ``None``.
"""
self.acquire_timeout = acquire_timeout
super(TimeoutPIDLockFile, self).__init__(path, *args, **kwargs)
def acquire(self, timeout=None, *args, **kwargs):
""" Acquire the lock.
:param timeout: Specifies the timeout; see below for valid
values.
:return: ``None``.
The `timeout` defaults to the value set during
initialisation with the `acquire_timeout` parameter. It is
passed to `PIDLockFile.acquire`; see that method for
details.
"""
if timeout is None:
timeout = self.acquire_timeout
super(TimeoutPIDLockFile, self).acquire(timeout, *args, **kwargs)
# Local variables:
# coding: utf-8
# mode: python
# End:
# vim: fileencoding=utf-8 filetype=python :
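A minimal sketch of how the class above is typically used (assumes the lockfile package, which provides PIDLockFile, is installed; the path is illustrative):

```python
# acquire() without an explicit timeout falls back to acquire_timeout.
pidfile = TimeoutPIDLockFile('/tmp/example.pid', acquire_timeout=5)
pidfile.acquire()   # blocks for at most ~5 seconds before raising LockTimeout
pidfile.release()
```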

SinnerSchraderMobileMirrors/django-cms | cms/test_utils/util/context_managers.py | Python | bsd-3-clause | 6,028 | 0.004147
# -*- coding: utf-8 -*-
from contextlib import contextmanager
from django.conf import settings
from django.core.signals import request_started
from django.db import reset_queries
from django.template import context
from django.utils.translation import get_language, activate
from shutil import rmtree as _rmtree
from tempfile import template, mkdtemp
import sys
from cms.utils.compat.string_io import StringIO
class NULL:
pass
class SettingsOverride(object):
"""
    Overrides Django settings within a context and resets them to their initial
values on exit.
Example:
with SettingsOverride(DEBUG=True):
# do something
"""
def __init__(self, **overrides):
self.overrides = overrides
self.special_handlers = {
'TEMPLATE_CONTEXT_PROCESSORS': self.template_context_processors,
}
def __enter__(self):
self.old = {}
for key, value in self.overrides.items():
self.old[key] = getattr(settings, key, NULL)
setattr(settings, key, value)
def __exit__(self, type, value, traceback):
for key, value in self.old.items():
if value is not NULL:
setattr(settings, key, value)
else:
delattr(settings,key) # do not pollute the context!
self.special_handlers.get(key, lambda:None)()
def template_context_processors(self):
context._standard_context_processors = None
class StdOverride(object):
def __init__(self, std='out', buffer=None):
self.std = std
self.buffer = buffer or StringIO()
def __enter__(self):
setattr(sys, 'std%s' % self.std, self.buffer)
return self.buffer
def __exit__(self, type, value, traceback):
setattr(sys, 'std%s' % self.std, getattr(sys, '__std%s__' % self.std))
class StdoutOverride(StdOverride):
"""
    This overrides Python's standard output and redirects it to a StringIO
    object, so that one can test the output of the program.
example:
lines = None
with StdoutOverride() as buffer:
# print stuff
lines = buffer.getvalue()
"""
def __init__(self, buffer=None):
        super(StdoutOverride, self).__init__('out', buffer)
class LanguageOverride(object):
def __init__(self, language):
self.newlang = language
def __enter__(self):
self.oldlang = get_language()
activate(self.newlang)
def __exit__(self, type, value, traceback):
        activate(self.oldlang)
class TemporaryDirectory:
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
    Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
def __enter__(self):
return self.name
def cleanup(self):
try:
from tempfile import _exists
if _exists(self.name):
_rmtree(self.name)
except ImportError:
pass
def __exit__(self, exc, value, tb):
self.cleanup()
class UserLoginContext(object):
def __init__(self, testcase, user):
self.testcase = testcase
self.user = user
def __enter__(self):
loginok = self.testcase.client.login(username=self.user.username,
password=self.user.username)
self.old_user = getattr(self.testcase, 'user', None)
self.testcase.user = self.user
self.testcase.assertTrue(loginok)
def __exit__(self, exc, value, tb):
self.testcase.user = self.old_user
if not self.testcase.user:
delattr(self.testcase, 'user')
self.testcase.client.logout()
class ChangeModel(object):
"""
Changes attributes on a model while within the context.
These changes *ARE* saved to the database for the context!
"""
def __init__(self, instance, **overrides):
self.instance = instance
self.overrides = overrides
def __enter__(self):
self.old = {}
for key, value in self.overrides.items():
self.old[key] = getattr(self.instance, key, NULL)
setattr(self.instance, key, value)
self.instance.save()
def __exit__(self, exc, value, tb):
for key in self.overrides.keys():
old_value = self.old[key]
if old_value is NULL:
delattr(self.instance, key)
else:
setattr(self.instance, key, old_value)
self.instance.save()
class _AssertNumQueriesContext(object):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
self.connection = connection
def __enter__(self):
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.starting_queries = len(self.connection.queries)
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
settings.DEBUG = self.old_debug
request_started.connect(reset_queries)
if exc_type is not None:
return
final_queries = len(self.connection.queries)
executed = final_queries - self.starting_queries
queries = '\n'.join([q['sql'] for q in self.connection.queries[self.starting_queries:]])
self.test_case.assertEqual(
executed, self.num, "%d queries executed, %d expected. Queries executed:\n%s" % (
executed, self.num, queries
)
)
@contextmanager
def disable_logger(logger):
old = logger.disabled
logger.disabled = True
yield
logger.disabled = old
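An illustrative combination of the context managers above (assumes a configured Django settings module; the printed text is arbitrary):

```python
with SettingsOverride(DEBUG=True):
    with StdoutOverride() as buffer:
        print('hello')          # captured, not written to the terminal
    lines = buffer.getvalue()   # 'hello\n'
```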

maxwelld90/searcher_simulations | configs/publications/cikm2015/scripts/user_summary_generator_cg.py | Python | gpl-2.0 | 13,051 | 0.009961
import os
import sys
import fnmatch
serp_directory = '/Users/david/Dropbox/Shares/Vu-David/data/iiix_serp_results/'
serp_directory_files = [z for z in os.listdir(serp_directory) if os.path.isfile(os.path.join(serp_directory, z))]
def get_user_topic_queries(queries):
"""
Taking the queries data structure (generated from get_query_stats()), returns a new dictionary with userid/topic focusing. Bleh.
"""
ds = {}
for queryid in queries.keys():
query = queries[queryid]
if query['userid'] not in ds:
ds[query['userid']] = {}
if query['topic'] not in ds[query['userid']]:
ds[query['userid']][query['topic']] = []
ds[query['userid']][query['topic']].append(queryid)
for userid in ds:
for topic in ds[userid]:
ds[userid][topic] = sorted(map(int, ds[userid][topic]))
ds[userid][topic] = map(str, ds[userid][topic])
return ds
def get_query_stats(query_details_file, per_query_stats_file):
"""
Reads the query stats file, returns a data structure containing information for each query.
"""
ds = {}
f = open(per_query_stats_file, 'r')
for line in f:
line = line.strip().split(',')
if line[0] == 'queryid':
continue # Skip the header line
ds[line[0]] = {'queryid': line[0],
'userid': line[1],
'topic': line[2],
'condition': line[3],
'real_docs_viewed': line[5],
'real_doc_click_depth': line[6],
'real_doc_hover_depth': line[7],
'real_cg': get_real_query_cg(line[0]),}
f.close()
# Process the query details file to get the terms.
f = open(query_details_file, 'r')
for line in f:
line = line.strip().split(',')
if line[0] == 'queryid':
continue # Skip the header line
ds[line[0]]['terms'] = line[5]
f.close()
return ds
def get_log_filename_info(log_filename):
"""
Returns a dictionary of information from a supplied log filename (just the filename, not the absolute path).
"""
log_filename = log_filename.split('.')
if len(log_filename) > 2:
log_filename = '.'.join(log_filename[:len(log_filename)-1])
#print log_filename
else:
log_filename = log_filename[0]
log_filename = log_filename.split('-')
ret = {}
ret['ss'] = log_filename[0]
ret['topic'] = log_filename[1]
log_filename = log_filename[2].split('_')
ret['userid'] = log_filename[1][1:]
ret['threshold'] = log_filename[2][1:]
return ret
def get_serp_file(queryid):
"""
Returns the filename for the SERP results, given a queryid.
"""
global serp_directory
global serp_directory_files
for s in serp_directory_files:
if s.startswith('{0}-'.format(str(queryid))):
return os.path.join(serp_directory, s)
def get_real_query_cg(queryid):
"""
    Given a queryid, returns the CG the searcher obtained (by marking).
"""
serp_filename = get_serp_file(queryid)
serp_fo = open(serp_filename, 'r')
cg_count = 0
for line in serp_fo:
        if line.startswith('rank'):
continue
line = line.strip().split(',')
marked = int(line[6])
trec_judgement = int(line[7])
if marked > 0:
cg_count += trec_judgement
serp_fo.close()
return cg_count
def generate_summary_file(query_details_file, per_query_stats_file, run_base_dir, output_file):
"""
Generates the summary file! Each query on its own line.
Log filenames should be like: SS1-435-user_usearch0_t5.log
Stopping Strat 1, Topic 435, User search0, threshold=5
"""
queries = get_query_stats(query_details_file, per_query_stats_file)
user_queries = get_user_topic_queries(queries)
observed_strategies = {}
sim_queries = {} # Strategy -> t -> queryid
output_file = open(output_file, 'w')
def get_blank_query_stats_dict():
"""
Returns an empty query statistics dictionary.
"""
return {'click_depth': 0,
'hover_depth': 0,
'docs_viewed': 0,
'cg': 0,}
def get_cg_value(queryid, rank):
"""
Given a queryid and a rank, returns the CG for the document at that rank based upon the QRELS.
"""
serp_filename = get_serp_file(queryid)
serp_fo = open(serp_filename, 'r')
for line in serp_fo:
if line.startswith('rank'):
continue
line = line.strip().split(',')
if line[0] == str(rank):
serp_fo.close()
return int(line[7])
serp_fo.close()
return 0
for root, dirnames, filenames in os.walk(run_base_dir):
for filename in fnmatch.filter(filenames, '*.log'):
log_filename = os.path.join(root, filename)
log_details = get_log_filename_info(filename)
real_queries = user_queries[log_details['userid']][log_details['topic']]
f = open(log_filename, 'r')
query_counter = 0
curr_depth = 0
sim_query_stats = get_blank_query_stats_dict()
for line in f:
line = line.strip().split()
if line[1] == 'QUERY':
sim_query = ' '.join(line[4:])
real_query = queries[real_queries[query_counter]]
if sim_query == real_query['terms']:
if sim_query_stats['click_depth'] > 0 or sim_query_stats['hover_depth'] > 0: # Was there a previous query?
sim_queries[strat][threshold][queryid] = sim_query_stats
sim_query_stats = get_blank_query_stats_dict()
curr_depth = 0
strat = log_details['ss']
queryid = real_query['queryid']
threshold = log_details['threshold']
#print 'QUERY ISSUED {0}-{1} ({2}, t={3})'.format(queryid, real_query['terms'], log_details['ss'], log_details['threshold'])
if strat not in observed_strategies:
observed_strategies[strat] = []
if threshold not in observed_strategies[strat]:
observed_strategies[strat].append(threshold)
if strat not in sim_queries:
sim_queries[strat] = {}
if threshold not in sim_queries[strat]:
sim_queries[strat][threshold] = {}
query_counter += 1
elif line[1] == 'SNIPPET':
#print ' Snippet encountered for {0}'.format(line[5])
curr_depth += 1
sim_query_stats['hover_depth'] = curr_depth
#print ' HD: {0} CD: {1} DV: {2}'.format(sim_query_stats['hover_depth'], sim_query_stats['click_depth'], sim_query_stats['docs_viewed'])
elif line[1] == 'DOC' and line[4] == 'EXAMINING_DOCUMENT':
#print ' Document clicked - {0}'.format(line[5])
sim_query_stats['click_depth'] = curr_depth
sim_query_stats['docs_viewed'] += 1
#print ' HD: {0} CD: {1} DV: {2}'.format(sim_query_stats['hover_depth'], sim_query_stats['click_depth'], sim_query_stats['docs_viewed'])

tylerprete/evaluate-math | postfix.py | Python | mit | 792 | 0.001263
import sys
from stack import Stack
def parse_expression_into_parts(expression):
"""
Parse expression into list of parts
:rtype : list
:param expression: str # i.e. "2 * 3 + ( 2 - 3 )"
"""
raise NotImplementedError("complete me!")
def evaluate_expression(a, b, op):
raise NotImplementedError("complete me!")
def evaluate_postfix(parts):
raise NotImplementedError("complete me!")
if __name__ == "__main__":
expr = None
if len(sys.argv) > 1:
expr = sys.argv[1]
parts = parse_expression_into_parts(expr)
print "Evaluating %s == %s" % (expr, evaluate_postfix(parts))
else:
        print 'Usage: python postfix.py "<expr>" -- i.e. python postfix.py "9 1 3 + 2 * -"'
print "Spaces are required between every term."

pidydx/grr | grr/lib/builders/tests.py | Python | apache-2.0 | 196 | 0
#!/usr/bin/env python
"""Test registry for builders."""
# These need to register plugins so, pylint: disable=unused-import
from grr.lib.builders import signing_test
# pylint: enable=unused-import

pygeo/geoval | geoval/polygon/__init__.py | Python | apache-2.0 | 23 | 0
from .polygon import *

DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/linear_model/tests/test_sgd.py | Python | mit | 44,262 | 0.000226
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import raises
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",

CenterForOpenScience/scinet | scinet/views.py | Python | mit | 4,696 | 0.005537
import json
import os
from flask import request, g, render_template, make_response, jsonify, Response
from helpers.raw_endpoint import get_id, store_json_to_file
from helpers.groups import get_groups
from json_controller import JSONController
from main import app
from pymongo import MongoClient, errors
HERE = os.path.dirname(os.path.abspath(__file__))
# setup database connection
def connect_client():
"""Connects to Mongo client"""
try:
return MongoClient(app.config['DB_HOST'], int(app.config['DB_PORT']))
except errors.ConnectionFailure as e:
raise e
def get_db():
"""Connects to Mongo database"""
if not hasattr(g, 'mongo_client'):
g.mongo_client = connect_client()
g.mongo_db = getattr(g.mongo_client, app.config['DB_NAME'])
g.groups_collection = g.mongo_db[os.environ.get('DB_GROUPS_COLLECTION')]
return g.mongo_db
@app.teardown_appcontext
def close_db(error):
"""Closes connection with Mongo client"""
if hasattr(g, 'mongo_client'):
g.mongo_client.close()
# Begin view routes
@app.route('/')
@app.route('/index/')
def index():
"""Landing page for SciNet"""
return render_template("index.html")
@app.route('/faq/')
def faq():
"""FAQ page for SciNet"""
return render_template("faq.html")
@app.route('/leaderboard/')
def leaderboard():
"""Leaderboard page for SciNet"""
get_db()
groups = get_groups(g.groups_collection)
return render_template("leaderboard.html", groups=groups)
@app.route('/ping', methods=['POST'])
def ping_endpoint():
"""API endpoint determines potential article hash exists in db
:return: status code 204 -- hash not present, continue submission
:return: status code 201 -- hash already exists, drop submission
"""
db = get_db()
target_hash = request.form.get('hash')
if db.raw.find({'hash': target_hash}).count():
return Response(status=201)
else:
return Response(status=204)
@app.route('/articles')
def ArticleEndpoint():
"""Eventual landing page for searching/retrieving articles"""
if request.method == 'GET':
return render_template("articles.html")
@app.route('/raw', methods=['POST'])
def raw_endpoint():
"""API endpoint for submitting raw article data
:return: status code 405 - invalid JSON or invalid request type
:return: status code 400 - unsupported content-type or invalid publisher
:return: status code 201 - successful submission
"""
# Ensure post's content-type is supported
if request.headers['content-type'] == 'application/json':
# Ensure data is a valid JSON
try:
user_submission = json.loads(request.data)
except ValueError:
return Response(status=405)
# generate UID for new entry
uid = get_id()
# store incoming JSON in raw storage
file_path = os.path.join(
HERE,
'raw_payloads',
str(uid)
)
store_json_to_file(user_submission, file_path)
    # hand submission to controller and return Response
db = get_db()
controller_response = JSONController(user_submission, db=db, _id=uid).submit()
return controller_response
# User submitted an unsupported content-type
else:
return Response(status=400)
#@TODO: Implicit or Explicit group additions? Issue #51 comments on the issues page
#@TODO: Add form validation
@app.route('/requestnewgroup/', methods=['POST'])
def request_new_group():
# Grab submission form data and prepare email message
data = request.json
msg = "Someone has request that you add {group_name} to the leaderboard \
groups. The groups website is {group_website} and the submitter can \
be reached at {submitter_email}.".format(
group_name=data['new_group_name'],
group_website=data['new_group_website'],
submitter_email=data['submitter_email'])
return Response(status=200)
'''
try:
email(
subject="SciNet: A new group has been requested",
fro="no-reply@scinet.osf.io",
to='harry@scinet.osf.io',
msg=msg)
return Response(status=200)
except:
return Response(status=500)
'''
# Error handlers
@app.errorhandler(404)
def not_found(error):
    return make_response(jsonify( { 'error': 'Page Not Found' } ), 404)
@app.errorhandler(405)
def method_not_allowed(error):
    return make_response(jsonify( { 'error': 'Method Not Allowed' } ), 405)
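A hypothetical check of the /ping endpoint with Flask's built-in test client (assumes app is configured as in the module above; the hash value is made up):

```python
with app.test_client() as client:
    resp = client.post('/ping', data={'hash': 'abc123'})
    # 204: hash not present, continue submission; 201: already exists
    assert resp.status_code in (201, 204)
```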

nuagenetworks/vspk-python | vspk/v6/nutestsuite.py | Python | bsd-3-clause | 12,813 | 0.009444
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUTestsFetcher
from .fetchers import NUTestSuiteRunsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUTestSuite(NURESTObject):
""" Represents a TestSuite in the VSD
Notes:
            A Test Suite groups a number of diagnostic Tests that can be run consecutively from a given source (NSGateway or VPort) toward a specified destination.
"""
__rest_name__ = "testsuite"
__resource_name__ = "testsuites"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a TestSuite instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> testsuite = NUTestSuite(id=u'xxxx-xxx-xxx-xxx', name=u'TestSuite')
>>> testsuite = NUTestSuite(data=my_dict)
"""
super(NUTestSuite, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._description = None
self._embedded_metadata = None
self._underlay_test = None
self._enterprise_id = None
self._entity_scope = None
self._creation_date = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="underlay_test", remote_name="underlayTest", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.tests = NUTestsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.test_suite_runs = NUTestSuiteRunsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
The name given by the operator to the Test Suite.
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
The name given by the operator to the Test Suite.
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def description(self):
""" Get description value.
Notes:
An operator given description of the Test Suite.
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
An operator given description of the Test Suite.
"""
self._description = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata obj

planetarypy/pdsspect | pdsspect/pdsspect_image_set.py | Python | bsd-3-clause | 26,738 | 0.000037
"""The main model for all the views in pdsspect"""
import os
import warnings
import numpy as np
from astropy import units as astro_units
from ginga.util.dp import masktorgb
from planetaryimage import PDS3Image
from ginga.BaseImage import BaseImage
from ginga import colors as ginga_colors
from ginga.canvas.types.image import Image
from instrument_models.get_wavelength import get_wavelength
ginga_colors.add_color('crimson', (0.86275, 0.07843, 0.23529))
ginga_colors.add_color('teal', (0.0, 0.50196, 0.50196))
ginga_colors.add_color('eraser', (0.0, 0.0, 0.0))
ACCEPTED_UNITS = [
'nm',
'um',
'AA',
]
class ImageStamp(BaseImage):
"""BaseImage for the image view canvas
Parameters
----------
filepath : :obj:`str`
The path to the image to be opened
metadata : None
Metadata for `BaseImage`
logger : None
logger for `BaseImage`
wavelength : :obj:`float` [``nan``]
Image's filter wavelength. If ``nan``, will try to use
:meth:`instrument_models.get_wavelength.get_wavelength` to get the
wavelength
unit : :obj:`str` [``nm``]
Wavelength unit. Must be one of :attr:`accepted_units`
Attributes
----------
pds_image : :class:`~planetaryimage.pds3image.PDS3Image`
Image object that holds data and the image label
image_name : :obj:`str`
The basename of the filepath
seen : :obj:`bool`
False if the image has not been seen by the viewer. True otherwise
        Default is False
cuts : :obj:`tuple`
The cut levels of the image. Default is two `None` types
accepted_units : :obj:`list`
List of accepted units: ``nm``, ``um``, and ``AA``
"""
accepted_units = ACCEPTED_UNITS
def __init__(self, filepath, metadata=None, logger=None,
wavelength=float('nan'), unit='nm'):
self.pds_image = PDS3Image.open(filepath)
data = self.pds_image.image.astype(float)
BaseImage.__init__(self, data_np=data,
metadata=metadata, logger=logger)
self.set_data(data)
self.image_name = os.path.basename(filepath)
self.seen = False
self.cuts = (None, None)
self._check_acceptable_unit(unit)
if np.isnan(wavelength):
wavelength = get_wavelength(self.pds_image.label, unit)
unit = astro_units.Unit(unit)
self._wavelength = wavelength * unit
@property
def data(self):
""":class:`numpy.ndarray` : Image data"""
return self.get_data()
@property
def wavelength(self):
""":obj:`int` : The images wavelength"""
return float(round(self._wavelength.value, 3))
@wavelength.setter
def wavelength(self, new_wavelength):
self._wavelength = new_wavelength * self._wavelength.unit
@property
def unit(self):
""":class:`astropy.units.Unit` : The :attr:`wavelength` unit
Setting the unit will convert the wavelength value as well. The new
unit must also be one of the :attr:`accepted_units`
"""
        """
return self._wavelength.unit
@unit.setter
def unit(self, new_unit):
self._check_acceptable_unit(new_unit)
new_unit = astro_units.Unit(new_unit)
self._wavelength = self._wavelength.to(new_unit)
def _check_acceptable_unit(self, unit):
if unit not in self.accepted_units:
raise ValueError(
                'Unit must be one of the following: %s' % (
', '.join(self.accepted_units)
)
)
def get_wavelength(self):
""":class:`astropy.units.quantity.Quantity` Copy of the wavelength"""
return self._wavelength.copy()
class PDSSpectImageSet(object):
"""Model for each view is pdsspect
The images loaded should all have the same shape. Otherwise the images will
have the smallest common shape and not look as expected (i.e., If when
loading two images where one image has a shape of ``(63, 36)`` and the
other image has a shape of ``(24, 42)``, the displayed shape will be
``(24, 36)``. This will cause the first image to have the right side cut
off and the second image to have the top cut off). This is done so all ROIs
created can apply to the entire list of images. To avoid this behavior,
either only open images that have the same shape or open images one at a
time.
Parameters
----------
filepaths : :obj:`list`
List of filepaths to images
Attributes
----------
colors : :obj:`list` of :obj:`str`
List of possible color names to make ROIs.
The possible choices for colors: ``red``, ``brown``, ``lightblue``,
``lightcyan``, ``darkgreen``, ``yellow``, ``pink``, ``teal``,
``goldenrod``, ``sienna``, ``darkblue``, ``crimson``, ``maroon``,
``purple``, and ``eraser (black)``
selection_types : :obj:`list` of :obj:`str`
Selection types for making ROIs. The possible types are
:class:`Filled Rectangle <.pdsspect.roi.Rectangle>`,
:class:`Filled Polygon <.pdsspect.roi.Polygon>`, and
        :class:`Pencil <.pdsspect.roi.Pencil>` (single points).
accepted_units : :obj:`list`
List of accepted units: ``nm``, ``um``, and ``AA``
images : :obj:`list` of :class:`ImageStamp`
Images to view and make selections. Must all have the same dimensions
filepaths : :obj:`list`
List of filepaths to images
current_color_index : :obj:`int`
Index of the current color in :attr:`colors` list for ROI creation
(Default is 0)
"""
colors = [
'red',
'brown',
'lightblue',
'lightcyan',
'darkgreen',
'yellow',
'pink',
'teal',
'goldenrod',
'sienna',
'darkblue',
'crimson',
'maroon',
'purple',
'eraser',
]
selection_types = [
'filled rectangle',
'filled polygon',
'pencil'
]
accepted_units = ACCEPTED_UNITS
def __init__(self, filepaths):
self._views = []
self.images = []
self.filepaths = filepaths
self._create_image_list()
self._determin_shape()
self._current_image_index = 0
self.current_color_index = 0
self._selection_index = 0
self._zoom = 1.0
self._center = None
self._alpha = 1.0
self._flip_x = False
self._flip_y = False
self._swap_xy = False
mask = np.zeros(self.shape[:2], dtype=np.bool)
self._maskrgb = masktorgb(mask, self.color, self.alpha)
self._roi_data = self._maskrgb.get_data().astype(float)
self._maskrgb_obj = Image(0, 0, self._maskrgb)
self._subsets = []
self._simultaneous_roi = False
self._unit = 'nm'
self.set_unit()
def _determin_shape(self):
shape = []
for image in self.images:
rows, cols = image.shape[:2]
if len(shape) == 0:
shape = [rows, cols]
else:
shape[0] = rows if shape[0] > rows else shape[0]
shape[1] = cols if shape[1] > cols else shape[1]
self.shape = tuple(shape)
s_ = np.s_[:self.shape[0], :self.shape[1]]
for image in self.images:
image.set_data(image.data[s_])
def _create_image_list(self):
self.images = []
for filepath in self.filepaths:
try:
image = ImageStamp(filepath)
self.images.append(image)
except Exception:
warnings.warn("Unable to open %s" % (filepath))
def register(self, view):
"""Register a View with the model"""
if view not in self._views:
self._views.append(view)
def unregister(self, view):
"""Unregister a View with the model"""
if view in self._views:
self._views.remove(view)
@property
def filenames(self):
""":obj:`list` of :obj:`str` : Basenames of the :attr:`filepaths`"""
        return [os.path.basename(filepath) for filepath in self.filepaths]
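A short sketch of loading a set of images with the model above (assumes valid PDS3 image files; the filenames are placeholders):

```python
image_set = PDSSpectImageSet(['image1.img', 'image2.img'])
print(image_set.filenames)               # ['image1.img', 'image2.img']
print(image_set.images[0].wavelength)    # float; nan if it cannot be determined
```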

valdergallo/raidmanager | manager/admin.py | Python | mit | 81 | 0
# encoding: utf-8
from django.contrib import admin
# Register your models here.

molmod/tamkin | tamkin/pftools.py | Python | gpl-3.0 | 21,612 | 0.004164
# -*- coding: utf-8 -*-
# TAMkin is a post-processing toolkit for normal mode analysis, thermochemistry
# and reaction kinetics.
# Copyright (C) 2008-2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, An Ghysels
# <An.Ghysels@UGent.be> and Matthias Vandichel <Matthias.Vandichel@UGent.be>
# Center for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all
# rights reserved unless otherwise stated.
#
# This file is part of TAMkin.
#
# TAMkin is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "TAMkin: A Versatile Package for Vibrational Analysis and Chemical Kinetics",
# An Ghysels, Toon Verstraelen, Karen Hemelsoet, Michel Waroquier and Veronique
# Van Speybroeck, Journal of Chemical Information and Modeling, 2010, 50,
# 1736-1750W
# http://dx.doi.org/10.1021/ci100099g
#
# TAMkin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""High level utilities for partition functions"""
from __future__ import print_function, division
import sys, numpy, types, csv
from molmod.units import kjmol, mol, kelvin, joule, centimeter
from molmod.constants import boltzmann, lightspeed
from tamkin.partf import PartFun
__all__ = ["ThermoAnalysis", "ThermoTable", "ReactionAnalysis"]
class ThermoAnalysis(object):
"""Perform a regular thermochemistry analysis."""
    def __init__(self, pf, temps):
"""
Arguments:
| ``pf`` -- A partition function
| ``temps`` -- An array with temperatures to consider.
The tables with energy, free energy, heat capacity, entropy,
logarithm of the partition function and the first and second order
derivative of the logarithm of the partition functions are computed
and stored in self.tables. The latter attribute is a list of
ThermoTable objects.
The results can be written to a csv file with the method
write_to_file.
"""
self.pf = pf
self.temps = temps
self.tables = [
ThermoTable("Internal heat", kjmol, "kJ/mol", "internal_heat", pf, temps),
ThermoTable("Heat capacity", joule/mol/kelvin, "J/(mol*K)", "heat_capacity", pf, temps),
ThermoTable("Free energy", kjmol, "kJ/mol", "free_energy", pf, temps),
ThermoTable("Chemical potential", kjmol, "kJ/mol", "chemical_potential", pf, temps),
ThermoTable("Entropy", joule/mol/kelvin, "J/(mol*K)", "entropy", pf, temps),
ThermoTable("ln(Z_N)/N", 1.0, "1", "log", pf, temps),
ThermoTable("1/N d ln(Z_N) / dT", 1.0/kelvin, "1/K", "logt", pf, temps),
ThermoTable("1/N d^2 ln(Z_N) / dT^2", 1.0/kelvin**2, "1/K^2", "logtt", pf, temps),
ThermoTable("d ln(Z_N) / dN", 1.0, "1", "logn", pf, temps),
ThermoTable("d ln(Z_N) / dN - log(V/N)", 1.0, "mixed: 1 or ln(bohr^-dim)", "logv", pf, temps),
]
def write_to_file(self, filename):
"""Write the entire thermochemistry analysis to a csv file.
Argument:
| ``filename`` -- the file to write the output.
"""
with open(filename, 'w') as f:
self.dump(f)
def dump(self, f):
"""Write the entire thermochemistry analysis in csv format.
Argument:
| ``f`` -- the stream to write to.
"""
for table in self.tables:
table.dump(f)
print(file=f)
class ThermoTable(object):
"""A thermo table, i.e. the thermochemistry analysis for one
specific thermodynamic quantity.
"""
def __init__(self, label, unit, unit_name, method_name, pf, temps, pf_method_name=None):
"""This object is used by the ThermoAnalysis class and should probably
never be used directly.
Arguments:
| ``label`` -- a string to identify the thermodynamic quantity.
| ``unit`` -- the conversion factor from the conventional unit to
atomic units
| ``unit_name`` -- a human readable string that describes the
conventional unit
| ``method_name`` -- the method of the partition function that
computes the quantity of interest
| ``temps`` -- the temperatures at which the quantity has to be
computed.
Optional argument:
| ``pf_method_name`` -- In case of the actual partition function
object, this alternative method can be used
            to compute the quantity of interest. This
workaround is required due to poor naming
conventions in statistical physics.
The results are stored in an array self.data of which the columns
correspond to the given temperatures and the rows correspond to the
different terms in the partition function.
        The attribute self.keys is a list describing the rows, i.e. each
contribution from the partition function.
"""
if pf_method_name is None:
pf_method_name = method_name
self.label = label
self.unit = unit
self.unit_name = unit_name
self.method_name = method_name
self.pf = pf
self.temps = temps
self.pf_method_name = pf_method_name
self.keys = []
data = []
for term in [pf] + pf.terms:
self.keys.append(term.name)
if isinstance(term, PartFun):
method = getattr(term, pf_method_name, None)
else:
method = getattr(term, method_name, None)
if isinstance(method, types.MethodType):
row = []
for temp in temps:
row.append(method(temp))
data.append(numpy.array(row,ndmin=2))
method = getattr(term, "%s_terms" % method_name, None)
if isinstance(method, types.MethodType):
columns = []
for i in range(term.num_terms):
self.keys.append("%s (%i)" % (term.name, i))
for temp in temps:
columns.append(method(temp))
data.append(numpy.array(columns).transpose())
self.data = numpy.concatenate(data)
def dump(self, f):
"""Dumps the table in csv format
Arguments:
| ``f`` -- the file object to write to
"""
c = csv.writer(f)
c.writerow([self.label, "[%s]" % self.unit_name])
c.writerow(["Temperatures"] + [temp for temp in self.temps])
for key, row in zip(self.keys, self.data):
c.writerow([key] + [value/self.unit for value in row])
class ReactionAnalysis(object):
"""A Reaction analysis object."""
def __init__(self, kinetic_model, temp_low, temp_high, temp_step=10*kelvin):
"""
Arguments:
| ``kinetic_model`` -- A kinetic model object. See
mod:`tamkin.chemmod`.
| ``temp_low`` -- The lower bound of the temperature interval in
Kelvin.
| ``temp_high`` -- The upper bound of the temperature interval in
Kelvin.
Optional arguments:
| ``temp_step`` -- The resolution of the temperature grid.

salivatears/ansible | lib/ansible/plugins/action/service.py | Python | gpl-3.0 | 2,322 | 0.003876
# (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
''' handler for package operations '''
name = self._task.args.get('name', None)
state = self._task.args.get('state', None)
module = self._task.args.get('use', 'auto')
if module == 'auto':
try:
module = self._templar.template('{{ansible_service_mgr}}')
except:
pass # could not get it from template!
if module == 'auto':
facts = self._execute_module(module_name='setup', module_args=dict(filter='ansible_service_mgr'), task_vars=task_vars)
self._display.debug("Facts %s" % facts)
            if 'failed' not in facts:
module = getattr(facts['ansible_facts'], 'ansible_service_mgr', 'auto')
if not module or module == 'auto' or module not in self._shared_loader_obj.module_loader:
module = 'service'
if module != 'auto':
# run the 'service' module
new_module_args = self._task.args.copy()
if 'use' in new_module_args:
del new_module_args['use']
self._display.vvvv("Running %s" % module)
return self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars)
else:
return {'failed': True, 'msg': 'Could not detect which service manager to use. Try gathering facts or setting the "use" option.'}

respawner/peering-manager | peering/__init__.py | Python | apache-2.0 | 2,416 | 0.001656
import json
import re
import subprocess
from django.conf import settings
default_app_config = "peering.apps.PeeringConfig"
def call_irr_as_set_resolver(irr_as_set, address_family=6):
"""
Call a subprocess to expand the given AS-SET for an IP version.
"""
prefixes = []
if not irr_as_set:
return prefixes
# Call bgpq3 with arguments to get a JSON result
command = [
settings.BGPQ3_PATH,
"-h",
settings.BGPQ3_HOST,
"-S",
settings.BGPQ3_SOURCES,
"-{}".format(address_family),
"-A",
"-j",
"-l",
"prefix_list",
irr_as_set,
]
# Merge user settings to command line right before the name of the prefix list
if settings.BGPQ3_ARGS:
index = len(command) - 3
command[index:index] = settings.BGPQ3_ARGS[
"ipv6" if address_family == 6 else "ipv4"
]
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
if process.returncode != 0:
error_log = "bgpq3 exit
|
code is {}".format(process.returncode)
if err and err.strip():
error_log += ", stderr: {}".format(err)
raise ValueError(error_log)
    prefixes.extend([p for p in json.loads(out.decode())["prefix_list"]])
return prefixes
def parse_irr_as_set(asn, irr_as_set):
"""
    Validate that an AS-SET is usable and split it into smaller parts if it is actually
composed of several AS-SETs.
"""
as_sets = []
# Can't work with empty or whitespace only AS-SET
if not irr_as_set or not irr_as_set.strip():
return ["AS{}".format(asn)]
unparsed = re.split(r"[/,&\s]", irr_as_set)
for value in unparsed:
value = value.strip()
if not value:
continue
for regexp in [
# Remove registry prefix if any
r"^(?:{}):[:\s]".format(settings.BGPQ3_SOURCES.replace(",", "|")),
# Removing "ipv4:" and "ipv6:"
r"^(?:ipv4|ipv6):",
]:
pattern = re.compile(regexp, flags=re.IGNORECASE)
value, number_of_subs_made = pattern.subn("", value)
# If some substitutions have been made, make sure to clean things up
if number_of_subs_made > 0:
value = value.strip()
as_sets.append(value)
return as_sets
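An illustrative call to parse_irr_as_set (assumes settings.BGPQ3_SOURCES includes RIPE; the ASN and AS-SET names are made up):

```python
parse_irr_as_set(64500, 'RIPE::AS-EXAMPLE, ipv6:AS-OTHER')
# -> ['AS-EXAMPLE', 'AS-OTHER']
parse_irr_as_set(64500, '')   # empty AS-SET falls back to the ASN
# -> ['AS64500']
```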

BehavioralInsightsTeam/edx-platform | openedx/core/djangoapps/theming/checks.py | Python | agpl-3.0 | 2,784 | 0.001796
"""
Settings validations for the theming app
"""
import os
import six
from django.conf import settings
from django.core.checks import Error, Tags, register
@register(Tags.compatibility)
def check_comprehensive_theme_settings(app_configs, **kwargs):
"""
Checks the comprehensive theming theme directory settings.
Raises compatibility Errors upon:
- COMPREHENSIVE_THEME_DIRS is not a list
- theme dir path is not a string
- theme dir path is not an absolute path
- path specified in COMPREHENSIVE_THEME_DIRS does not exist
Returns:
List of any Errors.
"""
if not getattr(settings, "ENABLE_COMPREHENSIVE_THEMING"):
# Only perform checks when comprehensive theming is enabled.
return []
errors = []
# COMPREHENSIVE_THEME_DIR is no longer supported - support has been removed.
if hasattr(settings, "COMPREHENSIVE_THEME_DIR"):
theme_dir = settings.COMPREHENSIVE_THEME_DIR
errors.append(
Error(
"COMPREHENSIVE_THEME_DIR setting has been removed in favor of COMPREHENSIVE_THEME_DIRS.",
hint='Transfer the COMPREHENSIVE_THEME_DIR value to COMPREHENSIVE_THEME_DIRS.',
obj=theme_dir,
id='openedx.core.djangoapps.theming.E001',
)
)
if hasattr(settings, "COMPREHENSIVE_THEME_DIRS"):
theme_dirs = settings.COMPREHENSIVE_THEME_DIRS
if not isinstance(theme_dirs, list):
errors.append(
Error(
"COMPREHENSIVE_THEME_DIRS must be a list.",
obj=theme_dirs,
id='openedx.core.djangoapps.theming.E004',
)
)
if not all([isinstance(theme_dir, six.string_types) for theme_dir in theme_dirs]):
errors.append(
Error(
"COMPREHENSIVE_THEME_DIRS must contain only strings.",
obj=theme_dirs,
id='openedx.core.djangoapps.theming.E005',
)
)
if not all([theme_dir.startswith("/") for theme_dir in theme_dirs]):
errors.append(
Error(
"COMPREHENSIVE_THEME_DIRS must contain only absolute paths to themes dirs.",
                    obj=theme_dirs,
id='openedx.core.djangoapps.theming.E006',
)
)
if not all([os.path.isdir(theme_dir) for theme_dir in theme_dirs]):
errors.append(
Error(
"COMPREHENSIVE_THEME_DIRS must contain valid paths.",
obj=theme_dirs,
id='openedx.core.djangoapps.theming.E007',
)
)
return errors
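For illustration, hypothetical settings values that would trip the checks above:

```python
# COMPREHENSIVE_THEME_DIRS = "/edx/app/themes"      # not a list      -> E004
# COMPREHENSIVE_THEME_DIRS = ["themes/my-theme"]    # not absolute    -> E006
# COMPREHENSIVE_THEME_DIRS = ["/nonexistent/path"]  # not a valid dir -> E007
```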

arrayfire/arrayfire-python | arrayfire/features.py | Python | bsd-3-clause | 2,480 | 0.002823
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
"""
Features class used for Computer Vision algorithms.
"""
from .library import *
from .array import *
import numbers
class Features(object):
"""
A container class used for various feature detectors.
Parameters
----------
num: optional: int. default: 0.
Specifies the number of features.
"""
def __init__(self, num=0):
self.feat = c_void_ptr_t(0)
if num is not None:
assert(isinstance(num, numbers.Number))
safe_call(backend.get().af_create_features(c_pointer(self.feat), c_dim_t(num)))
def __del__(self):
"""
Release features' memory
"""
if self.feat:
backend.get().af_release_features(self.feat)
self.feat = None
def num_features(self):
"""
Returns the number of features detected.
"""
num = c_dim_t(0)
safe_call(backend.get().af_get_features_num(c_pointer(num), self.feat))
return num
def get_xpos(self):
"""
Returns the x-positions of the features detected.
"""
out = Array()
        safe_call(backend.get().af_get_features_xpos(c_pointer(out.arr), self.feat))
return out
def get_ypos(self):
"""
        Returns the y-positions of the features detected.
"""
out = Array()
safe_call(backend.get().af_get_features_ypos(c_pointer(out.arr), self.feat))
return out
def get_score(self):
"""
Returns the scores of the features detected.
"""
out = Array()
safe_call(backend.get().af_get_features_score(c_pointer(out.arr), self.feat))
return out
def get_orientation(self):
"""
Returns the orientations of the features detected.
"""
out = Array()
safe_call(backend.get().af_get_features_orientation(c_pointer(out.arr), self.feat))
return out
def get_size(self):
"""
Returns the sizes of the features detected.
"""
out = Array()
safe_call(backend.get().af_get_features_size(c_pointer(out.arr), self.feat))
return out

oblique-labs/pyVM | rpython/flowspace/test/test_checkgraph.py | Python | mit | 2,837 | 0.003525
from rpython.flowspace.model import *
import py
def test_mingraph():
g = FunctionGraph("g", Block([]))
g.startblock.closeblock(Link([Constant(1)], g.returnblock))
checkgraph(g)
def template():
g = FunctionGraph("g", Block([]))
g.startblock.closeblock(Link([Constant(1)], g.returnblock))
checkgraph(g)
py.test.raises(AssertionError, checkgraph, g)
def test_exitlessblocknotexitblock():
g = FunctionGraph("g", Block([]))
py.test.raises(AssertionError, checkgraph, g)
def test_nonvariableinputarg():
b = Block([Constant(1)])
g = FunctionGraph("g", b)
g.startblock.closeblock(Link([Constant(1)], g.returnblock))
py.test.raises(AssertionError, checkgraph, g)
def test_multiplydefinedvars():
v = Variable()
g = FunctionGraph("g", Block([v, v]))
g.startblock.closeblock(Link([v], g.returnblock))
py.test.raises(AssertionError, checkgraph, g)
v = Variable()
b = Block([v])
b.operations.append(SpaceOperation("add", [Constant(1), Constant(2)], v))
g = FunctionGraph("g", b)
g.startblock.closeblock(Link([v], g.returnblock))
py.test.raises(AssertionError, checkgraph, g)
def test_varinmorethanoneblock():
v = Variable()
g = FunctionGraph("g", Block([]))
g.startblock.operations.append(SpaceOperation("pos", [Constant(1)], v))
b = Block([v])
g.startblock.closeblock(Link([v], b))
    b.closeblock(Link([v], g.returnblock))
py.test.raises(AssertionError, checkgraph, g)
def test_useundefinedvar():
v = Variable()
g = FunctionGraph("g", Block([]))
g.startblock.closeblock(Link([v], g.returnblock))
    py.test.raises(AssertionError, checkgraph, g)
v = Variable()
g = FunctionGraph("g", Block([]))
g.startblock.exitswitch = v
g.startblock.closeblock(Link([Constant(1)], g.returnblock))
py.test.raises(AssertionError, checkgraph, g)
def test_invalid_arg():
v = Variable()
g = FunctionGraph("g", Block([]))
g.startblock.operations.append(SpaceOperation("pos", [1], v))
g.startblock.closeblock(Link([v], g.returnblock))
py.test.raises(AssertionError, checkgraph, g)
def test_invalid_links():
g = FunctionGraph("g", Block([]))
g.startblock.closeblock(Link([Constant(1)], g.returnblock), Link([Constant(1)], g.returnblock))
py.test.raises(AssertionError, checkgraph, g)
v = Variable()
g = FunctionGraph("g", Block([v]))
g.startblock.exitswitch = v
g.startblock.closeblock(Link([Constant(1)], g.returnblock, True),
Link([Constant(1)], g.returnblock, True))
py.test.raises(AssertionError, checkgraph, g)
v = Variable()
g = FunctionGraph("g", Block([v]))
g.startblock.exitswitch = v
g.startblock.closeblock(Link([Constant(1)], g.returnblock))
py.test.raises(AssertionError, checkgraph, g)
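# Taken together, these cases pin down the invariants checkgraph() enforces:
# every non-return block must be closed with at least one exit link, block
# input arguments must be Variables, a Variable may be defined only once and
# used in only one block, operation arguments must be Variable or Constant
# instances, and multiple (or conditional) exit links require a matching
# exitswitch on the block.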
|
WeftWiki/phetools
|
hocr/hocr_cgi.py
|
Python
|
gpl-3.0
| 10,682
| 0.008706
|
# -*- coding: utf-8 -*-
#
# @file hocr.py
#
# @remark Copyright 2014 Philippe Elie
# @remark Read the file COPYING
#
# @author Philippe Elie
import os
import sys
import json
sys.path.append(os.path.expanduser('~/phe/common'))
import serialize
sys.path.append(os.path.expanduser('~/phe/jobs'))
import sge_jobs
import common_html
import time
import types
import utils
import hocr
|
def span_anchor(anchor, table):
return '<span id="' + table + '_' + str(anchor) + '"></span>'
def a_anchor(anchor, table):
return '<a href="#' + table + '_' + str(anchor) + '">' + str(anchor) + '</a>'
def format_job_table_anchor(anchor):
return span_anchor(anchor, 'job') + a_anchor(anchor, 'acct')
def format_accounting_table_anchor(anchor):
return span_anchor(anchor, 'acct') + a_anchor(anchor, 'job')
def format_timestamp(timestamp, fields):
return time.strftime("%d/%m/%Y %H:%M:%S", time.gmtime(timestamp))
def format_sge_jobnumber_job(sge_jobnumber, fields):
if not sge_jobnumber:
return sge_jobnumber
return format_job_table_anchor(sge_jobnumber)
def format_sge_jobnumber_accounting(sge_jobnumber, fields):
return format_accounting_table_anchor(sge_jobnumber)
def format_command(cmd, fields):
if cmd != 'python':
return cmd
else:
arg = json.loads(fields['job_args'])[0]
prefixes = [ '/data/project/phetools/phe/hocr/', '/data/project/phetools/phe/ocr/', '/data/project/phetools/phe/', '/data/project/phetools/botpywi/' ]
for prefix in prefixes:
if arg.startswith(prefix):
arg = arg[len(prefix):]
return arg
def format_args(args, fields):
args = json.loads(args)
if fields['job_run_cmd'] == 'python':
args = [ x.encode('utf-8') for x in args[1:]]
else:
args = [ x.encode('utf-8') for x in args]
new_args = []
prefix = '/data/project/phetools/'
for a in args:
if a.startswith(prefix):
a = '~/' + a[len(prefix):]
a = a.replace('_', ' ')
new_args.append(a)
return ' '.join(new_args)
def format_max_vmem(vmem, fields):
if not vmem:
vmem = 0
return "%.2fM" % (vmem / (1024.0*1024))
def format_hostname(hostname, fields):
suffix = '.eqiad.wmflabs'
if hostname.endswith(suffix):
hostname = hostname[:-len(suffix)]
return hostname
def format_job_id_job(job_id, fields):
return format_job_table_anchor(job_id)
def format_job_id_accounting(job_id, fields):
return format_accounting_table_anchor(job_id)
def format_time(t, fields):
if t:
return "%.2f" % t
return str(t)
# fields displayed by cmd=status, [0] is the database field name, [1] is
# the <th> label, [2] is an optional formatter function, default formatter is
# str(data).
job_table_field = [
('job_id', 'job id', format_job_id_job),
('job_state', 'job state'),
('sge_jobnumber', 'sge job id', format_sge_jobnumber_job),
('job_run_cmd', 'cmd', format_command),
('job_args', 'args', format_args),
('job_submit_time', 'submit time (UTC)', format_timestamp),
]
accounting_table_field = [
('job_id', 'job id', format_job_id_accounting),
('sge_jobnumber', 'sge job id', format_sge_jobnumber_accounting),
('sge_hostname', 'host name', format_hostname),
('sge_qsub_time', 'submit at', format_timestamp),
('sge_start_time', 'start at', format_timestamp),
('sge_end_time', 'end at', format_timestamp),
('sge_failed', 'failed'),
('sge_exit_status', 'exit status'),
('sge_ru_utime', 'utime', format_time),
('sge_ru_stime', 'stime', format_time),
('sge_ru_wallclock', 'wallclock'),
('sge_used_maxvmem', 'max vmem', format_max_vmem),
]
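# Each entry above is (db_field, column_label[, formatter]); to_html() falls
# back to str() when no formatter is supplied. A hypothetical extra column
# could be added as:
#
#     ('sge_exit_status', 'exit (hex)', lambda v, fields: hex(v or 0)),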
def query_params(environ):
import cgi
field = cgi.FieldStorage(environ['wsgi.input'])
rdict = {
'format' : 'html',
'cmd' : 'status',
'filter' : '',
'book' : '',
'lang' : ''
}
for name in field:
if type(field[name]) == types.ListType:
rdict[name] = field[name][-1].value
else:
rdict[name] = field[name].value
return rdict
def handle_ping(start_response):
# pseudo ping, as we run on the web server, we always return 1 ms.
text = json.dumps( { 'error' : 0,
'text' : 'pong',
'server' : 'hocr',
'ping' : 0.001
} )
start_response('200 OK', [('Content-Type',
'text/plain; charset=UTF-8'),
('Content-Length', len(text)),
('Access-Control-Allow-Origin', '*')])
return [ text ]
def get_int_param(params, name, default, max_val = None):
try:
result = params.get(name, default)
result = int(result)
if max_val:
result = min(result, max_val)
except:
result = default
return result
def table_header(fields):
text = ' <tr>\n'
for f in fields:
text += ' <th>' + f[1] + '</th>\n'
text += ' </tr>\n'
return text
def to_html(data, fields):
text = ' <tr>\n'
for f in fields:
if f[0] in data:
text += ' <td>'
if len(f) >= 3:
text += str(f[2](data[f[0]], data))
else:
text += str(data[f[0]])
text += '</td>\n'
else:
            text += '<td>Unknown field</td>'
text += ' </tr>\n'
return text
def prev_next_link(prev, has_next, state_filter, limit, offset, default_limit):
href = False
if prev:
label = 'Prev'
if offset:
new_offset = max(offset - limit, 0)
href = True
else:
label = 'Next'
if has_next:
new_offset = offset + limit
href = True
if href:
link = '<a href="?cmd=status&filter=%s' % state_filter
if new_offset:
link += "&offset=%d" % new_offset
if limit != default_limit:
link += "&limit=%d" % limit
link += '">' + label + '</a>'
else:
link = label
return link
def job_table(db_obj, state_filter, limit, offset, default_limit, max_limit):
data, has_next = db_obj.get_job_table(state_filter, limit, offset)
link_prev = prev_next_link(True, has_next, state_filter, limit,
offset, default_limit)
link_next = prev_next_link(False, has_next, state_filter, limit,
offset, default_limit)
text = link_prev + ' ' + link_next + '\n'
text += '<table class="wikitable" style="text-align:right;margin-left:auto;margin-right:auto;">\n'
global job_table_field
text += table_header(job_table_field)
for d in data:
text += to_html(d, job_table_field)
text += '</table>\n'
return text, data
def accounting_table(db_obj, jobs, state_filter,
limit, offset, default_limit, max_limit):
job_ids = [ x['job_id'] for x in jobs ]
# FIXME: offset/limit not correct, we must have separate offset/limit
# than the job table offset/limit.
data, has_next = db_obj.get_accounting_table(limit, 0, job_ids)
global accounting_table_field
link_prev = prev_next_link(True, has_next, state_filter, limit,
offset, default_limit)
link_next = prev_next_link(False, has_next, state_filter, limit,
offset, default_limit)
text = link_prev + ' ' + link_next + '\n'
text += '<table class="wikitable" style="text-align:right;margin-left:auto;margin-right:auto;">\n'
text += table_header(accounting_table_field)
for d in data:
text += to_html(d, accounting_table_field)
text += '</table>\n'
return text
def handle_status(params, start_response):
default_limit = 50
max_limit = 1000
state_filter = params.get('filter', '')
limit = get_int_param(params, 'limit', default_limit, max_limit)
offset = get_int_param(params, 'offset', 0, None)
#print >> sys.stderr, params
db_obj = sge_jobs.DbJob()
text = common_html.
|
chrmoritz/zoxel
|
src/plugins/tool_extrude.py
|
Python
|
gpl-3.0
| 6,660
| 0.000601
|
# tool_extrude.py
# Extrusion tool.
# Copyright (c) 2015, Lennart Riecken
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PySide import QtGui, QtCore
from tool import Tool, EventData, MouseButtons, KeyModifiers, Face
from plugin_api import register_plugin
class ExtrudeTool(Tool):
def __init__(self, api):
super(ExtrudeTool, self).__init__(api)
# Create our action / icon
self.action = QtGui.QAction(QtGui.QPixmap(":/images/gfx/icons/border-bottom-thick.png"), "Extrude", None)
self.action.setStatusTip("Extude region")
self.action.setCheckable(True)
self.action.setShortcut(QtGui.QKeySequence("Ctrl+0"))
# Register the tool
self.priority = 10
self.api.register_tool(self)
# Area tool helper
self._mouse = None
self._stamp = []
self.xdir = True
self.ydir = True
self.zdir = True
self.pastoffset = 0
self.fixeddirection = False
def drawstamp(self, data, dx, dy, dz):
for x, y, z, col in self._stamp:
tgt = data.voxels.get(x + dx, y + dy, z + dz)
if tgt == 0:
data.voxels.set(x + dx, y + dy, z + dz, col, True, 1)
data.voxels.completeUndoFill()
def on_drag_start(self, data):
if len(data.voxels._selection) > 0:
self._stamp = []
for x, y, z in data.voxels._selection:
col = data.voxels.get(x, y, z)
self._stamp.append((x, y, z, col))
self._mouse = (data.mouse_x, data.mouse_y)
if QtCore.Qt.Key_X in data.keys:
self.xdir = True
self.ydir = False
self.zdir = False
self.fixeddirection = True
elif QtCore.Qt.Key_Y in data.keys:
self.xdir = False
self.ydir = True
self.zdir = False
self.fixeddirection = True
elif QtCore.Qt.Key_Z in data.keys:
self.xdir = False
self.ydir = False
self.zdir = True
self.fixeddirection = True
else:
self.xdir = True
self.ydir = True
self.zdir = True
self.fixeddirection = False
self.pastoffset = 0
# When dragging, create the selection
def on_drag(self, data):
# In case the first click has missed a valid target.
if self._mouse is None or len(self._stamp) == 0:
return
dx = data.mouse_x - self._mouse[0]
dy = data.mouse_y - self._mouse[1]
# Work out some sort of vague translation between screen and voxels
sx = self.api.mainwindow.width() / data.voxels.width
sy = self.api.mainwindow.height() / data.voxels.height
dx = int(round(dx / float(sx)))
dy = int(round(dy / float(sy)))
if dx == 0 and dy == 0:
return
# Work out translation for x,y
ax, ay = self.api.mainwindow.display.view_axis()
tx = 0
ty = 0
tz = 0
tdx = 0
tdy = 0
tdz = 0
        if ax == self.api.mainwindow.display.X_AXIS:
            tdx = dx
            if dx > 0:
tx = 1
elif dx < 0:
tx = -1
elif ax == self.api.mainwindow.display.Y_AXIS:
tdy = dx
if dx > 0:
ty = 1
elif dx < 0:
ty = -1
elif ax == self.api.mainwindow.display.Z_AXIS:
tdz = dx
if dx > 0:
tz = 1
elif dx < 0:
tz = -1
if ay == self.api.mainwindow.display.X_AXIS:
tdx = dy
if dy > 0:
tx = 1
elif dy < 0:
tx = -1
elif ay == self.api.mainwindow.display.Y_AXIS:
tdy = dy
if dy > 0:
ty = -1
elif dy < 0:
ty = 1
elif ay == self.api.mainwindow.display.Z_AXIS:
tdz = dy
if dy > 0:
tz = 1
elif dy < 0:
tz = -1
if self.fixeddirection:
if self.xdir:
if tx != 0:
self._mouse = (data.mouse_x, data.mouse_y)
self.pastoffset += tx
self.drawstamp(data, self.pastoffset, 0, 0)
elif self.ydir:
if ty != 0:
self._mouse = (data.mouse_x, data.mouse_y)
self.pastoffset += ty
self.drawstamp(data, 0, self.pastoffset, 0)
elif self.zdir:
if tz != 0:
self._mouse = (data.mouse_x, data.mouse_y)
self.pastoffset += tz
self.drawstamp(data, 0, 0, self.pastoffset)
else:
if tx != 0 and self.xdir and (not self.ydir or (abs(tdx) > abs(tdy) and abs(tdx) > abs(tdz))):
self._mouse = (data.mouse_x, data.mouse_y)
self.ydir = False
self.zdir = False
self.pastoffset += tx
self.drawstamp(data, self.pastoffset, 0, 0)
elif ty != 0 and self.ydir and (not self.zdir or abs(tdy) > abs(tdz)):
self._mouse = (data.mouse_x, data.mouse_y)
self.xdir = False
self.zdir = False
self.pastoffset += ty
self.drawstamp(data, 0, self.pastoffset, 0)
elif tz != 0 and self.zdir:
self._mouse = (data.mouse_x, data.mouse_y)
self.xdir = False
self.ydir = False
self.pastoffset += tz
self.drawstamp(data, 0, 0, self.pastoffset)
def on_drag_end(self, data):
data.voxels.clear_selection()
dx = self.pastoffset if self.xdir else 0
dy = self.pastoffset if self.ydir else 0
dz = self.pastoffset if self.zdir else 0
for x, y, z, col in self._stamp:
data.voxels.select(x + dx, y + dy, z + dz)
register_plugin(ExtrudeTool, "Extrude Tool", "1.0")
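# Worked example of the screen-to-voxel scaling in on_drag(), with assumed
# numbers: an 800x600 window over a 40-voxel-wide, 30-voxel-high model gives
# sx = 800 / 40 = 20 and sy = 600 / 30 = 20, so a 45-pixel horizontal drag
# becomes int(round(45 / 20.0)) = 2 voxels of extrusion along the mapped axis.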
|
almc/speed_reader
|
pyperclip.py
|
Python
|
gpl-2.0
| 5,763
| 0.004164
|
# Pyperclip v1.4
# A cross-platform clipboard module for Python. (only handles plain text for now)
# By Al Sweigart al@coffeeghost.net
# Usage:
# import pyperclip
# pyperclip.copy('The text to be copied to the clipboard.')
# spam = pyperclip.paste()
# On Mac, this module makes use of the pbcopy and pbpaste commands, which should come with the os.
# On Linux, this module makes use of the xclip command, which should come with the os. Otherwise run "sudo apt-get install xclip"
# Copyright (c) 2010, Albert Sweigart
# All rights reserved.
#
# BSD-style license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the pyperclip nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Albert Sweigart "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Albert Sweigart BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Change Log:
# 1.2 Use the platform module to help determine OS.
# 1.3 Changed ctypes.windll.user32.OpenClipboard(None) to ctypes.windll.user32.OpenClipboard(0), after some people ran into some TypeError
import platform, os
def winGetClipboard():
ctypes.windll.user32.OpenClipboard(0)
pcontents = ctypes.windll.user32.GetClipboardData(1) # 1 is CF_TEXT
data = ctypes.c_char_p(pcontents).value
#ctypes.windll.kernel32.GlobalUnlock(pcontents)
ctypes.windll.user32.CloseClipboard()
return data
def winSetClipboard(text):
text = str(text)
GMEM_DDESHARE = 0x2000
ctypes.windll.user32.OpenClipboard(0)
ctypes.windll.user32.EmptyClipboard()
try:
# works on Python 2 (bytes() only takes one argument)
hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text))+1)
except TypeError:
# works on Python 3 (bytes() requires an encoding)
hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text, 'ascii'))+1)
pchData = ctypes.windll.kernel32.GlobalLock(hCd)
try:
# works on Python 2 (bytes() only takes one argument)
ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text))
except TypeError:
# works on Python 3 (bytes() requires an encoding)
ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text, 'ascii'))
ctypes.windll.kernel32.GlobalUnlock(hCd)
ctypes.windll.user32.SetClipboardData(1, hCd)
ctypes.windll.user32.CloseClipboard()
def macSetClipboard(text):
text = str(text)
outf = os.popen('pbcopy', 'w')
outf.write(text)
outf.close()
def macGetClipboard():
outf = os.popen('pbpaste', 'r')
content = outf.read()
outf.close()
return content
def gtkGetClipboard():
return gtk.Clipboard().wait_for_text()
def gtkSetClipboard(text):
global cb
text = str(text)
cb = gtk.Clipboard()
cb.set_text(text)
cb.store()
def qtGetClipboard():
return str(cb.text())
def qtSetClipboard(text):
text = str(text)
cb.setText(text)
def xclipSetClipboard(text):
text = str(text)
outf = os.popen('xclip -selection c', 'w')
outf.write(text)
outf.close()
def xclipGetClipboard():
outf = os.popen('xclip -selection c -o', 'r')
content = outf.read()
outf.close()
return content
def xselSetClipboard(text):
text = str(text)
outf = os.popen('xsel -i', 'w')
outf.write(text)
outf.close()
def xselGetClipboard():
outf = os.popen('xsel -o', 'r')
content = outf.read()
outf.close()
return content
if os.name == 'nt' or platform.system() == 'Windows':
import ctypes
getcb = winGetClipboard
setcb = winSetClipboard
elif os.name == 'mac' or platform.system() == 'Darwin':
getcb = macGetClipboard
setcb = macSetClipboard
elif os.name == 'posix' or platform.system() == 'Linux':
xclipExists = os.system('which xclip') == 0
if xclipExists:
getcb = xclipGetClipboard
setcb = xclipSetClipboard
else:
xselExists = os.system('which xsel') == 0
if xselExists:
getcb = xselGetClipboard
setcb = xselSetClipboard
try:
import gtk
getcb = gtkGetClipboard
setcb = gtkSetClipboard
except Exception:
try:
import PyQt4.QtCore
import PyQt4.QtGui
        app = PyQt4.QtGui.QApplication([])
cb = PyQt4.QtGui.QApplication.clipboard()
getcb = qtGetClipboard
setcb = qtSetClipboard
except:
raise Exception('Pyperclip requires the gtk or PyQt4 module installed, or the xclip command.')
copy = setcb
paste = getcb
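# A minimal round-trip sketch, assuming one of the backends above was
# successfully selected for this platform:
#
#     copy('hello clipboard')
#     assert paste() == 'hello clipboard'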
|
clone2727/mhkutil
|
stream.py
|
Python
|
gpl-3.0
| 3,692
| 0.026273
|
# mhkutil - A utility for dealing with Mohawk archives
#
# mhkutil is the legal property of its developers, whose names
# can be found in the AUTHORS file distributed with this source
# distribution.
#
# mhkutil is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# mhkutil is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mhkutil. If not, see <http://www.gnu.org/licenses/>.
import os
import struct
# TODO: Find a better place for this
def makeTag(text):
if len(text) != 4:
raise Exception('Invalid text size {0}'.format(len(text)))
return struct.unpack('>L', text)[0]
# TODO: Find a better place for this
def tagToString(tag):
return struct.pack('>L', tag)
class Stream:
def readByte(self):
return struct.unpack('B', self.read(1))[0]
def readSByte(self):
return struct.unpack('b', self.read(1))[0]
def readUint16LE(self):
return struct.unpack('<H', self.read(2))[0]
def readSint16LE(self):
return struct.unpack('<h', self.read(2))[0]
def readUint16BE(self):
return struct.unpack('>H', self.read(2))[0]
def readSint16BE(self):
return struct.unpack('>h', self.read(2))[0]
def readUint32LE(self):
return struct.unpack('<L', self.read(4))[0]
def readSint32LE(self):
return struct.unpack('<l', self.read(4))[0]
def readUint32BE(self):
return struct.unpack('>L', self.read(4))[0]
def readSint32BE(self):
return struct.unpack('>l', self.read(4))[0]
def readCString(self):
text = ''
while True:
char = self.readByte()
if char == 0:
break
text += chr(char)
return text
class WriteStream:
def writeByte(self, x):
self.write(struct.pack('B', x))
def writeSByte(self, x):
self.write(struct.pack('b', x))
def writeUint16LE(self, x):
self.write(struct.pack('<H', x))
def writeSint16LE(self, x):
self.write(struct.pack('<h', x))
def writeUint16BE(self, x):
self.write(struct.pack('>H', x))
def writeSint16BE(self, x):
self.write(struct.pack('>h', x))
def writeUint32LE(self, x):
self.write(struct.pack('<L', x))
def writeSint32LE(self, x):
self.write(struct.pack('<l', x))
def writeUint32BE(self, x):
self.write(struct.pack('>L', x))
def writeSint32BE(self, x):
self.write(struct.pack('>l', x))
class FileStream(Stream):
def __init__(self, handle):
self._handle = handle
handle.seek(0, os.SEEK_END)
self._size = handle.tell()
handle.seek(0)
def tell(self):
return self._handle.tell()
def size(self):
return self._size
def seek(self, offset, whence=os.SEEK_SET):
return self._handle.seek(offset, whence)
def read(self, size):
return bytearray(self._handle.read(size))
class FileWriteStream(WriteStream):
def __init__(self, handle):
self._handle = handle
def write(self, x):
self._handle.write(x)
class ByteStream(Stream):
def __init__(self, data):
self._data = data
self._pos = 0
def tell(self):
return self._pos
def size(self):
return len(self._data)
def seek(self, offset, whence=os.SEEK_SET):
if whence == os.SEEK_CUR:
self._pos += offset
elif whence == os.SEEK_END:
self._pos = len(self._data) + offset
else:
self._pos = offset
def read(self, size):
if size == 0:
return bytearray()
start = self._pos
end = start + size
self._pos = end
return self._data[start:end]
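# A minimal sketch of the tag helpers together with ByteStream; the 'MHWK'
# tag value is illustrative only:
#
#     data = bytearray(struct.pack('>LH', makeTag('MHWK'), 42))
#     s = ByteStream(data)
#     assert tagToString(s.readUint32BE()) == 'MHWK'
#     assert s.readUint16BE() == 42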
|
agaldona/odoo-addons
|
stock_information_mrp_procurement_plan/models/stock_information.py
|
Python
|
agpl-3.0
| 8,497
| 0
|
# -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api, _
import openerp.addons.decimal_precision as dp
class StockInformation(models.Model):
_inherit = 'stock.information'
@api.multi
def _compute_week(self):
super(StockInformation, self)._compute_week()
p_obj = self.env['procurement.order']
move_obj = self.env['stock.move']
for line in self:
if line.first_week:
moves = move_obj._find_moves_from_stock_information(
line.company, line.last_day_week,
products=[line.product.id], location_id=line.location,
periods=False)
else:
moves = move_obj._find_moves_from_stock_information(
line.company, line.last_day_week,
products=[line.product.id], from_date=line.first_day_week,
location_id=line.location, periods=False)
line.outgoing_pending_amount = sum(moves.mapped('product_uom_qty'))
line.outgoing_pending_moves = [(6, 0, moves.ids)]
states = ['confirmed', 'exception']
if line.first_week:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
products=[line.product.id], location_id=line.location,
without_reserves=False, without_plan=False)
else:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
from_date=line.first_day_week, products=[line.product.id],
location_id=line.location, without_reserves=False,
without_plan=False)
line.outgoing_pending_amount_reserv = sum(
procurements.mapped('product_qty'))
line.outgoing_pending_procurement_reserv = (
[(6, 0, procurements.ids)])
line.outgoing_pending_amount_moves = line.outgoing_pending_amount
line.outgoing_pending_amount += line.outgoing_pending_amount_reserv
states = ['confirmed', 'exception']
if line.first_week:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
products=[line.product.id], location_id=line.location,
without_reserves=True, without_plan=False)
else:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
from_date=line.first_day_week, products=[line.product.id],
location_id=line.location, without_reserves=True,
without_plan=False)
line.incoming_pending_amount_plan = sum(
procurements.mapped('product_qty'))
line.incoming_pending_procurements_plan = (
[(6, 0, procurements.ids)])
if line.first_week:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
products=[line.product.id], location_id=line.location,
without_reserves=False, without_plan=False)
else:
procurements = p_obj._find_procurements_from_stock_information(
line.company, line.last_day_week, states=states,
from_date=line.first_day_week, products=[line.product.id],
location_id=line.location, without_reserves=False,
without_plan=False)
line.incoming_pending_amount_plan_reservation = sum(
procurements.mapped('product_qty'))
line.incoming_pending_procurements_plan_reservation = (
[(6, 0, procurements.ids)])
line.incoming_pending_amount += (
line.incoming_pending_amount_plan +
line.incoming_pending_amount_plan_reservation)
line.stock_availability = (line.qty_available - line.minimum_rule +
line.incoming_pending_amount)
if line.stock_availability >= line.outgoing_pending_amount:
line.virtual_stock = 0
else:
line.virtual_stock = (line.outgoing_pending_amount -
line.stock_availability)
incoming_pending_amount_plan = fields.Float(
'Incoming pending amount from plan', compute='_compute_week',
digits=dp.get_precision('Product Unit of Measure'),
help='Incoming from plan')
incoming_pending_amount_plan_required_run = fields.Float(
'Incoming from plan required run',
related='incoming_pending_amount_plan',
digits=dp.get_precision('Product Unit of Measure'), store=True)
incoming_pending_procurements_plan = fields.Many2many(
comodel_name='procurement.order',
string='Incoming pending procurements from plan',
relation='rel_stock_info_incoming_pending_procurement_plan',
column1='stock_info_id', column2='pending_procurement_plan_id',
compute='_compute_week')
incoming_pending_amount_plan_reservation = fields.Float(
'Incoming pending amount from plan reservation',
digits=dp.get_precision('Product Unit of Measure'),
compute='_compute_week', help='Incoming from plan reservation')
incoming_pending_amount_plan_reserv_required_run = fields.Float(
'Incoming from plan reserv required run',
related='incoming_pending_amount_plan_reservation',
digits=dp.get_precision('Product Unit of Measure'), store=True)
incoming_pending_procurements_plan_reservation = fields.Many2many(
comodel_name='procurement.order',
string='Incoming pending procurements from plan reservation',
relation='rel_stock_info_incoming_pending_procurement_plan_reserv',
column1='stock_info_id', column2='pending_procurement_plan_id',
compute='_compute_week')
outgoing_pending_amount_moves = fields.Float(
'Outgoing pending amount from moves', compute='_compute_week',
digits=dp.get_precision('Product Unit of Measure'),
help='Gross requirement')
outgoing_pending_amount_reserv = fields.Float(
'Outgoing pending amount reservation', compute='_compute_week',
digits=dp.get_precision('Product Unit of Measure'),
help='Gross requirement reservation')
outgoing_pending_procurement_reserv = fields.Many2many(
comodel_name='procurement.order',
string='Outgoing pending procurements reservation',
relation='rel_stock_info_outgoing_pending_procurement_reserv',
column1='stock_info_id', column2='pending_procurement_reserv_id',
compute='_compute_week')
@api.multi
def show_outgoing_pending_reserved_moves(self):
self.ensure_one()
return {'name': _('Outgoing pending reserved procurements'),
'view_type': 'form',
"view_mode": 'tree,form',
'res_model': 'procurement.order',
'type': 'ir.actions.act_window',
'domain': [('id', 'in',
self.outgoing_pending_procurement_reserv.ids)]}
@api.multi
    def show_incoming_procurements_from_plan(self):
        self.ensure_one()
return {'name': _('Incoming procurements from plan'),
'view_type': 'form',
"view_mode": 'tree,form',
'res_model': 'procurement.order',
'type': 'ir.actions.act_window',
'domain': [('id', 'in',
self.incoming_pending_procurements_plan.ids)]}
@api.multi
def show_incoming_procurements_from_plan_reservation(self):
self.ensure_one()
        ids = self.incoming_pending_procurements_plan_reservation.ids
|
WilliamQLiu/job-waffle
|
employer/admin.py
|
Python
|
apache-2.0
| 113
| 0
|
from django.contrib import admin
from .models import Job
# Register your models here.
admin.site.register(Job)
 |
fnp/wolnelektury
|
src/redirects/admin.py
|
Python
|
agpl-3.0
| 595
| 0
|
from django.contrib import admin
from django.contrib.sites.models import Site
from . import models
class RedirectAdmin(admin.ModelAdmin):
list_display = ['slug', 'url', 'counter', 'created_at', 'full_url']
readonly_fields = ['counter', 'created_at', 'full_url']
fields = ['slug', 'url', 'counter', 'created_at', 'full_url']
def full_url(self, obj):
if not obj.slug:
return None
site = Site.objects.get_current()
url = obj.get_absolute_url()
        return f'https://{site.domain}{url}'
admin.site.register(models.Redirect, RedirectAdmin)
|
KECB/learn
|
computer_vision/01_inspecting_images.py
|
Python
|
mit
| 412
| 0
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
img = cv2.imread('images/dolphin.png', 0)
cv2.imshow("Dolphin Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
print('The intensity value at row 50 & column 100 is : {}'.format(img[49, 99]))
print('Row 50 column values:')
print(img[49, :])
print('Rows 101-103 & columns 201-203')
print(img[100:103, 200:203])
plt.plot(img[49, :])
plt.show()
|
DevCouch/coderbyte_python
|
medium/letter_count.py
|
Python
|
gpl-2.0
| 614
| 0.004886
|
def LetterCount(str):
words = str.split(" ")
result_word = ""
letter_count = 0
for word in words:
word_map = {}
        for ch in word:
            if ch in word_map:
word_map[ch] += 1
else:
word_map[ch] = 1
max_key = max(word_map.iterkeys(), key=lambda k: word_map[k])
if letter_count < word_map[max_key] and word_map[max_key] > 1:
letter_count = word_map[max_key]
result_word = word
return result_word if letter_count > 1 else -1
print LetterCount("Hello apple pie")
print LetterCount("No words")
|
vipmike007/avocado-vt
|
virttest/asset.py
|
Python
|
gpl-2.0
| 21,905
| 0.000228
|
import urllib2
import logging
import os
import re
import string
import types
import glob
import ConfigParser
import StringIO
import commands
import shutil
from distutils import dir_util # virtualenv problem pylint: disable=E0611
from avocado.utils import process
from avocado.utils import genio
from avocado.utils import crypto
from avocado.utils import download
from avocado.utils import git
from . import data_dir
class ConfigLoader:
"""
Base class of the configuration parser
"""
def __init__(self, cfg, tmpdir=data_dir.get_tmp_dir(), raise_errors=False):
"""
Instantiate ConfigParser and load data.
:param cfg: Where we'll get configuration data. It can be either:
* A URL containing the file
* A valid file path inside the filesystem
* A string containing configuration data
:param tmpdir: Where we'll dump the temporary conf files.
:param raise_errors: Whether config value absences will raise
ValueError exceptions.
"""
# Base Parser
self.parser = ConfigParser.ConfigParser()
# Raise errors when lacking values
self.raise_errors = raise_errors
# File is already a file like object
if hasattr(cfg, 'read'):
self.cfg = cfg
self.parser.readfp(self.cfg)
elif isinstance(cfg, types.StringTypes):
# Config file is a URL. Download it to a temp dir
if cfg.startswith('http') or cfg.startswith('ftp'):
self.cfg = os.path.join(tmpdir, os.path.basename(cfg))
download.url_download(cfg, self.cfg)
self.parser.read(self.cfg)
# Config is a valid filesystem path to a file.
elif os.path.exists(os.path.abspath(cfg)):
if os.path.isfile(cfg):
self.cfg = os.path.abspath(cfg)
self.parser.read(self.cfg)
else:
e_msg = 'Invalid config file path: %s' % cfg
raise IOError(e_msg)
# Config file is just a string, convert it to a python file like
# object using StringIO
else:
self.cfg = StringIO.StringIO(cfg)
self.parser.readfp(self.cfg)
def get(self, section, option, default=None):
"""
Get the value of a option.
Section of the config file and the option name.
You can pass a default value if the option doesn't exist.
:param section: Configuration file section.
:param option: Option we're looking after.
        :param default: In case the option is not available and raise_errors
                        is set to False, return the default.
"""
if not self.parser.has_option(section, option):
if self.raise_errors:
raise ValueError('No value for option %s. Please check your '
'config file "%s".' % (option, self.cfg))
else:
return default
return self.parser.get(section, option)
def set(self, section, option, value):
"""
Set an option.
This change is not persistent unless saved with 'save()'.
"""
if not self.parser.has_section(section):
self.parser.add_section(section)
return self.parser.set(section, option, value)
def remove(self, section, option):
"""
Remove an option.
"""
if self.parser.has_section(section):
self.parser.remove_option(section, option)
def save(self):
"""
Save the configuration file with all modifications
"""
if not self.cfg:
return
fileobj = file(self.cfg, 'w')
try:
self.parser.write(fileobj)
finally:
fileobj.close()
def check(self, section):
"""
Check if the config file has valid values
"""
if not self.parser.has_section(section):
return False, "Section not found: %s" % (section)
options = self.parser.items(section)
for i in range(options.__len__()):
param = options[i][0]
aux = string.split(param, '.')
            if len(aux) < 2:
return False, "Invalid parameter syntax at %s" % (param)
if not self.check_parameter(aux[0], options[i][1]):
return False, "Invalid value at %s" % (param)
return True, None
def check_parameter(self, param_type, parameter):
"""
Check if a option has a valid value
"""
if parameter == '' or parameter is None:
return False
elif param_type == "ip" and self.__isipaddress(parameter):
return True
elif param_type == "int" and self.__isint(parameter):
return True
elif param_type == "float" and self.__isfloat(parameter):
return True
elif param_type == "str" and self.__isstr(parameter):
return True
return False
def __isipaddress(self, parameter):
"""
Verify if the ip address is valid
:param ip String: IP Address
:return: True if a valid IP Address or False
"""
octet1 = "([1-9][0-9]{,1}|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
octet = "([0-9]{1,2}|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
pattern = "^" + octet1 + "\.(" + octet + "\.){2}" + octet + "$"
if re.match(pattern, parameter) is None:
return False
else:
return True
def __isint(self, parameter):
try:
int(parameter)
except Exception, e_stack:
return False
return True
def __isfloat(self, parameter):
try:
float(parameter)
except Exception, e_stack:
return False
return True
def __isstr(self, parameter):
try:
str(parameter)
except Exception, e_stack:
return False
return True
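# A minimal usage sketch of ConfigLoader with inline configuration data
# (section and values are illustrative only):
#
#     loader = ConfigLoader('[general]\nip = 10.0.0.1\n')
#     loader.get('general', 'ip')            # -> '10.0.0.1'
#     loader.get('general', 'missing', 42)   # -> 42 (raise_errors is False)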
def get_known_backends():
"""
Return virtualization backends supported by avocado-vt.
"""
# Generic means the test can run in multiple backends, such as libvirt
# and qemu.
known_backends = ['generic']
known_backends += os.listdir(data_dir.BASE_BACKEND_DIR)
return known_backends
def get_test_provider_names(backend=None):
"""
Get the names of all test providers available in test-providers.d.
:return: List with the names of all test providers.
"""
provider_name_list = []
tp_base_dir = data_dir.get_base_test_providers_dir()
tp_local_dir = data_dir.get_test_providers_dir()
dir_util.copy_tree(tp_base_dir, tp_local_dir)
    provider_dir = data_dir.get_test_providers_dir()
    for provider in glob.glob(os.path.join(provider_dir, '*.ini')):
provider_name = os.path.basename(provider).split('.')[0]
provider_info = get_test_provider_info(provider_name)
if backend is not None:
if backend in provider_info['backends']:
provider_name_list.append(provider_name)
else:
provider_name_list.append(provider_name)
return provider_name_list
def get_test_provider_subdirs(backend=None):
"""
Get information of all test provider subdirs for a given backend.
If no backend is provided, return all subdirs with tests.
:param backend: Backend type, such as 'qemu'.
:return: List of directories that contain tests for the given backend.
"""
subdir_list = []
for provider_name in get_test_provider_names():
provider_info = get_test_provider_info(provider_name)
backends_info = provider_info['backends']
if backend is not None:
if backend in backends_info:
subdir_list.append(backends_info[backend]['path'])
else:
for b in backends_info:
subdir_list.append(backends_info[b]['path'])
return subdir_list
def get_test_provider_info(provider):
"""
Get a dictionary with relevant test provider info, such as:
* provider uri (
|
xgfone/snippet
|
snippet/example/python/circuit_breaker.py
|
Python
|
mit
| 8,647
| 0.000347
|
# -*- coding: utf8 -*-
from time import time
from threading import Lock
from functools import wraps
STATE_CLOSED = "closed"
STATE_OPEN = "open"
STATE_HALF_OPEN = "half-open"
def get_now():
return int(time())
class CircuitBreakerError(Exception):
pass
class TooManyRequestsError(CircuitBreakerError):
pass
class OpenStateError(CircuitBreakerError):
pass
class Count(object):
__slots__ = ("requests", "total_successes", "total_failures",
"consecutive_successes", "consecutive_failures")
def __init__(self):
self.requests = 0
self.total_successes = 0
self.total_failures = 0
self.consecutive_successes = 0
self.consecutive_failures = 0
def on_request(self):
self.requests += 1
def on_success(self):
self.total_successes += 1
self.consecutive_successes += 1
self.consecutive_failures = 0
def on_failure(self):
self.total_failures += 1
self.consecutive_failures += 1
self.consecutive_successes = 0
def clear(self):
self.requests = 0
self.total_successes = 0
self.total_failures = 0
self.consecutive_successes = 0
self.consecutive_failures = 0
    def copy(self):
        """Return a copy of the current counters."""
        c = self.__class__.__new__(self.__class__)
        c.requests = self.requests
        c.total_successes = self.total_successes
        c.total_failures = self.total_failures
        c.consecutive_successes = self.consecutive_successes
        c.consecutive_failures = self.consecutive_failures
        return c
class CircuitBreaker(object):
MAX_REQUESTS = 1
COUNT_INTERVAL = 0
RECOVERY_TIMEOUT = 60
FAILURE_THRESHOLD = 5
EXPECTED_EXCEPTION = Exception
def __init__(self, name=None, max_requests=None, count_interval=None,
recovery_timeout=None, failure_threshold=None,
expected_exception=None, on_state_change=None):
"""The Circuit Breaker.
"""
self._name = name
self._max_requests = max_requests or self.MAX_REQUESTS
self._count_interval = count_interval or self.COUNT_INTERVAL
self._recovery_timeout = recovery_timeout or self.RECOVERY_TIMEOUT
self._failure_threshold = failure_threshold or self.FAILURE_THRESHOLD
self._expected_exception = expected_exception or self.EXPECTED_EXCEPTION
self._on_state_change = on_state_change
self._state = STATE_CLOSED
self._generation = 0
self._count = Count()
self._expiry = 0
self._lock = Lock()
self._new_generation(get_now())
@property
def name(self):
"""Return the name of Circuit Breaker."""
return self._name
@property
def state(self):
"""Return the state of Circuit Breaker."""
with self._lock:
return self._current_state(get_now())[0]
@property
def is_open(self):
"""Return True if the Circuit Breaker is open. Or False."""
return self.state == STATE_OPEN
@property
def is_closed(self):
"""Return True if the Circuit Breaker is closed. Or False."""
return self.state == STATE_CLOSED
@property
def is_half_open(self):
"""Return True if the Circuit Breaker is half-open. Or False."""
return self.state == STATE_HALF_OPEN
@property
def count(self):
"""Return the count information of the requests."""
with self._lock:
return self._count.copy()
def __call__(self, wrapped):
"""Decorate the function or method.
Notice: when decorating more than one function or method, you should
assign a unique name to the circuit breaker.
"""
if not self._name:
            self._name = wrapped.__name__
@wraps(wrapped)
def wrapper(*args, **kwargs):
return self.call(wrapped, *args, **kwargs)
CircuitBreakerMonitor.register(self)
return wrapper
def allow(self):
"""Checks if a new request can proceed.
It returns a callback that should be used to register the success
or failure in a separate step.
If the circuit breaker doesn't allow requests, it raises an exception.
"""
        generation = self._before_request()
return lambda ok: self._after_request(generation, ok)
def call(self, func, *args, **kwargs):
"""Run the given request if the CircuitBreaker accepts it.
It raises an error if the CircuitBreaker rejects the request.
Otherwise, it will return the result of the request.
If an exception is raised in the request, the CircuitBreaker handles it
as a failure and reraises it again.
"""
generation = self._before_request()
try:
result = func(*args, **kwargs)
except self._expected_exception:
self._after_request(generation, False)
raise
else:
self._after_request(generation, True)
return result
def _before_request(self):
with self._lock:
now = get_now()
state, generation = self._current_state(now)
if state == STATE_OPEN:
raise OpenStateError
elif state == STATE_HALF_OPEN and self._count.requests >= self._max_requests:
raise TooManyRequestsError
self._count.on_request()
return generation
def _after_request(self, before_generation, ok):
with self._lock:
now = get_now()
state, generation = self._current_state(now)
if generation != before_generation:
return
(self._on_success if ok else self._on_failure)(state, now)
def _on_success(self, state, now):
if state == STATE_CLOSED:
self._count.on_success()
elif state == STATE_HALF_OPEN:
self._count.on_success()
if self._count.consecutive_successes >= self._max_requests:
                self._set_state(STATE_CLOSED, now)
def _on_failure(self, state, now):
if state == STATE_CLOSED:
self._count.on_failure()
if self._count.consecutive_failures > self._failure_threshold:
                self._set_state(STATE_OPEN, now)
elif state == STATE_HALF_OPEN:
            self._set_state(STATE_OPEN, now)
def _current_state(self, now):
state = self._state
if state == STATE_CLOSED:
if self._expiry and self._expiry < now:
self._new_generation(now)
elif state == STATE_OPEN:
if self._expiry < now:
                self._set_state(STATE_HALF_OPEN, now)
return self._state, self._generation
    def _set_state(self, state, now):
if self._state == state:
return
prev, self._state = self._state, state
self._new_generation(now)
if self._on_state_change:
self._on_state_change(self._name, prev, state)
def _new_generation(self, now):
self._generation += 1
self._count.clear()
state = self._state
if state == STATE_CLOSED:
self._expiry = (now + self._count_interval) if self._count_interval else 0
elif state == STATE_OPEN:
self._expiry = now + self._recovery_timeout
else: # STATE_HALF_OPEN
self._expiry = 0
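# A minimal usage sketch of the two entry points above; `call_backend` and
# the settings are illustrative only.
#
#     cb = CircuitBreaker(name="backend", failure_threshold=3)
#
#     @cb
#     def fetch():
#         return call_backend()  # assumed user function
#
#     # or the explicit two-step form around arbitrary code:
#     done = cb.allow()  # may raise OpenStateError / TooManyRequestsError
#     try:
#         result = call_backend()
#     except Exception:
#         done(False)
#         raise
#     else:
#         done(True)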
class CircuitBreakerMonitor(object):
circuit_breakers = {}
@classmethod
def register(cls, cb):
"""Register a circuit breaker."""
cls.circuit_breakers[cb.name] = cb
@classmethod
def all_closed(cls):
"""Return True if all circuit breakers are closed."""
return not cls.get_all_open()
@classmethod
def get_all_circuit_breakers(cls):
"""Return all circuit breakers."""
return cls.circuit_breakers.values()
@classmethod
def get(cls, name):
"""Return the circuit breaker named 'name'."""
return cls.circuit_breakers.get(name, None)
@classmethod
def get_all_open(cls):
"""Return all open circuit breakers."""
return [cb for cb
|
ifnull/hello-tracking
|
project/urls.py
|
Python
|
apache-2.0
| 482
| 0
|
"""URLs."""
from django.conf.urls import include, url
from django.contrib import admin
import apps.status.views
admin.autodiscover()
# Examples:
# url(r'^$', 'gettingstarted.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
urlpatterns = [
url(r'^$', apps.status.views.index, name='index'),
url(r'^trackings/(?P<carrier_slug>[\w-]+)/(?P<tracking_number>[\w-]+)/$',
apps.status.views.trackings),
url(r'^admin/', include(admin.site.urls)),
]
|
tstenner/bleachbit
|
tests/TestCommon.py
|
Python
|
gpl-3.0
| 2,298
| 0.00087
|
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2020 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for Common
"""
from tests import common
import bleachbit
import os
class CommonTestCase(common.BleachbitTestCase):
"""Test case for Common."""
def test_expandvars(self):
"""Unit test for expandvars."""
var = os.path.expandvars('$HOME')
self.assertIsString(var)
def test_environment(self):
"""Test for important environment variables"""
# useful for researching
# grep -Poh "([\\$%]\w+)" cleaners/*xml | cut -b2- | sort | uniq -i
envs = {'posix': ['XDG_DATA_HOME', 'XDG_CONFIG_HOME', 'XDG_CACHE_HOME', 'HOME'],
'nt': ['AppData', 'CommonAppData', 'Documents', 'ProgramFiles', 'UserProfile', 'WinDir']}
for env in envs[os.name]:
e = os.getenv(env)
self.assertIsNotNone(e)
self.assertGreater(len(e), 4)
def test_expanduser(self):
"""Unit test for expanduser."""
# Return Unicode when given Unicode.
self.assertIsString(os.path.expanduser('~'))
# Blank input should give blank output.
self.assertEqual(os.path.expanduser(''), '')
# An absolute path should not be altered.
        abs_dirs = {'posix': '$HOME', 'nt': '%USERPROFILE%'}
        abs_dir = os.path.expandvars(abs_dirs[os.name])
self.assertExists(abs_dir)
self.assertEqual(os.path.expanduser(abs_dir), abs_dir)
# A relative path (without a reference to the home directory)
        # should not be expanded.
        self.assertEqual(os.path.expanduser('common'), 'common')
|
davidwilson-85/easymap
|
graphic_output/Pillow-4.2.1/Tests/test_file_cur.py
|
Python
|
gpl-3.0
| 1,063
| 0
|
from helper import unittest, PillowTestCase
from PIL import Image, CurImagePlugin
TEST_FILE = "Tests/images/deerstalker.cur"
class TestFileCur(PillowTestCase):
def test_sanity(self):
im = Image.open(TEST_FILE)
self.assertEqual(im.size, (32, 32))
        self.assertIsInstance(im, CurImagePlugin.CurImageFile)
# Check some pixel colors to ensure image is loaded properly
        self.assertEqual(im.getpixel((10, 1)), (0, 0, 0, 0))
self.assertEqual(im.getpixel((11, 1)), (253, 254, 254, 1))
self.assertEqual(im.getpixel((16, 16)), (84, 87, 86, 255))
def test_invalid_file(self):
invalid_file = "Tests/images/flower.jpg"
self.assertRaises(SyntaxError,
lambda: CurImagePlugin.CurImageFile(invalid_file))
no_cursors_file = "Tests/images/no_cursors.cur"
cur = CurImagePlugin.CurImageFile(TEST_FILE)
with open(no_cursors_file, "rb") as cur.fp:
self.assertRaises(TypeError, cur._open)
if __name__ == '__main__':
unittest.main()
|
aheck/reflectrpc
|
tests/rpcsh-tests.py
|
Python
|
mit
| 16,724
| 0.011182
|
#!/usr/bin/env python3
from __future__ import unicode_literals
from builtins import bytes, dict, list, int, float, str
import os
import sys
import unittest
import pexpect
sys.path.append('..')
from reflectrpc.rpcsh import print_functions
from reflectrpc.rpcsh import split_exec_line
from reflectrpc.rpcsh import ReflectRpcShell
from reflectrpc.testing import ServerRunner
class RpcShTests(unittest.TestCase):
def test_split_exec_line(self):
tokens = split_exec_line('echo')
self.assertEqual(tokens, ['echo'])
tokens = split_exec_line('echo ')
self.assertEqual(tokens, ['echo'])
tokens = split_exec_line('echo "Hello Server"')
self.assertEqual(tokens, ['echo', 'Hello Server'])
tokens = split_exec_line(' echo "Hello Server" ')
self.assertEqual(tokens, ['echo', 'Hello Server'])
tokens = split_exec_line('add 4 5')
self.assertEqual(tokens, ['add', 4, 5])
tokens = split_exec_line('add 4452 5980')
self.assertEqual(tokens, ['add', 4452, 5980])
tokens = split_exec_line(' add 4 5 ')
self.assertEqual(tokens, ['add', 4, 5])
tokens = split_exec_line('test 4 5 "A String" "Another String" 3424 453.9 true null "Yet another String"')
self.assertEqual(tokens, ['test', 4, 5, 'A String', 'Another String',
3424, 453.9, True, None, 'Yet another String'])
tokens = split_exec_line('test 4 [5]')
self.assertEqual(tokens, ['test', 4, [5]])
tokens = split_exec_line('test 4 [5 ]')
self.assertEqual(tokens, ['test', 4, [5]])
tokens = split_exec_line(' test 4 [ 5 ]')
self.assertEqual(tokens, ['test', 4, [5]])
tokens = split_exec_line('test ["Hello Server", 5, "String"] [5]')
self.assertEqual(tokens, ['test', ["Hello Server", 5, "String"], [5]])
tokens = split_exec_line('test {"num": 5, "name": "object"}')
self.assertEqual(tokens, ['test', {'num': 5, 'name': 'object'}])
tokens = split_exec_line('func [1,2,3,4,5,6] [7,8,9] [10,11,12,13]')
self.assertEqual(tokens, ['func', [1,2,3,4,5,6], [7,8,9], [10,11,12,13]])
tokens = split_exec_line('func {"array": [{"key1": "value1", "key2": "value2"}]} 5 ["str1", "str2", 5, "str3"]')
        self.assertEqual(tokens, ['func', {'array': [{'key1': 'value1', 'key2': 'value2'}]}, 5, ['str1', 'str2', 5, 'str3']])
def test_rpcsh_compiles_and_runs(self):
python = sys.executable
exit_status = os.system("cd .. && %s rpcsh --help > /dev/null" % (python))
self.assertEqual(exit_status, 0)
def test_rpcsh_complete_function_names(self):
rpcsh = ReflectRpcShell(None)
rpcsh.functions = [
{'name': 'get_something'},
            {'name': 'get_anotherthing'},
{'name': 'echo'},
{'name': 'echoEcho'},
{'name': 'add'},
]
result = rpcsh.function_completion('', 'exec ')
self.assertEqual(result, ['get_something', 'get_anotherthing', 'echo', 'echoEcho', 'add'])
result = rpcsh.function_completion('get_', 'exec get_')
self.assertEqual(result, ['get_something', 'get_anotherthing'])
result = rpcsh.function_completion('ad', 'exec ad')
self.assertEqual(result, ['add'])
result = rpcsh.function_completion('add', 'exec add')
self.assertEqual(result, [])
result = rpcsh.function_completion('echo', 'exec echo')
self.assertEqual(result, ['echo', 'echoEcho'])
def test_rpcsh_complete_type_names(self):
rpcsh = ReflectRpcShell(None)
rpcsh.custom_types = [
{'name': 'AddressExtension'},
{'name': 'AddressEntry'},
{'name': 'CPU'},
{'name': 'CPUInfo'},
{'name': 'Order'},
]
result = rpcsh.complete_type('', 'exec ', 0, 0)
self.assertEqual(result, ['AddressExtension', 'AddressEntry', 'CPU', 'CPUInfo', 'Order'])
result = rpcsh.complete_type('Address', 'exec Address', 0, 0)
self.assertEqual(result, ['AddressExtension', 'AddressEntry'])
result = rpcsh.complete_type('Ord', 'exec Ord', 0, 0)
self.assertEqual(result, ['Order'])
result = rpcsh.complete_type('Order', 'exec Order', 0, 0)
self.assertEqual(result, [])
result = rpcsh.complete_type('CPU', 'exec CPU', 0, 0)
self.assertEqual(result, ['CPU', 'CPUInfo'])
def test_rpcsh_expect_simple(self):
try:
server = ServerRunner('../examples/server.py', 5500)
server.run()
python = sys.executable
child = pexpect.spawn('%s ../rpcsh localhost 5500' % (python))
child.expect('ReflectRPC Shell\r\n')
child.expect('================\r\n\r\n')
child.expect("Type 'help' for available commands\r\n\r\n")
child.expect('RPC server: localhost:5500\r\n\r\n')
child.expect('Self-description of the Service:\r\n')
child.expect('================================\r\n')
child.expect('Example RPC Service \(1.0\)\r\n')
child.expect('This is an example service for ReflectRPC\r\n')
child.expect('\(rpc\) ')
child.sendline('list')
child.expect('echo\(message\)\r\n')
child.expect('add\(a, b\)\r\n')
child.expect('sub\(a, b\)\r\n')
child.expect('mul\(a, b\)\r\n')
child.expect('div\(a, b\)\r\n')
child.expect('enum_echo\(phone_type\)\r\n')
child.expect('hash_echo\(address\)\r\n')
child.expect('notify\(value\)\r\n')
child.expect('is_authenticated\(\)\r\n')
child.expect('get_username\(\)\r\n')
child.expect('echo_ints\(ints\)\r\n')
child.sendline('exec echo "Hello Server"')
child.expect('Server replied: "Hello Server"\r\n')
child.sendline('exec add 5 6')
child.expect('Server replied: 11\r\n')
child.sendline('exec is_authenticated')
child.expect('Server replied: false\r\n')
child.sendline('exec get_username')
child.expect('Server replied: null\r\n')
finally:
child.close(True)
server.stop()
def test_rpcsh_expect_unix_socket(self):
try:
server = ServerRunner('../examples/serverunixsocket.py',
'/tmp/reflectrpc.sock')
server.run()
python = sys.executable
child = pexpect.spawn('%s ../rpcsh unix:///tmp/reflectrpc.sock' % (python))
child.expect('ReflectRPC Shell\r\n')
child.expect('================\r\n\r\n')
child.expect("Type 'help' for available commands\r\n\r\n")
child.expect('RPC server: unix:///tmp/reflectrpc.sock\r\n\r\n')
child.expect('Self-description of the Service:\r\n')
child.expect('================================\r\n')
child.expect('Example RPC Service \(1.0\)\r\n')
child.expect('This is an example service for ReflectRPC\r\n')
child.expect('\(rpc\) ')
child.sendline('list')
child.expect('echo\(message\)\r\n')
child.expect('add\(a, b\)\r\n')
child.expect('sub\(a, b\)\r\n')
child.expect('mul\(a, b\)\r\n')
child.expect('div\(a, b\)\r\n')
child.expect('enum_echo\(phone_type\)\r\n')
child.expect('hash_echo\(address\)\r\n')
child.expect('notify\(value\)\r\n')
child.expect('is_authenticated\(\)\r\n')
child.expect('get_username\(\)\r\n')
child.expect('echo_ints\(ints\)\r\n')
child.sendline('exec echo "Hello Server"')
child.expect('Server replied: "Hello Server"\r\n')
child.sendline('exec add 5 6')
child.expect('Server replied: 11\r\n')
child.sendline('exec is_authenticated')
child.expect('Server replied: false\r\n')
child.sendline('e
|
GoogleCloudPlatform/gcpdiag
|
gcpdiag/lint/gke/err_2021_001_logging_perm.py
|
Python
|
apache-2.0
| 1,986
| 0.007049
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
    if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
      # Verify service-account permissions for every nodepool.
      for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np)
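# Remediation sketch: granting the missing role is typically done with the
# standard gcloud command below (PROJECT_ID and SA_EMAIL are placeholders):
#
#   gcloud projects add-iam-policy-binding PROJECT_ID \
#       --member="serviceAccount:SA_EMAIL" --role="roles/logging.logWriter"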
|
kickstandproject/ripcord
|
ripcord/openstack/common/config/generator.py
|
Python
|
apache-2.0
| 8,281
| 0.000121
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 SINA Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
from ripcord.openstack.common import gettextutils
from ripcord.openstack.common import importutils
gettextutils.install('ripcord')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def generate(srcfiles):
mods_by_pkg = dict()
for filepath in srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
pkg_names.sort()
ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
ext_names.sort()
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
for module_name in os.getenv(
"OSLO_CONFIG_GENERATOR_EXTRA_MODULES", "").split(','):
module = _import_module(module_name)
if module:
            for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name, opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
continue
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group, opts in opts_by_group.items():
print_group_opts(group, opts)
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except ImportError as ie:
sys.stderr.write("%s\n" % str(ie))
return None
except Exception:
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for key, value in group._opts.items():
if value['opt'] == opt:
return True
return False
def _guess_groups(opt, mod_obj):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for key, value in cfg.CONF.items():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value == socket.gethostname() and 'host' in name:
return 'ripcord'
elif value.strip() != value:
return '"%s"' % value
return value
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help += ' (' + OPT_TYPES[opt_type] + ')'
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, basestring))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
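# Illustration (assumed, not from this module): for a registered option like
# the hypothetical one below ...
#
#   cfg.CONF.register_opt(cfg.StrOpt('bind_host', default='0.0.0.0',
#                                    help='IP address to listen on'))
#
# ... print_group_opts/_print_opt above would emit roughly:
#
#   [DEFAULT]
#   # IP address to listen on (string value)
#   #bind_host=0.0.0.0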
|
vadimtk/chrome4sdp
|
build/android/pylib/remote/device/remote_device_gtest_run.py
|
Python
|
bsd-3-clause
| 2,746
| 0.008012
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run specific test on specific environment."""
import logging
import os
import tempfile
from pylib import constants
from pylib.base import base_test_result
from pylib.remote.device import remote_device_test_run
from pylib.remote.device import remote_device_helper
_EXTRA_COMMAND_LINE_FILE = (
'org.chromium.native_test.NativeTestActivity.CommandLineFile')
class RemoteDeviceGtestTestRun(remote_device_test_run.RemoteDeviceTestRun):
"""Run gtests and uirobot tests on a remote device."""
DEFAULT_RUNNER_PACKAGE = (
'org.chromium.native_test.NativeTestInstrumentationTestRunner')
#override
def TestPackage(self):
return self._test_instance.suite
#override
def _TriggerSetUp(self):
"""Set up the triggering of a test run."""
logging.info('Triggering test run.')
if self._env.runner_type:
logging.warning('Ignoring configured runner_type "%s"',
self._env.runner_type)
if not self._env.runner_package:
runner_package = self.DEFAULT_RUNNER_PACKAGE
logging.info('Using default runner package: %s',
self.DEFAULT_RUNNER_PACKAGE)
else:
runner_package = self._env.runner_package
dummy_app_path = os.path.join(
constants.GetOutDirectory(), 'apks', 'remote_device_dummy.apk')
with tempfile.NamedTemporaryFile(suffix='.flags.txt') as flag_file:
env_vars = {}
filter_string = self._test_instance._GenerateDisabledFilterString(None)
if filter_string:
flag_file.write('_ --gtest_filter=%s' % filter_string)
flag_file.flush()
env_vars[_EXTRA_COMMAND_LINE_FILE] = os.path.basename(flag_file.name)
self._test_instance._data_deps.append(
(os.path.abspath(flag_file.name), None))
self._AmInstrumentTestSetup(
dummy_app_path, self._test_instance.apk, runner_package,
environment_variables=env_vars)
_INSTRUMENTATION_STREAM_LEADER = 'INSTRUMENTATION_STATUS: stream='
#override
def _ParseTestResults(self):
logging.info('Parsing results from stdout.')
results = base_test_result.TestRunResults()
output = self._results['results']['output'].splitlines()
output = (l[len(self._INSTRUMENTATION_STREAM_LEADER):] for l in output
if l.startswith(self._INSTRUMENTATION_STREAM_LEADER))
results_list = self._test_instance.ParseGTestOutput(output)
results.AddResults(results_list)
if self._env.only_output_failures:
logging.info('See logcat for more results information.')
self._DetectPlatformErrors(results)
return results
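  # The filtering above, in isolation (illustrative values): keep only lines
  # carrying the instrumentation stream prefix and strip it off.
  #
  #   leader = 'INSTRUMENTATION_STATUS: stream='
  #   lines = ['noise', leader + '[ RUN      ] Foo.Bar']
  #   [l[len(leader):] for l in lines if l.startswith(leader)]
  #   # -> ['[ RUN      ] Foo.Bar']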
|
abelectronicsuk/ABElectronics_Python_Libraries
|
IOPi/demos/demo_iopireadwrite.py
|
Python
|
gpl-2.0
| 2,479
| 0.000403
|
#!/usr/bin/env python
"""
================================================
ABElectronics IO Pi | Digital I/O Read and Write Demo
Requires python smbus to be installed
For Python 2 install with: sudo apt-get install python-smbus
For Python 3 install with: sudo apt-get install python3-smbus
run with: python demo_iopireadwrite.py
================================================
This example reads pin 1 of bus 1 on the IO Pi board and sets
pin 1 of bus 2 to match.
The internal pull-up resistors are enabled so the input pin
will read as 1 unless the pin is connected to ground.
Initialise the IOPi device using the default addresses; you will need to
change the addresses if you have changed the jumpers on the IO Pi
"""
from __future__ import absolute_import, division, print_function, \
    unicode_literals
import time
try:
from IOPi import IOPi
except ImportError:
print("Failed to import IOPi from python system path")
print("Importing from parent folder instead")
try:
import sys
sys.path.append("..")
from IOPi import IOPi
except ImportError:
raise ImportError(
"Failed to import library from parent folder")
def main():
"""
Main program function
"""
    # create two instances of the IOPi class called iobus1 and iobus2 and set
# the default i2c addresses
iobus1 = IOPi(0x20) # bus 1 will be inputs
iobus2 = IOPi(0x21) # bus 2 will be outputs
    # Each bus is divided up into two 8 bit ports. Port 0 controls pins 1 to 8,
# Port 1 controls pins 9 to 16.
# We will read the inputs on pin 1 of bus 1 so set port 0 to be inputs and
# enable the internal pull-up resistors
iobus1.set_port_direction(0, 0xFF)
iobus1.set_port_pullups(0, 0xFF)
# We will write to the output pin 1 on bus 2 so set port 0 to be outputs
# and turn off the pins on port 0
iobus2.set_port_direction(0, 0x00)
iobus2.write_port(0, 0x00)
while True:
# read pin 1 on bus 1. If pin 1 is high set the output on
# bus 2 pin 1 to high, otherwise set it to low.
# connect pin 1 on bus 1 to ground to see the output on
# bus 2 pin 1 change state.
if iobus1.read_pin(1) == 1:
iobus2.write_pin(1, 1)
else:
iobus2.write_pin(1, 0)
# wait 0.1 seconds before reading the pins again
time.sleep(0.1)
if __name__ == "__main__":
main()
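# A minimal variant of the polling loop above (a sketch): mirror all eight
# pins of port 0 in one call. write_port is used above; read_port is assumed
# to be the matching bulk read in the IOPi API.
#
#   while True:
#       iobus2.write_port(0, iobus1.read_port(0))
#       time.sleep(0.1)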
|
goldhand/product-purchase
|
config/settings/local.py
|
Python
|
bsd-3-clause
| 2,534
| 0.001184
|
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
import os
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
STRIPE_PUBLIC_KEY = os.environ.get("STRIPE_PUBLIC_KEY", "pk_test_4XMRbU6H6Jf5B2TXmICnvXS7")
STRIPE_SECRET_KEY = os.environ.get("STRIPE_SECRET_KEY", "sk_test_4XMRnH3aMfrhHN1nZO2uzcDE")
DJSTRIPE_PLANS = {
"m
|
onthly": {
"stripe_plan_id": "pro-monthly",
"name": "Web App Pro ($24.99/month)",
"description": "The monthly subscription plan to WebApp",
"price": 2499, # $24.99
"currency": "usd",
"interval": "month"
},
"yearly": {
"stripe_plan_id": "pro-yearly",
"name": "
|
Web App Pro ($199/year)",
"description": "The annual subscription plan to WebApp",
"price": 19900, # $199.00
"currency": "usd",
"interval": "year"
}
}
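# Sanity sketch (illustrative only): prices above are integers in cents, so
# the monthly plan resolves to $24.99 and the yearly plan to $199.00.
#
#   assert DJSTRIPE_PLANS["monthly"]["price"] == 2499
#   assert DJSTRIPE_PLANS["yearly"]["price"] == 19900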
|
harshilasu/LinkurApp
|
y/google-cloud-sdk/platform/gcutil/lib/google_compute_engine/gcutil_lib/mock_api_types.py
|
Python
|
gpl-3.0
| 13,909
| 0.008412
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for mocking Google Compute Engine API method calls."""
import base64
import collections
import json
import re
import urlparse
from gcutil_lib import gcutil_logging
LOGGER = gcutil_logging.LOGGER
Property = collections.namedtuple('Property', ('name', 'type'))
Parameter = collections.namedtuple('Parameter', ('name', 'type'))
class ValidationError(Exception):
"""Raised when request or response validation fails."""
class _ApiType(object):
__slots__ = tuple()
# pylint: disable=unused-argument
def Validate(self, method, path, value):
raise ValidationError(
        '{name} doesn\'t support validation'.format(
            name=self.__class__.__name__))
# pylint: disable=unused-argument
def ValidateString(self, method, path, value):
raise ValidationError(
'{name} doesn\'t support validation of a string value'.format(
name=self.__class__.__name__))
class AnyType(_ApiType):
"""Represents discovery type 'any'."""
__slots__ = tuple()
def Validate(self, method, path, value):
pass
class BooleanType(_ApiType):
"""Represents discovery type 'bool'."""
__slots__ = tuple()
def Validate(self, method, path, value):
if not isinstance(value, bool):
raise ValidationError(
'{method} {path}: expected bool, but received {value}'.format(
method=method, path=path, value=value))
def ValidateString(self, method, path, value):
"""Validates boolean value serialized as string."""
if value == 'true':
boolean_value = True
elif value == 'false':
boolean_value = False
else:
raise ValidationError(
'{method} {path}: expected string with boolean value, '
'but received {value}'.format(method=method, path=path, value=value))
self.Validate(method, path, boolean_value)
class StringType(_ApiType):
"""Represents discovery types in the string family."""
__slots__ = ('_format',)
DATE_TIME_REGEX = re.compile(
r'^'
r'(?P<year>\d{4})'
r'-'
r'(?P<month>\d{2})'
r'-'
r'(?P<day>\d{2})'
r'[Tt]'
r'(?P<hour>\d{2})'
r':'
r'(?P<minute>\d{2})'
r':'
r'(?P<second>\d{2})'
r'(\.(?P<fraction>\d+))?'
r'[Zz]'
r'$')
DATE_REGEX = re.compile(
r'^'
r'(?P<year>\d{4})'
r'-'
r'(?P<month>\d{2})'
r'-'
r'(?P<day>\d{2})'
r'$')
MIN_UINT64 = 0
MAX_UINT64 = 2 ** 64 - 1
MIN_INT64 = -(2 ** 63)
MAX_INT64 = 2 ** 63 - 1
def __init__(self, value_format):
self._format = value_format
def _ValidateByte(self, method, path, value):
"""Validates base64url encoded string."""
try:
if type(value) is unicode:
base64.urlsafe_b64decode(value.encode('ascii'))
else:
base64.urlsafe_b64decode(value)
except TypeError:
raise ValidationError(
'{method} {path}: expected base64url but received {value}'.format(
method=method, path=path, value=value))
def _ValidateDate(self, method, path, value):
"""Validates an RFC3339 date."""
if not self.DATE_REGEX.match(value):
raise ValidationError(
'{method} {path}: expected RFC3339 date, but received {value}'.format(
method=method, path=path, value=value))
def _ValidateDateTime(self, method, path, value):
"""Validates RFC3339 timestamp."""
if not self.DATE_TIME_REGEX.match(value):
raise ValidationError(
          '{method} {path}: expected RFC3339 date-time, but received {value}'.format(
method=method, path=path, value=value))
def _ValidateInt64(self, method, path, value):
"""Validates an int64 value MIN_INT64 <= value <= MAX_INT64."""
try:
long_value = long(value)
except ValueError:
raise ValidationError(
'{method} {path}: expected int64 but received {value}'.format(
method=method, path=path, value=value))
if not self.MIN_INT64 <= long_value <= self.MAX_INT64:
raise ValidationError(
'{method} {path}: int64 value {value} not in range '
'MIN_INT64..MAX_INT64'.format(
method=method, path=path, value=value))
def _ValidateUInt64(self, method, path, value):
"""Validates an uint64 value 0 <= value <= MAX_INT64."""
try:
long_value = long(value)
except ValueError:
raise ValidationError(
'{method} {path}: expected int64 but received {value}'.format(
method=method, path=path, value=value))
if not self.MIN_UINT64 <= long_value <= self.MAX_UINT64:
raise ValidationError(
'{method} {path}: int64 value {value} not in range '
'MIN_INT64..MAX_INT64'.format(
method=method, path=path, value=value))
def Validate(self, method, path, value):
if not isinstance(value, basestring):
raise ValidationError(
'{method} {path}: expected string, but received {value}'.format(
method=method, path=path, value=value))
if self._format == 'byte':
self._ValidateByte(method, path, value)
elif self._format == 'date':
self._ValidateDate(method, path, value)
elif self._format == 'date-time':
self._ValidateDateTime(method, path, value)
elif self._format == 'int64':
self._ValidateInt64(method, path, value)
elif self._format == 'uint64':
self._ValidateUInt64(method, path, value)
def ValidateString(self, method, path, value):
self.Validate(method, path, value)
class IntegerType(_ApiType):
"""Represents an integer type in the API type system."""
__slots__ = ('_format',)
def __init__(self, value_format):
self._format = value_format
def Validate(self, method, path, value):
if not isinstance(value, (int, long)):
raise ValidationError(
'{method} {path}: expected int32, but received {value}'.format(
method=method, path=path, value=value))
if self._format == 'uint32':
if not 0 <= value <= 4294967295:
raise ValidationError(
'{method} {path}: value {value} not in the uint32 range '
'0 .. 4294967295'.format(method=method, path=path, value=value))
elif not -2147483648 <= value <= 2147483647:
raise ValidationError(
'{method} {path}: value {value} not in the int32 range '
'-2147483648 .. 2147483647'.format(
method=method, path=path, value=value))
def ValidateString(self, method, path, value):
try:
integer_value = long(value)
except ValueError:
raise ValidationError(
'{method} {path}: value {value} not an integer'.format(
method=method, path=path, value=value))
self.Validate(method, path, integer_value)
class NumberType(_ApiType):
"""Represents a floating point number in the API type system."""
__slots__ = ('_format',)
def __init__(self, value_format):
self._format = value_format
def Validate(self, method, path, value):
if not isinstance(value, (int, long, float)):
raise ValidationError(
'{method} {path}: expected number but received {value}'.format(
method=method, path=path, value=value))
def ValidateString(self, method, path, value):
try:
float_value = float(value)
except ValueError:
raise ValidationError(
'{method} {path}: expected number but received {value}'.format(
method=method, path=path, value=value))
self.Validate(method, path, float_value)
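# Illustrative usage of the validators above (a sketch, not part of the
# module): each call returns None on success and raises ValidationError on
# malformed input.
#
#   StringType('date').Validate('GET', 'body.creationDate', '2013-08-01')
#   IntegerType('uint32').ValidateString('GET', 'maxResults', '500')
#   BooleanType().ValidateString('GET', 'detail', 'true')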
class ArrayType(_ApiType):
__slots__ = ('_element',)
def __init__(self, elem
|
sameerparekh/pants
|
tests/python/pants_test/backend/jvm/tasks/jvm_compile/java/test_apt_compile_integration.py
|
Python
|
apache-2.0
| 4,227
| 0.009936
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.util.contextutil import temporary_dir
from pants_test.backend.jvm.tasks.jvm_compile.base_compile_integration_test import BaseCompileIT
class AptCompileIntegrationTest(BaseCompileIT):
def test_apt_compile(self):
with self.do_test_compile('testprojects/src/java/org/pantsbuild/testproject/annotation/processor',
expected_files=['ResourceMappingProcessor.class',
'javax.annotation.processing.Processor']) as found:
self.assertTrue(
self.get_only(found, 'ResourceMappingProcessor.class').endswith(
'org/pantsbuild/testproject/annotation/processor/ResourceMappingProcessor.class'))
processor_service_files = found['javax.annotation.processing.Processor']
# There should be only a per-target service info file.
self.assertEqual(1, len(processor_service_files))
processor_service_file = list(processor_service_files)[0]
self.assertTrue(processor_service_file.endswith(
'META-INF/services/javax.annotation.processing.Processor'))
with open(processor_service_file) as fp:
self.assertEqual('org.pantsbuild.testproject.annotation.processor.ResourceMappingProcessor',
fp.read().strip())
def test_apt_compile_and_run(self):
with self.do_test_compile('testprojects/src/java/org/pantsbuild/testproject/annotation/main',
expected_files=['Main.class',
'deprecation_report.txt']) as found:
self.assertTrue(
self.get_only(found, 'Main.class').endswith(
'org/pantsbuild/testproject/annotation/main/Main.class'))
# This is the proof that the ResourceMappingProcessor annotation processor was compiled in a
# round and then the Main was compiled in a later round with the annotation processor and its
# service info file from on its compile classpath.
with open(self.get_only(found, 'deprecation_report.txt')) as fp:
self.assertIn('org.pantsbuild.testproject.annotation.main.Main', fp.read().splitlines())
def test_stale_apt_with_deps(self):
"""An annotation processor with a dependency doesn't pollute other annotation processors.
At one point, when you added an annotation processor, it stayed configured for all subsequent
compiles. Meaning that if that annotation processor had a dep that wasn't on the classpath,
subsequent compiles would fail with missing symbols required by the stale annotation processor.
"""
# Demonstrate that the annotation processor is working
with self.do_test_compile(
'testprojects/src/java/org/pantsbuild/testproject/annotation/processorwithdep/main',
        expected_files=['Main.class', 'Main_HelloWorld.class', 'Main_HelloWorld.java']) as found:
gen_file = self.get_only(found, 'Main_HelloWorld.java')
self.assertTrue(gen_file.endswith(
'org/pantsbuild/testproject/annotation/processorwithdep/main/Main_HelloWorld.java'),
msg='{} does not match'.format(gen_file))
# Try to reproduce second compile that fails with missing symbol
with temporary_dir(root_dir=self.workdir_root()) as workdir:
with temporary_dir(root_dir=self.workdir_root()) as cachedir:
# This annotation processor has a unique external dependency
self.assert_success(self.run_test_compile(
workdir,
cachedir,
'testprojects/src/java/org/pantsbuild/testproject/annotation/processorwithdep::'))
# When we run a second compile with annotation processors, make sure the previous annotation
# processor doesn't stick around to spoil the compile
self.assert_success(self.run_test_compile(
workdir,
cachedir,
'testprojects/src/java/org/pantsbuild/testproject/annotation/processor::',
clean_all=False))
|
rickyHong/Tensorflow_modi
|
tensorflow/python/kernel_tests/gather_op_test.py
|
Python
|
apache-2.0
| 2,641
| 0.012874
|
"""Tests for tensorflow.ops.tf.gather."""
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class GatherTest(tf.test.TestCase):
def testScalar1D(self):
with self.test_session():
params = tf.constant([0, 1, 2, 3, 7, 5])
indices = tf.constant(4)
gather_t = tf.gather(params, indices)
gather_val = gather_t.eval()
self.assertAllEqual(7, gather_val)
self.assertEqual([], gather_t.get_shape())
def testScalar2D(self):
with self.test_session():
params = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
indices = tf.constant(2)
gather_t = tf.gather(params, indices)
gather_val = gather_t.eval()
self.assertAllEqual([6, 7, 8], gather_val)
self.assertEqual([3], gather_t.get_shape())
def testSimpleTwoD32(self):
with self.test_session():
params = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
indices = tf.constant([0, 4, 0, 2])
gather_t = tf.gather(params, indices)
gather_val = gather_t.eval()
self.assertAllEqual([[0, 1, 2], [12, 13, 14], [0, 1, 2], [6, 7, 8]],
gather_val)
      self.assertEqual([4, 3], gather_t.get_shape())
def testHigherRank(self):
np.random.seed(1)
shape = (4, 3, 2)
params = np.random.randn(*shape)
indices = np.random.randint(shape[0], size=15).reshape(3, 5)
with self.test_session():
tf_params = tf.constant(params)
tf_indices = tf.constant(indices)
gather = tf.gather(tf_params, tf_indices)
self.assertAllEqual(params[indices], gather.eval())
self.assertEqual(indices.shape + params.shape[1:], gather.get_shape())
# Test gradients
gather_grad = np.random.randn(*gather.get_shape().as_list())
params_grad, indices_grad = tf.gradients(gather, [tf_params, tf_indices],
gather_grad)
self.assertEqual(indices_grad, None)
self.assertEqual(type(params_grad), tf.IndexedSlices)
params_grad = tf.convert_to_tensor(params_grad)
correct_params_grad = np.zeros(shape)
for i, g in zip(indices.ravel(), gather_grad.reshape((15,) + shape[1:])):
correct_params_grad[i] += g
self.assertAllEqual(correct_params_grad, params_grad.eval())
def testUnknownIndices(self):
params = tf.constant([[0, 1, 2]])
indices = tf.placeholder(tf.int32)
gather_t = tf.gather(params, indices)
self.assertEqual(None, gather_t.get_shape())
if __name__ == "__main__":
tf.test.main()
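# The semantics under test, sketched in plain numpy: along axis 0,
# tf.gather(params, indices) computes params[indices].
#
#   params = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
#   indices = np.array([2, 0])
#   params[indices]  # -> [[6, 7, 8], [0, 1, 2]]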
|
achedeuzot/django-envconf
|
envconf/__init__.py
|
Python
|
mit
| 131
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .envconf import *
from .path import *
__version__ = '0.3.5'
| |
psachin/swift
|
swift/common/manager.py
|
Python
|
apache-2.0
| 26,766
| 0.000037
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import functools
import errno
import os
import resource
import signal
import time
import subprocess
import re
from swift import gettext_ as _
from swift.common.utils import search_tree, remove_file, write_file
from swift.common.exceptions import InvalidPidFileException
SWIFT_DIR = '/etc/swift'
RUN_DIR = '/var/run/swift'
PROC_DIR = '/proc'
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
'container-replicator', 'container-reconciler',
'container-server', 'container-sync',
'container-updater', 'object-auditor', 'object-server',
'object-expirer', 'object-replicator',
'object-reconstructor', 'object-updater',
'proxy-server', 'account-replicator', 'account-reaper']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
# aliases mapping
ALIASES = {'all': ALL_SERVERS, 'main': MAIN_SERVERS, 'rest': REST_SERVERS}
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS
START_ONCE_SERVERS = REST_SERVERS
# These are servers that match a type (account-*, container-*, object-*) but
# don't use that type-server.conf file and instead use their own.
STANDALONE_SERVERS = ['object-expirer', 'container-reconciler']
KILL_WAIT = 15 # seconds to wait for servers to die (by default)
WARNING_WAIT = 3 # seconds to wait after message that may just be a warning
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB
MAX_PROCS = 8192  # workers * disks, can get high
def setup_env():
"""Try to increase resource limits of the OS. Mov
|
e PYTHON_EGG_CACHE to /tmp
"""
try:
resource.setrlimit(resource.RLIMIT_NOFILE,
(MAX_DESCRIPTORS, MAX_DESCRIPTORS))
except ValueError:
print(_("WARNING: Unable to modify file descriptor limit. "
"Running as non-root?"))
try:
resource.setrlimit(resource.RLIMIT_DATA,
(MAX_MEMORY, MAX_MEMORY))
except ValueError:
print(_("WARNING: Unable to modify memory limit. "
"Running as non-root?"))
try:
resource.setrlimit(resource.RLIMIT_NPROC,
(MAX_PROCS, MAX_PROCS))
except ValueError:
print(_("WARNING: Unable to modify max process limit. "
"Running as non-root?"))
# Set PYTHON_EGG_CACHE if it isn't already set
os.environ.setdefault('PYTHON_EGG_CACHE', '/tmp')
def command(func):
"""
    Decorator to declare which methods are accessible as commands. Commands
    always return 1 or 0, where 0 indicates success.
:param func: function to make public
"""
func.publicly_accessible = True
@functools.wraps(func)
def wrapped(*a, **kw):
rv = func(*a, **kw)
return 1 if rv else 0
return wrapped
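# Contract illustration (hypothetical methods): a falsy return value maps to
# 0 (success), anything truthy maps to 1.
#
#   class Demo(object):
#       @command
#       def ok(self, **kwargs):
#           return None      # wrapped call returns 0
#       @command
#       def broken(self, **kwargs):
#           return 'failed'  # wrapped call returns 1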
def watch_server_pids(server_pids, interval=1, **kwargs):
"""Monitor a collection of server pids yielding back those pids that
aren't responding to signals.
:param server_pids: a dict, lists of pids [int,...] keyed on
Server objects
"""
status = {}
start = time.time()
end = start + interval
server_pids = dict(server_pids) # make a copy
while True:
for server, pids in server_pids.items():
for pid in pids:
try:
# let pid stop if it wants to
os.waitpid(pid, os.WNOHANG)
except OSError as e:
if e.errno not in (errno.ECHILD, errno.ESRCH):
raise # else no such child/process
# check running pids for server
status[server] = server.get_running_pids(**kwargs)
for pid in pids:
# original pids no longer in running pids!
if pid not in status[server]:
yield server, pid
# update active pids list using running_pids
server_pids[server] = status[server]
if not [p for server, pids in status.items() for p in pids]:
# no more running pids
break
if time.time() > end:
break
else:
time.sleep(0.1)
def safe_kill(pid, sig, name):
"""Send signal to process and check process name
    :param pid: process id
    :param sig: signal to send
    :param name: name to ensure target process
"""
# check process name for SIG_DFL
if sig == signal.SIG_DFL:
try:
proc_file = '%s/%d/cmdline' % (PROC_DIR, pid)
if os.path.exists(proc_file):
with open(proc_file, 'r') as fd:
if name not in fd.read():
# unknown process is using the pid
raise InvalidPidFileException()
except IOError:
pass
os.kill(pid, sig)
def kill_group(pid, sig):
"""Send signal to process group
    :param pid: process id
    :param sig: signal to send
"""
# Negative PID means process group
os.kill(-pid, sig)
class UnknownCommandError(Exception):
pass
class Manager(object):
"""Main class for performing commands on groups of servers.
:param servers: list of server names as strings
"""
def __init__(self, servers, run_dir=RUN_DIR):
self.server_names = set()
self._default_strict = True
for server in servers:
if server in ALIASES:
self.server_names.update(ALIASES[server])
self._default_strict = False
elif '*' in server:
# convert glob to regex
self.server_names.update([
s for s in ALL_SERVERS if
re.match(server.replace('*', '.*'), s)])
self._default_strict = False
else:
self.server_names.add(server)
self.servers = set()
for name in self.server_names:
self.servers.add(Server(name, run_dir))
def __iter__(self):
return iter(self.servers)
@command
def status(self, **kwargs):
"""display status of tracked pids for server
"""
status = 0
for server in self.servers:
status += server.status(**kwargs)
return status
@command
def start(self, **kwargs):
"""starts a server
"""
setup_env()
status = 0
strict = kwargs.get('strict')
# if strict not set explicitly
if strict is None:
strict = self._default_strict
for server in self.servers:
status += 0 if server.launch(**kwargs) else 1
if not strict:
status = 0
if not kwargs.get('daemon', True):
for server in self.servers:
try:
status += server.interact(**kwargs)
except KeyboardInterrupt:
print(_('\nuser quit'))
self.stop(**kwargs)
break
elif kwargs.get('wait', True):
for server in self.servers:
status += server.wait(**kwargs)
return status
@command
def no_wait(self, **kwargs):
"""spawn server and return immediately
"""
kwargs['wait'] = False
return self.start(**kwargs)
@command
def no_daemon(self, **kwargs):
"""start a server interactively
"""
kwargs['dae
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/eggs/docutils-0.7-py2.7.egg/docutils/statemachine.py
|
Python
|
gpl-3.0
| 56,989
| 0.000544
|
# $Id: statemachine.py 6314 2010-04-26 10:04:17Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A finite state machine specialized for regular-expression-based text filters,
this module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
       b) Include a list of initial transitions to be set up automatically, in
          `State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to
cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.firstknown_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
def __init__(self, state_classes, initial_state, debug=0):
"""
Initialize a `StateMachine` object; add state objects.
Parameters:
- `state_classes`: a list of `State` (sub)classes.
- `initial_state`: a string, the class name of the initial state.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.input_lines = None
"""`StringList` of input lines (without newlines).
Filled by `self.run()`."""
self.input_offset = 0
"""Offset of `self.input_lines` from the beginning of the file."""
self.line = None
"""Current input line."""
self.line_offset = -1
"""Current input line offset from beginning of `self.input_lines`."""
self.debug = debug
"""Debugging mode on/off."""
self.initial_state = initial_state
"""The name of the initial state (key to `self.states`)."""
self.current_state = initial_state
"""The name of the current state (key to `self.states`)."""
self.states = {}
"""Mapping of {state_name: State_object}."""
self.add_states(state_classes)
self.observers = []
"""List of bound methods or functions to call whenever the current
line changes. Observers are called with one argument, ``self``.
Cleared at the end of `run()`."""
def unlink(self):
"""Remove circular references to objects no longer required."""
for state in self.states.values():
state.unlink()
self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
"""
Run the state machine on `input_lines`. Return results (a list).
Reset `self.line_offset` and `self.current_state`. Run the
beginning-of-file transition. Input one line at a time and check for a
matching transition. If a match is found, call the transition method
and possibly change the state. Store the context returned by the
transition method to be passed on to the next transition matched.
Accumulate the results returned by the transition methods in a list.
Run the end-of-file transition. Finally, return the accumulated
results.
Parameters:
- `input_lines`: a list of strings without newlines, or `StringList`.
- `input_offset`: the line offset of `input_lines` from the beginning
of the file.
- `context`: application-specific storage.
- `input_source`: name or path of source of `input_lines`.
- `initial_state`: name of initial state.
"""
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = initial_state or self.initial_state
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, '\n| '.join(self.input_lines)))
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print >>sys.stderr, ('\nStateMachine.run: bof transition')
context, result = state.bof(context)
results.extend(result)
while 1:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print >>sys.stderr, (
'\nStateMachine.run: line (source=%r, '
'offset=%r):\n| %s'
|
jokerdino/unity-tweak-tool
|
UnityTweakTool/section/appearance.py
|
Python
|
gpl-3.0
| 5,247
| 0.019821
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Team:
# J Phani Mahesh <phanimahesh@gmail.com>
# Barneedhar (jokerdino) <barneedhar@ubuntu.com>
# Amith KK <amithkumaran@gmail.com>
# Georgi Karavasilev <motorslav@gmail.com>
# Sam Tran <samvtran@gmail.com>
# Sam Hewitt <hewittsamuel@gmail.com>
# Angel Araya <al.arayaq@gmail.com>
#
# Description:
# A One-stop configuration tool for Unity.
#
# Legal Stuff:
#
# This file is a part of Unity Tweak Tool
#
# Unity Tweak Tool is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# Unity Tweak Tool is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <https://www.gnu.org/licenses/gpl-3.0.txt>
from UnityTweakTool.section.skeletonpage import Section,Tab
from UnityTweakTool.elements.fontbutton import FontButton
from UnityTweakTool.elements.cbox import ComboBox
from UnityTweakTool.elements.spin import SpinButton
from UnityTweakTool.elements.radio import Radio
from UnityTweakTool.elements.checkbox import CheckBox
from UnityTweakTool.section.spaghetti.theme import Themesettings as SpaghettiThemeSettings
from UnityTweakTool.elements.option import Option,HandlerObject
from collections import defaultdict
Appearance =Section(ui='appearance.ui',id='nb_themesettings')
#=============== THEME ==========================
#=============== ICONS ==========================
#=============== CURSOR =========================
#=============== FONTS ==========================
font_default= FontButton({
'id' : 'font_default',
'builder' : Appearance.builder,
'schema' : 'org.gnome.desktop.interface',
'path' : None,
'key' : 'font-name',
'type' : 'string'
})
font_document= FontButton({
'id' : 'font_document',
'builder' : Appearance.builder,
'schema' : 'org.gnome.desktop.interface',
'path' : None,
'key' : 'document-font-name',
'type' : 'string'
})
font_monospace= FontButton({
'id' : 'font_monospace',
'builder' : Appearance.builder,
'schema' : 'org.gnome.desktop.interface',
'path' : None,
'key' : 'monospace-font-name',
'type' : 'string'
})
font_window_title= FontButton({
'id' : 'font_window_title',
'builder' : Appearance.builder,
'schema' : 'org.gnome.desktop.wm.preferences',
'path' : None,
'key' : 'titlebar-font',
'type' : 'string'
})
cbox_antialiasing=ComboBox({
'id' : 'cbox_antialiasing',
'builder' : Appearance.builder,
'schema' : 'org.gnome.settings-daemon.plugins.xsettings',
'path' : None,
'key' : 'antialiasing',
'type' : 'string',
'map' : {'none':0,'grayscale':1,'rgba':2}
})
cbox_hinting=ComboBox({
'id' : 'cbox_hinting',
'builder' : Appearance.builder,
'schema' : 'org.gnome.settings-daemon.plugins.xsettings',
'path' : None,
'key' : 'hinting',
'type' : 'string',
'map' : {'none':0,'slight':1,'medium':2,'full':3}
})
spin_textscaling=SpinButton({
'id' : 'spin_textscaling',
'builder': Appearance.builder,
'schema' : 'org.gnome.desktop.interface',
'path' : None,
'key' : 'text-scaling-factor',
'type' : 'double',
'min' : 0.50,
'max' : 3.00
})
Fonts=Tab([font_default,
font_document,
font_monospace,
font_window_title,
cbox_antialiasing,
cbox_hinting,
spin_textscaling])
#========== WINDOW CONTROLS =====================
radio_left=Radio({
'id' : 'radio_left',
'builder' : Appearance.builder,
'schema' : 'org.gnome.desktop.wm.preferences',
'path' : None,
'key' : 'button-layout',
'type' : 'string',
'group' : 'radio_left',
'value' : 'close,minimize,maximize:',
'dependants': []
})
radio_right=Radio({
'id' : 'radio_right',
'builder' : Appearance.builder,
'schema' : 'org.gnome.desktop.wm.preferences',
'path' : None,
'key' : 'button-layout',
'type' : 'string',
'group' : 'radio_right',
'value' : ':minimize,maximize,close',
'dependants': []
})
WindowControls=Tab([radio_left,
radio_right])
# Pass in the id of restore defaults button to enable it.
Fonts.enable_restore('b_theme_font_reset')
WindowControls.enable_restore('b_window_control_reset')
# Each page must be added using add_page
Appearance.add_page(Fonts)
# XXX : Disabled since the implementation is inadequate
# Appearance.add_page(WindowControls)
themesettings=HandlerObject(SpaghettiThemeSettings(Appearance.builder))
Appearance.add_page(themesettings)
# After all pages are added, the section needs to be registered to start listening for events
Appearance.register()
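# For reference, this module's whole pattern: build elements, group them into
# Tab objects, add_page() each page onto the Section, then register() once at
# the end. A new page would follow the same shape, added before the
# register() call above (names below are hypothetical):
#
#   new_tab = Tab([some_element, another_element])
#   new_tab.enable_restore('b_new_reset')
#   Appearance.add_page(new_tab)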
|
Yegorov/http-prompt
|
http_prompt/execution.py
|
Python
|
mit
| 11,589
| 0
|
import re
import click
import six
from httpie.context import Environment
from httpie.core import main as httpie_main
from parsimonious.exceptions import ParseError, VisitationError
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
from six import BytesIO
from six.moves.urllib.parse import urljoin
from .completion import ROOT_COMMANDS, ACTIONS, OPTION_NAMES, HEADER_NAMES
from .context import Context
from .utils import unescape
grammar = Grammar(r"""
command = mutation / immutation
mutation = concat_mut+ / nonconcat_mut
immutation = preview / action / help / exit / _
concat_mut = option_mut / full_quoted_mut / value_quoted_mut / unquoted_mut
nonconcat_mut = cd / rm
preview = _ tool _ (method _)? (urlpath _)? concat_mut*
action = _ method _ (urlpath _)? concat_mut*
urlpath = (~r"https?://" unquoted_string) / (!concat_mut string)
help = _ "help" _
exit = _ "exit" _
unquoted_mut = _ unquoted_mutkey mutop unquoted_mutval _
full_quoted_mut = full_squoted_mut / full_dquoted_mut
value_quoted_mut = value_squoted_mut / value_dquoted_mut
full_squoted_mut = _ "'" squoted_mutkey mutop squoted_mutval "'" _
full_dquoted_mut = _ '"' dquoted_mutkey mutop dquoted_mutval '"' _
value_squoted_mut = _ unquoted_mutkey mutop "'" squoted_mutval "'" _
value_dquoted_mut = _ unquoted_mutkey mutop '"' dquoted_mutval '"' _
mutop = ":" / "==" / "="
unquoted_mutkey = unquoted_mutkey_item+
unquoted_mutval = unquoted_stringitem*
unquoted_mutkey_item = unquoted_mutkey_char / escapeseq
unquoted_mutkey_char = ~r"[^\s'\"\\=:]"
squoted_mutkey = squoted_mutkey_item+
squoted_mutval = squoted_stringitem*
squoted_mutkey_item = squoted_mutkey_char / escapeseq
squoted_mutkey_char = ~r"[^\r\n'\\=:]"
dquoted_mutkey = dquoted_mutkey_item+
dquoted_mutval = dquoted_stringitem*
dquoted_mutkey_item = dquoted_mutkey_char / escapeseq
dquoted_mutkey_char = ~r'[^\r\n"\\=:]'
option_mut = flag_option_mut / value_option_mut
flag_option_mut = _ flag_optname _
flag_optname = "--json" / "-j" / "--form" / "-f" / "--verbose" / "-v" /
"--headers" / "-h" / "--body" / "-b" / "--stream" / "-S" /
"--download" / "-d" / "--continue" / "-c" / "--follow" /
"--check-status" / "--ignore-stdin" / "--help" /
"--version" / "--traceback" / "--debug"
value_option_mut = _ value_optname ~r"(\s+|=)" string _
value_optname = "--pretty" / "--style" / "-s" / "--print" / "-p" /
"--output" / "-o" / "--session" / "--session-read-only" /
"--auth" / "-a" / "--auth-type" / "--proxy" / "--verify" /
"--cert" / "--cert-key" / "--timeout"
cd = _ "cd" _ string _
rm = (_ "rm" _ "*" _) / (_ "rm" _ ~r"\-(h|q|b|o)" _ mutkey _)
tool = "httpie" / "curl"
method = ~r"get"i / ~r"head"i / ~r"post"i / ~r"put"i / ~r"delete"i /
~r"patch"i
mutkey = unquoted_mutkey / ("'" squoted_mutkey "'") /
('"' dquoted_mutkey '"') / flag_optname / value_optname
string = quoted_string / unquoted_string
quoted_string = ('"' dquoted_stringitem* '"') /
("'" squoted_stringitem* "'")
unquoted_string = unquoted_stringitem+
dquoted_stringitem = dquoted_stringchar / escapeseq
squoted_stringitem = squoted_stringchar / escapeseq
unquoted_stringitem = unquoted_stringchar / escapeseq
dquoted_stringchar = ~r'[^\r\n"\\]'
squoted_stringchar = ~r"[^\r\n'\\]"
unquoted_stringchar = ~r"[^\s'\"\\]"
escapeseq = ~r"\\."
_ = ~r"\s*"
""")
def urljoin2(base, path, **kwargs):
if not base.endswith('/'):
base += '/'
url = urljoin(base, path, **kwargs)
if url.endswith('/') and not path.endswith('/'):
url = url[:-1]
return url
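# urljoin2 behaves like urljoin but normalizes trailing slashes; two
# illustrative cases:
#
#   urljoin2('http://example.com/api', 'users')    # -> .../api/users
#   urljoin2('http://example.com/api/', 'users/')  # -> .../api/users/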
def generate_help_text():
"""Return a formatted string listing commands, HTTPie options, and HTTP
actions.
"""
def generate_cmds_with_explanations(summary, cmds):
text = '{0}:\n'.format(summary)
for cmd, explanation in cmds:
text += '\t{0:<10}\t{1:<20}\n'.format(cmd, explanation)
return text + '\n'
text = generate_cmds_with_explanations('Commands', ROOT_COMMANDS.items())
text += generate_cmds_with_explanations('Options', OPTION_NAMES.items())
text += generate_cmds_with_explanations('Actions', ACTIONS.items())
text += generate_cmds_with_explanations('Headers', HEADER_NAMES.items())
return text
class ExecutionVisitor(NodeVisitor):
def __init__(self, context):
super(ExecutionVisitor, self).__init__()
self.context = context
self.context_override = Context(context.url)
self.method = None
self.tool = None
def visit_method(self, node, children):
self.method = node.text
return node
    def visit_urlpath(self, node, children):
path = node.text
self.context_override.url = urljoin2(self.context_override.url, path)
return node
def visit_cd(self, node, children):
        _, _, _, path, _ = children
self.context_override.url = urljoin2(self.context_override.url, path)
return node
def visit_rm(self, node, children):
children = children[0]
kind = children[3].text
if kind == '*':
# Clear context
for target in [self.context.headers,
self.context.querystring_params,
self.context.body_params,
self.context.options]:
target.clear()
return node
name = children[5]
if kind == '-h':
target = self.context.headers
elif kind == '-q':
target = self.context.querystring_params
elif kind == '-b':
target = self.context.body_params
else:
assert kind == '-o'
target = self.context.options
del target[name]
return node
def visit_help(self, node, children):
click.echo_via_pager(generate_help_text())
return node
def visit_exit(self, node, children):
self.context.should_exit = True
return node
def visit_mutkey(self, node, children):
if isinstance(children[0], list):
return children[0][1]
return children[0]
def _mutate(self, node, key, op, val):
if op == ':':
target = self.context_override.headers
elif op == '==':
target = self.context_override.querystring_params
elif op == '=':
target = self.context_override.body_params
target[key] = val
return node
def visit_unquoted_mut(self, node, children):
_, key, op, val, _ = children
return self._mutate(node, key, op, val)
def visit_full_squoted_mut(self, node, children):
_, _, key, op, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_full_dquoted_mut(self, node, children):
_, _, key, op, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_value_squoted_mut(self, node, children):
_, key, op, _, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_value_dquoted_mut(self, node, children):
_, key, op, _, val, _, _ = children
return self._mutate(node, key, op, val)
def visit_unquoted_mutkey(self, node, children):
return unescape(node.text)
def visit_squoted_mutkey(self, node, children):
return node.text
def visit_dquoted_mutkey(self, node, children):
return node.text
def visit_mutop(self, node, children):
return node.text
def visit_unquoted_mutval(self, node, children):
return unescape(node.text)
def visit_squoted_mutval(self, node, children):
return node.text
def visit_dquoted_mutval(self, node, children):
return node.text
def visit_flag_option_mut(self, node, children):
_, key, _ = children
self.context_override.optio
|
BernardFW/bernard
|
src/bernard/misc/start_project/files/src/__project_name_snake__/states.py
|
Python
|
agpl-3.0
| 1,539
| 0
|
# coding: utf-8
from bernard import (
layers as lyr,
)
from bernard.analytics import (
page_view,
)
from bernard.engine import (
BaseState,
)
from bernard.i18n import (
translate as t,
)
class __project_name_camel__State(BaseState):
"""
Root class for __project_name_readable__.
Here you must implement "error" and "confused" to suit your needs. They
are the default functions called when something goes wrong. The ERROR and
CONFUSED texts are defined in `i18n/en/responses.csv`.
"""
@page_view('/bot/error')
async def error(self) -> None:
"""
This happens when something goes wrong (it's the equivalent of the
HTTP error 500).
"""
self.send(lyr.Text(t.ERROR))
@page_view('/bot/confused')
async def confused(self) -> None:
"""
This is called when the user sends a message that triggers no
transitions.
"""
self.send(lyr.Text(t.CONFUSED))
async def handle(self) -> None:
raise NotImplementedError
class Hello(__project_name_camel__State):
"""
Example "Hello" state, to show you how it's done. You can remove it.
    Please note the @page_view decorator that allows tracking the viewing of
    this page using the analytics provider set in the configuration. If there
    is no analytics provider, nothing special will happen and the handler
    will be called as usual.
"""
@page_view('/bot/hello')
async def handle(self):
self.send(lyr.Text(t.HELLO))
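# Adding another state follows the same shape (sketch; a GOODBYE string would
# need a matching entry in i18n/en/responses.csv):
#
#   class Goodbye(__project_name_camel__State):
#       @page_view('/bot/goodbye')
#       async def handle(self) -> None:
#           self.send(lyr.Text(t.GOODBYE))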
|
andrasmaroy/pconf
|
tests/test_env.py
|
Python
|
mit
| 9,384
| 0.000639
|
from mock import MagicMock, mock_open, patch
from unittest import TestCase
from warnings import simplefilter
import pconf
from pconf.store.env import Env
TEST_ENV_BASE_VARS = {
"env__var": "result",
"env__var_2": "second_result",
}
TEST_ENV_MATCHED_VARS = {"matched_var": "match"}
TEST_ENV_WHITELIST_VARS = {"whitelisted_var": "whitelist"}
TEST_SEPARATED_VARS = {"env": {"var": "result", "var_2": "second_result"}}
TEST_ENV_VARS = dict(TEST_ENV_WHITELIST_VARS, **TEST_ENV_MATCHED_VARS)
TEST_SEPARATED_VARS = dict(TEST_SEPARATED_VARS, **TEST_ENV_VARS)
TEST_ENV_VARS = dict(TEST_ENV_VARS, **TEST_ENV_BASE_VARS)
TEST_ENV_CONVERTED = {
"env--var": "result",
"env--var-2": "second_result",
"matched-var": "match",
"whitelisted-var": "whitelist",
}
TEST_ENV_CONVERTED_SEPARATED = {
"env": {"var": "result", "var-2": "second_result"},
"matched-var": "match",
"whitelisted-var": "whitelist",
}
TEST_ENV_UPPERCASE = {
"ENV__VAR": "result",
"ENV__VAR_2": "second_result",
"MATCHED_VAR": "match",
"WHITELISTED_VAR": "whitelist",
}
TEST_ENV_TYPED_VARS = {
"key": "value",
"int": "123",
"float": "1.23",
"complex": "1+2j",
"list": "['list1', 'list2', {'dict_in_list': 'value'}]",
"dict": "{'nested_dict': 'nested_value'}",
"tuple": "(123, 'string')",
"bool": "True",
"boolstring": "false",
"string_with_specials": "Test!@#$%^&*()-_=+[]{};:,<.>/?\\'\"`~",
} # noqa: E501
TEST_ENV_TYPED_VARS_PARSED = {
"key": "value",
"int": 123,
"float": 1.23,
"complex": 1 + 2j,
"list": ["list1", "list2", {"dict_in_list": "value"}],
"dict": {"nested_dict": "nested_value"},
"tuple": (123, "string"),
"bool": True,
"boolstring": False,
"string_with_specials": "Test!@#$%^&*()-_=+[]{};:,<.>/?\\'\"`~",
} # noqa: E501
TEST_ENV_DOCKER_SECRETS = {"MY_EXAMPLE_SECRET_FILE": "/run/secrets/my_example_secret"}
TEST_ENV_DOCKER_SECRETS_INVALID_POSTFIX = {
"MY_EXAMPLE_SECRET": "/run/secrets/my_example_secret"
}
TEST_DOCKER_SECRET_CONTENT = "mysecret"
TEST_DOCKER_SECRETS_RESULT = {"MY_EXAMPLE_SECRET": TEST_DOCKER_SECRET_CONTENT}
TEST_SEPARATOR = "__"
TEST_MATCH = r"^matched"
TEST_WHITELIST = ["whitelisted_var", "whitelist2"]
TEST_PARSE_VALUES = True
TEST_TO_LOWER = True
TEST_CONVERT_UNDERSCORES = True
TEST_DOCKER_SECRETS = list(TEST_ENV_DOCKER_SECRETS.keys())
TEST_DOCKER_SECRETS_INVALID_POSTFIX = ["MY_EXAMPLE_SECRET"]
TEST_DOCKER_SECRETS_PATH = str(list(TEST_DOCKER_SECRETS_RESULT.values())[0])
MOCK_OPEN_FUNCTION = "builtins.open"
def throw_ioerror(*args, **kwargs):
raise IOError("test")
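# What the fixtures above encode, shown as usage (illustrative): with
# separator='__' an environment such as {'env__var': 'result'} comes back as
# a nested dict.
#
#   store = Env(separator='__')
#   store.get()  # -> {'env': {'var': 'result', ...}, ...}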
class TestEnv(TestCase):
def test_default_params(self):
env_store = Env()
self.assertEqual(env_store.separator, None)
self.assertEqual(env_store.match, None)
self.assertEqual(env_store.whitelist, None)
self.assertEqual(env_store.parse_values, False)
self.assertEqual(env_store.to_lower, False)
self.assertEqual(env_store.convert_underscores, False)
def test_optional_params(self):
env_store = Env(
separator=TEST_SEPARATOR,
match=TEST_MATCH,
whitelist=TEST_WHITELIST,
parse_values=TEST_PARSE_VALUES,
to_lower=TEST_TO_LOWER,
convert_underscores=TEST_CONVERT_UNDERSCORES,
)
self.assertEqual(env_store.separator, TEST_SEPARATOR)
self.assertEqual(env_store.match, TEST_MATCH)
self.assertEqual(env_store.whitelist, TEST_WHITELIST)
self.assertEqual(env_store.parse_values, TEST_PARSE_VALUES)
self.assertEqual(env_store.to_lower, TEST_TO_LOWER)
self.assertEqual(env_store.convert_underscores, TEST_CONVERT_UNDERSCORES)
@patch("pconf.store.env.os", new=MagicMock())
def test_get_all_vars(self):
pconf.store.env.os.environ = TEST_ENV_VARS
env_store = Env()
result = env_store.get()
self.assertEqual(result, TEST_ENV_VARS)
self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def test_get_idempotent(self):
pconf.store.env.os.environ = TEST_ENV_VARS
env_store = Env()
result = env_store.get()
self.assertEqual(result, TEST_ENV_VARS)
self.assertIsInstance(result, dict)
pconf.store.env.os.environ = TEST_ENV_BASE_VARS
result = env_store.get()
self.assertEqual(result, TEST_ENV_VARS)
self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def test_whitelist(self):
pconf.store.env.os.environ = TEST_ENV_VARS
env_store = Env(whitelist=TEST_WHITELIST)
result = env_store.get()
self.assertEqual(result, TEST_ENV_WHITELIST_VARS)
self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def test_match(self):
pconf.store.env.os.environ = TEST_ENV_VARS
env_store = Env(match=TEST_MATCH)
result = env_store.get()
self.assertEqual(result, TEST_ENV_MATCHED_VARS)
self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def test_whitelist_and_match(self):
pconf.store.env.os.environ = TEST_ENV_VARS
env_store = Env(match=TEST_MATCH, whitelist=TEST_WHITELIST)
result = env_store.get()
self.assertEqual(result, dict(TEST_ENV_MATCHED_VARS, **TEST_ENV_WHITELIST_VARS))
self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def test_separator(self):
pconf.store.env.os.environ = TEST_ENV_VARS
env_store = Env(separator=TEST_SEPARATOR)
result = env_store.get()
self.assertEqual(result, TEST_SEPARATED_VARS)
self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def test_parse_values(self):
pconf.store.env.os.environ = TEST_ENV_TYPED_VARS
env_store = Env(parse_values=TEST_PARSE_VALUES)
result = env_store.get()
self.assertEqual(result, TEST_ENV_TYPED_VARS_PARSED)
self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def test_lowercase_conversion(self):
pconf.store.env.os.environ = TEST_ENV_UPPERCASE
env_store = Env(to_lower=TEST_TO_LOWER)
result = env_store.get()
self.assertEqual(result, TEST_ENV_VARS)
self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def test_lowercase_and_separator(self):
pconf.store.env.os.environ = TEST_ENV_UPPERCASE
env_store = Env(separator=TEST_SEPARATOR, to_lower=TEST_TO_LOWER)
result = env_store.get()
self.assertEqual(result, TEST_SEPARATED_VARS)
self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def test_convert_underscore_replacement(self):
pconf.store.env.os.environ = TEST_ENV_VARS
env_store = Env(convert_underscores=TEST_CONVERT_UNDERSCORES)
result = env_store.get()
self.assertEqual(result, TEST_ENV_CONVERTED)
self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def test_convert_underscore_and_separator(self):
pconf.store.env.os.environ = TEST_ENV_VARS
env_store = Env(
separator=TEST_SEPARATOR, convert_underscores=TEST_CONVERT_UNDERSCORES
)
result = env_store.get()
self.assertEqual(result, TEST_ENV_CONVERTED_SEPARATED)
self.assertIsInstance(result, dict)
@patch("pconf.store.env.os", new=MagicMock())
def test_parse_and_split_order(self):
pconf.store.env.os.environ = TEST_ENV_VARS
try:
            env_store = Env(separator=TEST_SEPARATOR, parse_values=TEST_PARSE_VALUES)
except AttributeError:
self.fail("Parsing environment variables raised AttributeError")
result = env_store.get()
self.assertEqual(result, TEST_SEPARATED_VARS)
self.assertIsInstance(result, dict)
    @patch("pconf.store.env.os", new=MagicMock())
    @patch(MOCK_OPEN_FUNCTION, mock_open(read_data=TEST_DOCKER_SECRET_CONTENT))
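# --- Hedged usage sketch (not part of the original test module) ---
# Distilled from the tests above: Env flattens os.environ into a dict,
# optionally nesting keys on `separator` and literal-parsing values. Only
# constructor arguments exercised by these tests are assumed.
if __name__ == "__main__":
    import os
    os.environ["APP__DEBUG"] = "True"
    store = Env(separator="__", parse_values=True, to_lower=True)
    print(store.get())  # expected: {'app': {'debug': True}}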
|
rolandwz/pymisc
|
utrader/strategy/bollingTrader.py
|
Python
|
mit
| 2,348
| 0.032794
|
# -*- coding: utf-8 -*-
import datetime, time, csv, os
from utils.db import SqliteDB
from utils.rwlogging import log
from utils.rwlogging import strategyLogger as logs
from trader import Trader
from indicator import ma, macd, bolling, rsi, kdj
from strategy.pool import StrategyPool
highest = 0
def runStrategy(prices):
logs.info('STRATEGY,BUY TIMES, SELL TIMES, FINAL EQUITY')
#prices = SqliteDB().getAllPrices(table)
ps = [p['close'] for p in prices]
pool = StrategyPool(100)
    #doBollingTrade(pool, prices, ps, 12, 2.4)
#pool.showStrategies()
#return
for i in range(2, 40):
j = 0
log.debug(i)
while j <= 5:
doBollingTrade(pool, prices, ps, i, j)
j += 0.1
pool.showStrategies()
def doBollingTrade(pool, prices, ps, period, deviate):
global highest
sname = 'BOLLING_' + str(period) + '_' + str(deviate)
bollings = bolling.calc_bolling(prices, period, deviate)
t = Trader(sname)
for i in range(period, len(prices)):
if ps[i-1] > bollings['lower'][i-1] and ps[i] < bollings['lower'][i] and t.bsflag < 1:
notes = 'LAST p: ' + str(ps[i - 1]) + ';boll lower: ' + str(bollings['lower'][i-1]) + 'CURRENT p: ' + str(ps[i]) + ';boll lower: ' + str(bollings['lower'][i])
t.buy(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
if ps[i-1] < bollings['mean'][i-1] and ps[i] >= bollings['mean'][i] and t.bsflag == 1:
notes = 'LAST p: ' + str(ps[i - 1]) + ';boll mean: ' + str(bollings['mean'][i-1]) + 'CURRENT p: ' + str(ps[i]) + ';boll mean: ' + str(bollings['mean'][i])
t.buy(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
if ps[i-1] < bollings['upper'][i-1] and ps[i] > bollings['upper'][i] and t.bsflag > -1:
notes = 'LAST p: ' + str(ps[i - 1]) + ';boll upper: ' + str(bollings['upper'][i-1]) + 'CURRENT p: ' + str(ps[i]) + ';boll upper: ' + str(bollings['upper'][i])
t.sell(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
if ps[i-1] > bollings['mean'][i-1] and ps[i] <= bollings['mean'][i] and t.bsflag == -1:
notes = 'LAST p: ' + str(ps[i - 1]) + ';boll mean: ' + str(bollings['mean'][i-1]) + 'CURRENT p: ' + str(ps[i]) + ';boll mean: ' + str(bollings['mean'][i])
t.sell(prices[i]['date'], prices[i]['time'], prices[i]['rmb'], notes)
t.show(prices[i]['date'], prices[i]['time'], prices[i]['rmb'])
pool.estimate(t)
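# --- Hedged sketch (not part of the original strategy file) ---
# doBollingTrade above consumes bolling.calc_bolling(prices, period, deviate).
# The band math it presumably implements is the standard one: a rolling mean
# plus/minus `deviate` rolling standard deviations. The function name and the
# dict-of-lists return shape below mirror how the result is indexed above, but
# are assumptions, not the library's actual code.
def calc_bolling_sketch(closes, period, deviate):
    """Return {'upper': [...], 'mean': [...], 'lower': [...]} per close price."""
    import statistics
    bands = {'upper': [], 'mean': [], 'lower': []}
    for i in range(len(closes)):
        if i + 1 < period:
            # Not enough history yet; pad so indices line up with `closes`
            bands['upper'].append(None)
            bands['mean'].append(None)
            bands['lower'].append(None)
            continue
        window = closes[i + 1 - period:i + 1]
        m = statistics.mean(window)
        sd = statistics.pstdev(window)
        bands['mean'].append(m)
        bands['upper'].append(m + deviate * sd)
        bands['lower'].append(m - deviate * sd)
    return bands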
|
JoeGermuska/agate
|
agate/data_types/__init__.py
|
Python
|
mit
| 3,646
| 0.000274
|
#!/usr/bin/env python
"""
This module contains the :class:`.DataType` class and its subclasses. These
types define how data should be converted during the creation of a
:class:`.Table`.
A :class:`TypeTester` class is also included which can be used to infer data
types from column data.
"""
from copy import copy
from agate.data_types.base import DEFAULT_NULL_VALUES, DataType # noqa
from agate.data_types.boolean import Boolean
from agate.data_types.date import Date
from agate.data_types.date_time import DateTime
from agate.data_types.number import Number
from agate.data_types.text import Text
from agate.data_types.time_delta import TimeDelta
from agate.exceptions import CastError # noqa
class TypeTester(object):
"""
Infer data types for the columns in a given set of data.
:param force:
A dictionary where each key is a column name and each value is a
:class:`.DataType` instance that overrides inference.
:param limit:
An optional limit on how many rows to evaluate before selecting the
        most likely type. Note that applying a limit may mean errors arise
        when the data is cast, if the guess is proved incorrect in further
        rows of data.
:param types:
        A sequence of possible types to test against. This can be used to specify
what data formats you want to test against. For instance, you may want
to exclude :class:`TimeDelta` from testing. It can also be used to pass
options such as ``locale`` to :class:`.Number` or ``cast_nulls`` to
        :class:`.Text`. Take care when specifying the order of the list: it is
        the order the types are tested in. :class:`.Text` should always be last.
"""
def __init__(self, force={}, limit=None, types=None):
self._force = force
self._limit = limit
if types:
self._possible_types = types
else:
# In order of preference
self._possible_types = [
Boolean(),
Number(),
TimeDelta(),
Date(),
DateTime(),
Text()
]
def run(self, rows, column_names):
"""
        Apply type inference to the provided data and return an array of
column types.
:param rows:
            The data as a sequence of any sequences: tuples, lists, etc.
"""
num_columns = len(column_names)
hypotheses = [set(self._possible_types) for i in range(num_columns)]
force_indices = [column_names.index(name) for name in self._force.keys()]
if self._limit:
sample_rows = rows[:self._limit]
elif self._limit == 0:
text = Text()
return tuple([text] * num_columns)
else:
sample_rows = rows
for row in sample_rows:
for i in range(num_columns):
if i in force_indices:
continue
h = hypotheses[i]
if len(h) == 1:
continue
for column_type in copy(h):
if len(row) > i and not column_type.test(row[i]):
h.remove(column_type)
column_types = []
for i in range(num_columns):
if i in force_indices:
column_types.append(self._force[column_names[i]])
continue
h = hypotheses[i]
            # Select in preferred order
for t in self._possible_types:
if t in h:
column_types.append(t)
break
return tuple(column_types)
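# --- Hedged usage sketch (not part of the original module) ---
# Shows the inference flow implemented by TypeTester.run above; the column
# names and row values are illustrative, and the expected result assumes the
# default type preference order.
if __name__ == "__main__":
    tester = TypeTester(force={'id': Text()})
    rows = [('1', 'true', '2015-01-01'), ('2', 'false', '2015-01-02')]
    inferred = tester.run(rows, ['id', 'flag', 'day'])
    print(inferred)  # expected: (Text, Boolean, Date) instances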
|
rhyolight/nupic.research
|
projects/thing_classification/l2l4l6_experiment.py
|
Python
|
gpl-3.0
| 8,785
| 0.006716
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This script creates simple experiment to compute the object classification
accuracy of L2-L4-L6 network using objects from YCB dataset and "Thing" sensor
"""
import glob
import json
import logging
import os
import random
from collections import defaultdict, OrderedDict
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from nupic.encoders import ScalarEncoder
from htmresearch.frameworks.location.location_network_creation import L246aNetwork
from htmresearch.support.expsuite import PyExperimentSuite
from htmresearch.support.register_regions import registerAllResearchRegions
logging.basicConfig(level=logging.ERROR)
def loadThingData(dataDir="data", n=150, w=11):
"""
  Load Thing sensation data. There is one file per object; each row contains
  one feature/location pair. The format is as follows:
[(-33.6705, 75.5003, 2.4207)/10] => [[list of active bits of location],
[list of active bits of feature]]
The content before "=>" is the true 3D location / sensation
We ignore the encoded values after "=>" and use :class:`ScalarEncoder` to
encode the sensation in a way that is compatible with the experiment network.
:param dataDir: The location data files
:type dataDir: str
:param n: The number of bits in the feature SDR. Usually L4 column count
:type n: int
:param w: Number of 'on' bits in the feature SDR. Usually L4 sample size
:type w: int
:return: Dictionary mapping objects to sensations that can be used directly by
class L246aNetwork 'infer' and 'learn' methods
:rtype: dict[str,list]
"""
objects = defaultdict(list)
# Thing features are scalar values ranging from 1-25 inclusive
encoder = ScalarEncoder(n=n, w=w, minval=1, maxval=25, forced=True)
dataPath = os.path.dirname(os.path.realpath(__file__))
dataPath = os.path.join(dataPath, dataDir)
objFiles = glob.glob1(dataPath, "*.log")
for filename in objFiles:
obj, _ = os.path.splitext(filename)
# Read raw sensations from log file. Ignore SDRs after "=>"
sensations = []
with open(os.path.join(dataPath, filename)) as f:
for line in f.readlines():
# Parse raw location/feature values
line = line.split("=>")[0].translate(None, "[,]()")
locationStr, featureStr = line.split("/")
location = map(float, locationStr.split())
feature = encoder.encode(int(featureStr)).nonzero()[0].tolist()
sensations.append((location, feature))
# Assume single column
objects[obj] = [sensations]
return objects
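# Illustrative walk-through of the parsing above (added comment; the sample
# line is hypothetical but follows the documented format). For
#   "[(-33.6705, 75.5003, 2.4207)/10] => [[...], [...]]"
# split("=>")[0] keeps the raw sensation, translate(None, "[,]()") strips the
# punctuation, and split("/") yields the location string
# "-33.6705 75.5003 2.4207" and the feature "10", which is re-encoded with the
# ScalarEncoder.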
class L2L4L6aExperiment(PyExperimentSuite):
"""
Compute the object classification accuracy of L2-L4-L6 network using objects
from YCB dataset and a single column "Thing" sensor.
"""
def reset(self, params, repetition):
"""
Take the steps necessary to reset the experiment before each repetition:
- Make sure random seed is different for each repetition
- Create the L2-L4-L6a network
- Load objects used by the experiment
- Learn all objects used by the experiment
"""
print params["name"], ":", repetition
self.debug = params.get("debug", False)
self.numLearningPoints = params["num_learning_points"]
self.numOfSensations = params["num_sensations"]
L2Params = json.loads('{' + params["l2_params"] + '}')
L4Params = json.loads('{' + params["l4_params"] + '}')
L6aParams = json.loads('{' + params["l6a_params"] + '}')
self.sdrSize = L2Params["sdrSize"]
# Make sure random seed is different for each repetition
seed = params.get("seed", 42)
np.random.seed(seed + repetition)
random.seed(seed + repetition)
L2Params["seed"] = seed + repetition
L4Params["seed"] = seed + repetition
L6aParams["seed"] = seed + repetition
# Configure L6a params
numModules = params["num_modules"]
L6aParams["scale"] = [params["scale"]] * numModules
angle = params["angle"] / numModules
orientation = range(angle / 2, angle * numModules, angle)
L6aParams["orientation"] = np.radians(orientation).tolist()
L6aParams["cellsPerAxis"] = params["cells_per_axis"]
# Create single column L2-L4-L6a network
self.network = L246aNetwork(numColumns=1, L2Params=L2Params,
L4Params=L4Params, L6aParams=L6aParams,
repeat=self.numLearningPoints,
logCalls=self.debug)
# Load Thing Objects
sampleSize = L4Params["sampleSize"]
columnCount = L4Params["columnCount"]
# Make sure w is odd per encoder requirement
sampleSize = sampleSize if sampleSize % 2 != 0 else sampleSize + 1
self.objects = loadThingData(dataDir=params["data_path"],
w=sampleSize, n=columnCount)
# Number of iterations must match the number of objects. This will allow us
# to execute one iteration per object and use the "iteration" parameter as
# the object index
assert params["iterations"] == len(self.objects)
# Learn objects
self.network.learn(self.objects)
def iterate(self, params, repetition, iteration):
"""
For each iteration try to infer the object represented by the 'iteration'
    parameter, returning whether or not the object was unambiguously classified.
:param params: Specific parameters for this iteration. See 'experiments.cfg'
for list of parameters
:param repetition: Current repetition
:param iteration: Use the iteration to select the object to infer
:return: Whether or not the object was classified
"""
    objname, sensations = self.objects.items()[iteration]
# Select sensations to infer
np.random.shuffle(sensations[0])
sensations = [sensations[0][:self.numOfSensations]]
self.network.sendReset()
# Collect all statistics for every inference.
    # See L246aNetwork._updateInferenceStats
stats = defaultdict(list)
self.network.infer(sensations=sensations, stats=stats, objname=objname)
stats.update({"name": objname})
return stats
def plotAccuracy(suite, name):
"""
Plots classification accuracy
"""
path = suite.cfgparser.get(name, "path")
path = os.path.join(path, name)
accuracy = defaultdict(list)
sensations = defaultdict(list)
for exp in suite.get_exps(path=path):
params = suite.get_params(exp)
maxTouches = params["num_sensations"]
cells = params["cells_per_axis"]
res = suite.get_history(exp, 0, "Correct classification")
classified = [any(x) for x in res]
accuracy[cells] = float(sum(classified)) / float(len(classified))
touches = [np.argmax(x) or maxTouches for x in res]
sensations[cells] = [np.mean(touches), np.max(touches)]
plt.title("Classification Accuracy")
accuracy = OrderedDict(sorted(accuracy.items(), key=lambda t: t[0]))
fig, ax1 = plt.subplots()
ax1.plot(accuracy.keys(), accuracy.values(), "b")
ax1.set_xlabel("Cells per axis")
ax1.set_ylabel("Accuracy", color="b")
ax1.tick_params("y", colors="b")
sensations = OrderedDict(sorted(sensations.items(), key=lambda t: t[0]))
ax2 = ax1.twinx()
ax2.set_prop_cycle(lines
|
SohKai/ChronoLogger
|
web/flask/lib/python2.7/site-packages/sqlalchemy/util/_collections.py
|
Python
|
mit
| 25,079
| 0.001156
|
# util/_collections.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
import sys
import itertools
import weakref
import operator
from langhelpers import symbol
from compat import time_func, threading
EMPTY_SET = frozenset()
class NamedTuple(tuple):
"""tuple() subclass that adds labeled names.
Is also pickleable.
"""
def __new__(cls, vals, labels=None):
t = tuple.__new__(cls, vals)
if labels:
t.__dict__.update(zip(labels, vals))
t._labels = labels
return t
def keys(self):
return [l for l in self._labels if l is not None]
class ImmutableContainer(object):
def _immutable(self, *arg, **kw):
raise TypeError("%s object is immutable" % self.__class__.__name__)
__delitem__ = __setitem__ = __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
clear = pop = popitem = setdefault = \
update = ImmutableContainer._immutable
def __new__(cls, *args):
new = dict.__new__(cls)
dict.__init__(new, *args)
return new
def __init__(self, *args):
pass
def __reduce__(self):
return immutabledict, (dict(self), )
def union(self, d):
if not self:
return immutabledict(d)
else:
d2 = immutabledict(self)
dict.update(d2, d)
return d2
def __repr__(self):
return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
"""Provide a __getattr__/__setattr__ interface over a dict."""
def __init__(self, data):
self.__dict__['_data'] = data
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.itervalues()
def __add__(self, other):
return list(self) + list(other)
def __setitem__(self, key, object):
self._data[key] = object
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, object):
self._data[key] = object
def __getstate__(self):
return {'_data': self.__dict__['_data']}
def __setstate__(self, state):
self.__dict__['_data'] = state['_data']
def __getattr__(self, key):
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
return key in self._data
def as_immutable(self):
"""Return an immutable proxy for this :class:`.Properties`."""
return ImmutableProperties(self._data)
def update(self, value):
self._data.update(value)
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def keys(self):
return self._data.keys()
def has_key(self, key):
return key in self._data
def clear(self):
self._data.clear()
class OrderedProperties(Properties):
"""Provide a __getattr__/__setattr__ interface with an OrderedDict
as backing store."""
def __init__(self):
Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
"""Provide immutable dict/object attribute to an underlying dictionary."""
class OrderedDict(dict):
"""A dict that returns keys/values/items in the order they were added."""
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def copy(self):
return self.__copy__()
def __copy__(self):
return OrderedDict(self)
def sort(self, *arg, **kw):
self._list.sort(*arg, **kw)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, 'keys'):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
def values(self):
return [self[key] for key in self._list]
def itervalues(self):
return iter([self[key] for key in self._list])
def keys(self):
return list(self._list)
def iterkeys(self):
return iter(self.keys())
def items(self):
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
return iter(self.items())
def __setitem__(self, key, object):
if key not in self:
try:
self._list.append(key)
except AttributeError:
# work around Python pickle loads() with
# dict subclass (seems to ignore __setstate__?)
self._list = [key]
dict.__setitem__(self, key, object)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._list.remove(key)
def pop(self, key, *default):
present = key in self
value = dict.pop(self, key, *default)
if present:
self._list.remove(key)
return value
def popitem(self):
item = dict.popitem(self)
self._list.remove(item[0])
return item
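# Illustrative behaviour of the OrderedDict above (added comment; this class
# predates collections.OrderedDict being universally available):
#   d = OrderedDict([('b', 1), ('a', 2)])
#   d['c'] = 3
#   d.keys()  # -> ['b', 'a', 'c'] (insertion order, not sort order)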
class OrderedSet(set):
def __init__(self, d=None):
set.__init__(self)
self._list = []
if d is not None:
self.update(d)
def add(self, element):
if element not in self:
self._list.append(element)
set.add(self, element)
def remove(self, element):
set.remove(self, element)
self._list.remove(element)
def insert(self, pos, element):
if element not in self:
self._list.insert(pos, element)
set.add(self, element)
def discard(self, element):
if element in self:
self._list.remove(element)
set.remove(self, element)
def clear(self):
set.clear(self)
self._list = []
def __getitem__(self, key):
return self._list[key]
def __iter__(self):
return iter(self._list)
def __add__(self, other):
return self.union(other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, iterable):
for e in iterable:
if e not in self:
self._list.append(e)
set.add(self, e)
return self
__ior__ = update
def union(self, other):
result = self.__class__(self)
result.update(other)
return result
__or__ = union
def intersection(self, other):
other = set(other)
return self.__class__(a for a in self if a in other)
__and__ = intersection
def symmetric_difference(self, other):
other = set(other)
result = self.__class__(a for a in self if a not in other)
result.update(a for a in other if a not in self)
return result
__xor__ = symmetric_difference
def difference(self, other):
other = set(other)
return self.__class__(a for a in self if a not in other)
__sub__ = difference
def intersection_update(self, other):
other = set(other)
set.intersection_update(self, other)
self._list = [ a for a in self._list if a in other]
return self
__iand__ = intersection_update
def symmetric_difference_update(self, other):
        set.symmetric_difference_update(self, other)
|
pankajp/pysmoke
|
pysmoke/tests/test_marshal.py
|
Python
|
mit
| 987
| 0.005066
|
from __future__ import print_function, absolute_import
import random
import unittest
from pysmoke import marshal
from pysmoke.smoke import ffi, Type, TypedValue, pystring, smokec, not_implemented, charp, dbg
from pysmoke import QtCore, QtGui
qtcore = QtCore.__binding__
qtgui = QtGui.__binding__
class MarshalTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_qstring(self):
qstr = marshal.QString.from_py('aqstring')
print(qstr)
pstr = marshal.QString.to_py(qstr)
#dbg()
self.assertEqual(pstr, 'aqstring')
import gc; gc.collect()
qstr2 = marshal.QString.from_py(pstr)
print('QS:', qstr, pstr, qstr2, marshal.QString.to_py(qstr))
obj = QtGui.QObject()
print('obj', obj.__cval__.value.s_voidp)
obj.setObjectName('my_object')
self.assertEqual(obj.objectName(), 'my_object')
if __name__ == '__main__':
unittest.main()
|
btbuxton/python-pomodoro
|
research/multi_recv.py
|
Python
|
mit
| 864
| 0.002315
|
#http://pymotw.com/2/socket/multicast.html
import socket
import struct
import sys
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
# Receive/respond loop
while True:
print >>sys.stderr, '\nwaiting to receive message'
data, address = sock.recvfrom(1024)
print >>sys.stderr, 'received %s bytes from %s' % (len(data), a
|
ddress)
print >>sys.stderr, data
print >>sys.stderr, 'sending acknowledgement to', address
sock.sendto('ack', address)
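# --- Hedged companion sketch (not part of the original script) ---
# A minimal matching sender, modelled on the same pymotw multicast example the
# header cites; run it from a second process to exercise the receive loop
# above. Kept as comments since the loop above never returns.
#
#   import socket, struct
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.settimeout(0.2)
#   # TTL of 1 keeps multicast packets on the local network segment
#   sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL,
#                   struct.pack('b', 1))
#   sock.sendto('ping', ('224.3.29.71', 10000))
#   data, server = sock.recvfrom(16)  # expect the 'ack' sent above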
|
verityrise/Canine_Analysis
|
integrate_genome.py
|
Python
|
epl-1.0
| 753
| 0.01992
|
#!/usr/bin/python
'''
This programs is to integrate dog reference genome from chr to a single one.
Author: Hongzhi Luo
'''
import gzip
import glob
import shutil
path='/vlsci/LSC0007/shared/canine_alport_syndrome/ref_files/'
#path=''
prefix='cfa_ref_CanFam3.1'
def integrate_genome():
'''
@param: num: chr1...chrMT in the list.
'''
files=sorted(glob.glob(path+prefix+"*.fa.gz"))
#cat_together=[]
    #for f in files:
    #    cat_together.append(f)
    #for files in cat_together:
outfile=gzip.open(path+prefix+".fa.gz",'wb')
for f in files:
gfile=gzip.open(f)
outfile.write(gfile.read())
gfile.close()
outfile.close()
if __name__ == '__main__':
    integrate_genome()
|
mwreuter/arcade
|
experimental/a_quick_test5.py
|
Python
|
mit
| 4,042
| 0.000247
|
"""
This example uses OpenGL via Pyglet and draws
a bunch of rectangles on the screen.
"""
import random
import time
import pyglet.gl as GL
import pyglet
import ctypes
# Set up the constants
SCREEN_WIDTH = 700
SCREEN_HEIGHT = 500
RECT_WIDTH = 50
RECT_HEIGHT = 50
class Shape():
def __init__(self):
self.x = 0
self.y = 0
class VertexBuffer():
""" Class to hold vertex buffer info. """
def __init__(self, vbo_id, size):
self.vbo_id = vbo_id
self.size = size
def add_rect(rect_list, x, y, width, height, color):
""" Create a vertex buffer for a rectangle. """
rect_list.extend([-width / 2, -height / 2,
width / 2, -height / 2,
width / 2, height / 2,
-width / 2, height / 2])
def create_vbo_for_rects(v2f):
vbo_id = GL.GLuint()
GL.glGenBuffers(1, ctypes.pointer(vbo_id))
data2 = (GL.GLfloat*len(v2f))(*v2f)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, vbo_id)
GL.glBufferData(GL.GL_ARRAY_BUFFER, ctypes.sizeof(data2), data2,
GL.GL_STATIC_DRAW)
shape = VertexBuffer(vbo_id, len(v2f)//2)
return shape
def render_rect_filled(shape, x, y):
""" Render the shape at the right spot. """
# Set color
GL.glDisable(GL.GL_BLEND)
GL.glColor4ub(shape.color[0], shape.color[1], shape.color[2], 255)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, shape.vbo_id)
GL.glVertexPointer(2, GL.GL_FLOAT, 0, 0)
GL.glLoadIdentity()
GL.glTranslatef(x + shape.width / 2, y + shape.height / 2, 0)
GL.glDrawArrays(GL.GL_QUADS, 0, shape.size)
class MyApplication():
""" Main application class. """
def setup(self):
""" Set up the game and initialize the variables. """
# Set background to white
GL.glClearColor(1, 1, 1, 1)
self.rect_list = []
self.shape_list = []
for i in range(2000):
x = random.randrange(0, SCREEN_WIDTH)
y = random.randrange(0, SCREEN_HEIGHT)
width = random.randrange(20, 71)
height = random.randrange(20, 71)
d_x = random.randrange(-3, 4)
d_y = random.randrange(-3, 4)
red = random.randrange(256)
blue = random.randrange(256)
green = random.randrange(256)
alpha = random.randrange(256)
color = (red, blue, green, alpha)
shape = Shape()
shape.x = x
shape.y = y
self.shape_list.append(shape)
add_rect(self.rect_list, 0, 0, width, height, color)
print("Creating vbo for {} vertices.".format(len(self.rect_list) // 2))
self.rect_vbo = create_vbo_for_rects(self.rect_list)
print("VBO {}".format(self.rect_vbo.vbo_id))
def animate(self, dt):
""" Move everything """
pass
def on_draw(self):
"""
Render the screen.
"""
start = time.time()
float_size = ctypes.sizeof(ctypes.c_float)
record_len = 10 * float_size
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
GL.glMatrixMode(GL.GL_MODELVIEW)
GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
GL.glColor4ub(255, 0, 0, 255)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.rect_vbo.vbo_id)
GL.glVertexPointer(2, GL.GL_FLOAT, record_len, 0)
for i in range(len(self.shape_list)):
shape = self.shape_list[i]
GL.glLoadIdentity()
            GL.glTranslatef(shape.x, shape.y, 0)
GL.glDrawArrays(GL.GL_QUADS, i * 8, 8)
# GL.glDrawArrays(GL.GL_QUADS,
# 0,
# self.rect_vbo.size)
elapsed = time.time() - start
print(elapsed)
def main():
window = pyglet.window.Window(SCREEN_WIDTH, SCREEN_HEIGHT)
app = MyApplication()
app.setup()
pyglet.clock.schedule_interval(app.animate, 1/60)
@window.event
def on_draw():
window.clear()
app.on_draw()
pyglet.app.run()
main()
|
catapult-project/catapult
|
third_party/gsutil/gslib/vendored/boto/boto/cloudformation/stack.py
|
Python
|
bsd-3-clause
| 14,230
| 0.001195
|
from datetime import datetime
from boto.resultset import ResultSet
class Stack(object):
def __init__(self, connection=None):
self.connection = connection
self.creation_time = None
self.description = None
self.disable_rollback = None
self.notification_arns = []
self.outputs = []
self.parameters = []
self.capabilities = []
self.tags = []
self.stack_id = None
self.stack_status = None
self.stack_status_reason = None
self.stack_name = None
self.timeout_in_minutes = None
@property
def stack_name_reason(self):
return self.stack_status_reason
@stack_name_reason.setter
def stack_name_reason(self, value):
self.stack_status_reason = value
def startElement(self, name, attrs, connection):
if name == "Parameters":
self.parameters = ResultSet([('member', Parameter)])
return self.parameters
elif name == "Outputs":
self.outputs = ResultSet([('member', Output)])
return self.outputs
elif name == "Capabilities":
self.capabilities = ResultSet([('member', Capability)])
return self.capabilities
elif name == "Tags":
self.tags = Tag()
return self.tags
elif name == 'NotificationARNs':
self.notification_arns = ResultSet([('member', NotificationARN)])
return self.notification_arns
else:
return None
def endElement(self, name, value, connection):
if name == 'CreationTime':
try:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
elif name == "Description":
self.description = value
elif name == "DisableRollback":
if str(value).lower() == 'true':
self.disable_rollback = True
else:
self.disable_rollback = False
elif name == 'StackId':
self.stack_id = value
elif name == 'StackName':
self.stack_name = value
elif name == 'StackStatus':
self.stack_status = value
elif name == "StackStatusReason":
self.stack_status_reason = value
elif name == "TimeoutInMinutes":
self.timeout_in_minutes = int(value)
elif name == "member":
pass
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_stack(stack_name_or_id=self.stack_id)
def describe_events(self, next_token=None):
return self.connection.describe_stack_events(
stack_name_or_id=self.stack_id,
next_token=next_token
)
def describe_resource(self, logical_resource_id):
return self.connection.describe_stack_resource(
stack_name_or_id=self.stack_id,
logical_resource_id=logical_resource_id
)
def describe_resources(self, logical_resource_id=None,
physical_resource_id=None):
return self.connection.describe_stack_resources(
stack_name_or_id=self.stack_id,
logical_resource_id=logical_resource_id,
physical_resource_id=physical_resource_id
)
def list_resources(self, next_token=None):
return self.connection.list_stack_resources(
stack_name_or_id=self.stack_id,
next_token=next_token
)
def update(self):
rs = self.connection.describe_stacks(self.stack_id)
if len(rs) == 1 and rs[0].stack_id == self.stack_id:
self.__dict__.update(rs[0].__dict__)
else:
raise ValueError("%s is not a valid Stack ID or Name" %
self.stack_id)
def get_template(self):
return self.connection.get_template(stack_name_or_id=self.stack_id)
def get_policy(self):
"""
        Returns the stack policy for this stack. If it has no policy,
        then a null value is returned.
"""
return self.connection.get_stack_policy(self.stack_id)
def set_policy(self, stack_policy_body=None, stack_policy_url=None):
"""
Sets a stack policy for this stack.
:type stack_policy_body: string
:param stack_policy_body: Structure containing the stack policy body.
(For more information, go to ` Prevent Updates to Stack Resources`_
in the AWS CloudFormation User Guide.)
You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
passed, only `StackPolicyBody` is used.
:type stack_policy_url: string
:param stack_policy_url: Location of a file containing the stack
policy. The URL must point to a policy (max size: 16KB) located in
an S3 bucket in the same region as the stack. You must pass
`StackPolicyBody` or `StackPolicyURL`. If both are passed, only
`StackPolicyBody` is used.
"""
return self.connection.set_stack_policy(self.stack_id,
stack_policy_body=stack_policy_body,
stack_policy_url=stack_policy_url)
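    # Hedged usage sketch (added comment; assumes a configured
    # CloudFormationConnection `conn` and a JSON policy document `policy`):
    #   stack = conn.describe_stacks('my-stack')[0]
    #   stack.set_policy(stack_policy_body=policy)
    #   current = stack.get_policy()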
class StackSummary(object):
def __init__(self, connection=None):
self.connection = connection
self.stack_id = None
self.stack_status = None
self.stack_name = None
self.creation_time = None
self.deletion_time = None
self.template_description = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'StackId':
self.stack_id = value
elif name == 'StackStatus':
self.stack_status = value
elif name == 'StackName':
self.stack_name = value
elif name == 'CreationTime':
try:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
elif name == "DeletionTime":
try:
self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
elif name == 'TemplateDescription':
self.template_description = value
elif name == "member":
pass
else:
setattr(self, name, value)
class Parameter(object):
def __init__(self, connection=None):
self.connection = None
self.key = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "ParameterKey":
self.key = value
elif name == "ParameterValue":
self.value = value
else:
setattr(self, name, value)
def __repr__(self):
return "Parameter:\"%s\"=\"%s\"" % (self.key, self.value)
class Output(object):
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.key = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Description":
self.description = value
elif name == "OutputKey":
self.key = value
elif name == "OutputValue":
self.value = value
else:
setattr(self, name, value)
def __repr__(self):
        return "Output:\"%s\"=\"%s\"" % (self.key, self.value)
class Capability(object):
def __init__(self, connection=None):
self.connection = None
        self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
self.value = value
def __repr__(self):
        return "Capability:\"%s\"" % self.value
|
CALlanoR/virtual_environments
|
web/api_rest/mini_facebook/python_users_relationships_service_api_llano/manual_tests/consumer.py
|
Python
|
apache-2.0
| 1,939
| 0.009799
|
import json
import datetime
import http.client
from time import time
########################################################################################################################
##################################################### ENVIRONMENTS #####################################################
########################################################################################################################
#local
conn = http.client.HTTPConnection("localhost:5000")
#container
# conn = http.client.HTTPConnection("localhost:5000")
########################################################################################################################
######################################################## USERS #########################################################
########################################################################################################################
headers = {
'Content-type': 'application/json'
}
#Create person
# create_person_post = {
# 'id': 3,
# 'name': 'Carlos',
# 'email': 'carlos@gmail.com',
# 'login': 'llano',
# 'password': '123456'
# }
# json_data_post = json.dumps(create_person_post)
# conn.request("POST", "/persons/", json_data_post, headers=headers)
#Friends of a person
#conn.request("GET", "/persons/1/friends", headers=headers)
#Friends of the friends of a person
#conn.request("GET", "/persons/0/mayYouKnow", headers=headers)
#Add a new relationship
conn.request("POST", "/persons/person1/3/person2/4", headers=headers)
#Delete a relationship
# conn.request("POST", "/persons/delete/person1/3/person2/4", headers=headers)
start = datetime.datetime.now()
res = conn.getresponse()
end = datetime.datetime.now()
data = res.read()
elapsed = end - start
print(data.decode("utf-8"))
print("\"" + str(res.status) + "\"")
print("\"" + str(res.reason) + "\"")
print("\"elapsed seconds: " + str(elapsed) + "\"")
|
cmutel/pandarus
|
tests/test_maps.py
|
Python
|
bsd-3-clause
| 3,212
| 0.001868
|
from pandarus.maps import Map, DuplicateFieldID
from rtree import Rtree
import fiona
import os
import pandarus
import pytest
dirpath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
grid = os.path.join(dirpath, "grid.geojson")
duplicates = os.path.join(dirpath, "duplicates.geojson")
raster = os.path.join(dirpath, "test_raster_cfs.tif")
countries = os.path.join(dirpath, "test_countries.gpkg")
def test_init():
m = Map(grid, 'name')
assert m.filepath == grid
assert m.file
def test_raster_error(monkeypatch):
with pytest.raises(AssertionError):
m = Map(raster, None)
def test_metadata(monkeypatch):
m = Map(grid, 'name')
assert m.metadata == {}
def fake_open(filepath, **others):
return others
monkeypatch.setattr(
pandarus.maps,
'check_type',
lambda x: 'vector'
)
monkeypatch.setattr(
pandarus.maps.fiona,
'open',
fake_open
)
m = Map(grid, 'name', foo='bar')
assert m.metadata == {'foo': 'bar'}
assert m.file == {'foo': 'bar'}
def test_get_fieldnames_dictionary():
m = Map(grid, 'name')
expected = {0: 'grid cell 0', 1: 'grid cell 1',
2: 'grid cell 2', 3: 'grid cell 3'}
assert m.get_fieldnames_dictionary("name") == expected
def test_get_fieldnames_dictionary_errors():
m = Map(grid, 'name')
assert m.get_fieldnames_dictionary()
assert m.get_fieldnames_dictionary(None)
assert m.get_fieldnames_dictionary("")
with pytest.raises(AssertionError):
m.get_fieldnames_dictionary("bar")
dupes = Map(duplicates, 'name')
with pytest.raises(DuplicateFieldID):
dupes.get_fieldnames_dictionary()
def test_properties():
m = Map(grid, 'name')
assert m.geometry == 'Polygon'
assert m.hash
assert m.crs == '+init=epsg:4326'
def test_magic_methods():
m = Map(grid, 'name')
for i, x in enumerate(m):
pass
assert i == 3
expected = {
'geometry': {
'type': 'Polygon',
'coordinates': [[(1.0, 0.0), (1.0, 1.0), (2.0, 1.0), (2.0, 0.0), (1.0, 0.0)]]
},
'properties': {'name': 'grid cell 2'},
'id': '2',
'type': 'Feature'
}
assert m[2] == expected
assert len(m) == 4
def test_getitem():
print("Supported Fiona drivers:")
print(fiona.supported_drivers)
m = Map(grid, 'name')
expected = {
'geometry': {
'type': 'Polygon',
            'coordinates': [[(1.0, 0.0), (1.0, 1.0), (2.0, 1.0), (2.0, 0.0), (1.0, 0.0)]]
},
'properties': {'name': 'grid cell 2'},
'id': '2',
'type': 'Feature'
}
assert m[2] == expected
assert hasattr(m, "_index_map")
@pytest.mark.skipif('TRAVIS' in os.environ,
reason="No GPKG driver in Travis")
def test_getitem_geopackage():
print("Supported Fiona drivers:")
print(fiona.supported_drivers)
m = Map(countries, 'name')
assert m[0]
assert m[0]['id'] == '1'
assert hasattr(m, "_index_map")
def test_rtree():
m = Map(grid, 'name')
r = m.create_rtree_index()
assert r == m.rtree_index
assert isinstance(r, Rtree)
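# --- Hedged usage sketch (not part of the original test module) ---
# Distilled from the tests above: a Map wraps a vector file, supports len(),
# indexing and iteration over features, and can build an rtree spatial index.
if __name__ == "__main__":
    m = Map(grid, 'name')
    print(len(m), m.geometry)             # 4 'Polygon'
    print(m.get_fieldnames_dictionary())  # {0: 'grid cell 0', ...}
    idx = m.create_rtree_index()          # rtree.Rtree over feature bounds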
|
wdbm/datavision
|
datavision_examples_histograms_1.py
|
Python
|
gpl-3.0
| 1,130
| 0.040708
|
#!/usr/bin/env python
import numpy
import datavision
def main():
print("\ngenerate two arrays of data")
a = numpy.random.normal(2, 2, size = 120)
b = numpy.random.normal(2, 2, size = 120)
print("\narray 1:\n{array_1}\n\narray 2:\n{array_2}".format(
array_1 = a,
array_2 = b
))
filename = "histogram_1.png"
print("\nsave histogram of array 1 to {filename}".format(
filename = filename
))
datavision.save_histogram_matplotlib(
a,
filename = filename,
color_fill = "#000000"
)
filename = "histogram_comparison_1.png"
print("\nsave histogram comparison of array 1 and array 2 to {filename}".format(
filename = filename
))
datavision.save_histogram_comparison_matplotlib(
values_1 = a,
values_2 = b,
label_1 = "a",
label_2 = "b",
normalize = True,
label_ratio_x = "measurement",
        label_y = "",
title = "comparison of a and b",
filename = filename
)
if __name__ == "__main__":
main()
|
dajohnso/cfme_tests
|
cfme/containers/template.py
|
Python
|
gpl-2.0
| 3,341
| 0.001497
|
# -*- coding: utf-8 -*-
import random
import itertools
from functools import partial
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic_manageiq import Table
from cfme.base.ui import BaseLoggedInPage
from cfme.common import SummaryMixin, Taggable
from cfme.containers.provider import Labelable
from cfme.fixtures import pytest_selenium as sel
from cfme.web_ui import toolbar as tb, match_location,\
PagedTable, CheckboxTable
from .provider import details_page
from utils.appliance import Navigatable
from utils.appliance.implementations.ui import navigator, CFMENavigateStep,\
navigate_to
list_tbl = CheckboxTable(table_locator="//div[@id='list_grid']//table")
paged_tbl = PagedTable(table_locator="//div[@id='list_grid']//table")
match_page = partial(match_location, controller='container_templates', title='Container Templates')
class Template(Taggable, Labelable, SummaryMixin, Navigatable):
PLURAL = 'Templates'
def __init__(self, name, project_name, provider, appliance=None):
self.name = name
self.project_name = project_name
self.provider = provider
Navigatable.__init__(self, appliance=appliance)
def load_details(self, refresh=False):
navigate_to(self, 'Details')
if refresh:
tb.refresh()
def click_element(self, *ident):
self.load_details(refresh=True)
return sel.click(details_page.infoblock.element(*ident))
def get_detail(self, *ident):
""" Gets details from the details infoblock
Args:
*ident: An InfoBlock title, followed by the Key name, e.g. "Relationships", "Images"
Returns: A string representing the contents of the InfoBlock's value.
"""
self.load_details(refresh=True)
return details_page.infoblock.text(*ident)
@classmethod
def get_random_instances(cls, provider, count=1, appliance=None):
"""Generating random instances."""
template_list = provider.mgmt.list_template()
random.shuffle(template_list)
return [cls(obj.name, obj.project_name, provider, appliance=appliance)
for obj in itertools.islice(template_list, count)]
class TemplateAllView(BaseLoggedInPage):
table = Table(locator="//div[@id='list_grid']//table")
@property
def is_displayed(self):
return match_page(summary='Container Templates')
@navigator.register(Template, 'All')
class All(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
VIEW = TemplateAllView
def step(self):
self.prerequisite_view.navigation.select('Compute', 'Containers', 'Container Templates')
def resetter(self):
# Reset view and selection
tb.select("List View")
from cfme.web_ui import paginator
paginator.check_all()
paginator.uncheck_all()
@navigator.register(Template, 'Details')
class Details(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def am_i_here(self):
return match_page(summary='{} (Summary)'.format(self.obj.name))
def step(self):
tb.select('List View')
sel.click(paged_tbl.find_row_by_cell_on_all_pages({'Name': self.obj.name,
'Project Name': self.obj.project_name}))
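# Hedged usage sketch (added comment; provider setup elided and the InfoBlock
# keys are illustrative):
#   template = Template('my-template', 'my-project', provider)
#   template.load_details(refresh=True)
#   value = template.get_detail('Properties', 'Name')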
|
nicfit/eyed3
|
tests/test_jsonyaml_plugin.py
|
Python
|
gpl-2.0
| 1,938
| 0.001032
|
import os
import sys
import stat
from eyed3 import main, version
from . import RedirectStdStreams
def _initTag(afile):
afile.initTag()
afile.tag.artist = "Bad Religion"
afile.tag.title = "Suffer"
afile.tag.album = "Suffer"
afile.tag.release_date = "1988"
afile.tag.recording_date = "1987"
afile.tag.track_num = (9, 15)
afile.tag.save()
def _runPlugin(afile, plugin) -> str:
with RedirectStdStreams() as plugin_out:
args, _, config = main.parseCommandLine(["-P", plugin, str(afile.path)])
assert main.main(args, config) == 0
stdout = plugin_out.stdout.read().strip()
print(stdout)
return stdout
def _assertFormat(plugin: str, audio_file, format: str):
output = _runPlugin(audio_file, plugin)
print(output)
size_bytes = os.stat(audio_file.path)[stat.ST_SIZE]
    assert output.strip() == format.strip() \
        % dict(path=audio_file.path, version=version,
               size_bytes=size_bytes)
def testJsonPlugin(audiofile):
_initTag(audiofile)
_assertFormat("json", audiofile, """
{
"path": "%(path)s",
"
|
info": {
"time_secs": 10.68,
"size_bytes": %(size_bytes)d
},
"album": "Suffer",
"artist": "Bad Religion",
"best_release_date": "1988",
"recording_date": "1987",
"release_date": "1988",
"title": "Suffer",
"track_num": {
"count": 9,
"total": 15
},
"_eyeD3": "%(version)s"
}
""")
def testYamlPlugin(audiofile):
_initTag(audiofile)
omap, omap_list = "", " "
if sys.version_info[:2] <= (3, 7):
omap = " !!omap"
omap_list = "- "
_assertFormat("yaml", audiofile, f"""
---
_eyeD3: %(version)s
album: Suffer
artist: Bad Religion
best_release_date: '1988'
info:
size_bytes: %(size_bytes)d
time_secs: 10.68
path: %(path)s
recording_date: '1987'
release_date: '1988'
title: Suffer
track_num:{omap}
{omap_list}count: 9
{omap_list}total: 15
""")
|
bittner/django-allauth
|
allauth/socialaccount/providers/jupyterhub/tests.py
|
Python
|
mit
| 614
| 0
|
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import JupyterHubProvider
class JupyterHubTests(OAuth2TestsMixin, TestCase):
provider_id = JupyterHubProvider.id
def get_mocked_response(self):
return MockedResponse(200, """
{
"kind": "u
|
ser",
"name": "abc",
"admin": false,
"groups": [],
"server": null,
"pending": null,
"created": "2016-12-06T18:30:50.297567Z",
"last_activity": "2017-02-07T17:29:36.470236Z",
"servers": null}
""")
|
VanceKingSaxbeA/MarketsEngine
|
engine.py
|
Python
|
mit
| 3,899
| 0.020518
|
# Owner & Copyrights: Vance King Saxbe. A.
""" Copyright (c) <2014> Author Vance King Saxbe. A, and contributors Power Dominion Enterprise, Precieux Consulting and other contributors. Modelled, Architected and designed by Vance King Saxbe. A. with the geeks from GoldSax Consulting and GoldSax Technologies email @vsaxbe@yahoo.com. Development teams from Power Dominion Enterprise, Precieux Consulting. Project sponsored by GoldSax Foundation, GoldSax Group and executed by GoldSax Manager."""
#!/usr/bin/env python3
import _thread
import os
import sys
import time
import gc
from src import googlequotemachine
from src import yahooquotemachine
from src import bloombergquotemachine
from src import createtablesgooglefinance
from src import createtablesyahoofinance
from src import createtablesbloomberg
from time import localtime, strftime
start1 = []
sys.setrecursionlimit(1000000)
database = "data/"
markettime = {}
with open("conf/MarketTimings.conf") as fillees:
mlist = fillees.read().splitlines()
fillees.close()
for line in mlist:
items = line.split(", ")
key, values = items[0], items[1]
markettime[key] = values
with open('conf/symbolname.conf') as fille:
synamelist = fille.read().splitlines()
fille.close()
timetorun = 1800
cycle = 1
while("TRUE"):
with open('conf/urls.conf') as openedfile:
fileaslist = openedfile.read().splitlines()
openedfile.close()
a_lock = _thread.allocate_lock()
thr = []
with a_lock:
print("locks placed and Market engine is running for the...", cycle)
for lines in fileaslist:
lisj = lines.split('", "')
            mtime = markettime[lisj[2].replace('"','')]
mktime = mtime.split("-")
            if mktime[1] < mktime[0]:
righto = mktime[1].split(":")
close = str(str(int(righto[0])+24)+":"+righto[1])
else:
close = mktime[1]
rightnow = strftime("%H:%M", localtime())
if rightnow < strftime("04:00"):
right = rightnow.split(":")
rightnow = str(str(int(right[0])+24)+":"+right[1])
if (close > rightnow > mktime[0]):
print("Market ", lisj[2].replace('.db"',''), " is starting at cycle ", cycle)
if lisj[1] =='g':
thr.append(_thread.start_new_thread(googlequotemachine.actionking, (a_lock, start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,synamelist,1,0,timetorun) ))
elif lisj[1] =='y':
thr.append(_thread.start_new_thread(yahooquotemachine.actionking, (a_lock,start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,synamelist,1,0,timetorun) ))
else:
thr.append(_thread.start_new_thread(bloombergquotemachine.actionking, (a_lock,start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,) ))
time.sleep(0.00001)
print("locks placed and Market engine is running for the....", cycle, " time...with threads", thr )
time.sleep(timetorun)
gc.collect()
print("locks released and Market engine is restarting for the...", cycle, " time...")
cycle = cycle + 1
# email to provide support at vancekingsaxbe@powerdominionenterprise.com, businessaffairs@powerdominionenterprise.com, For donations please write to fundraising@powerdominionenterprise.com
|
bregmanstudio/cseparate
|
cseparate.py
|
Python
|
mit
| 2,850
| 0.04386
|
import numpy as np
from bregman.suite import *
from cjade import cjade
from scipy.optimize import curve_fit
from numpy.linalg.linalg import svd
def cseparate(x, M=None, N=4096, H =1024, W=4096, max_iter=200, pre_emphasis=True, magnitude_only=False, svd_only=False, transpose_spectrum=False):
"""
complex-valued frequency domain separation by independent components
using relative phase representation
inputs:
x - the audio signal to separate (1 row)
M - the number of sources to extract
options:
N - fft length in samples [4096]
H - hop size in samples [1024]
W - window length in samples (fft padded with N-W zeros) [4096]
max_iter - maximum JADE ICA iterations [200]
pre_emphasis - apply an exponential spectral pre-emphasis filter [False]
magnitude_only - whether to use magnitude-only spectrum (real-valued factorization)
svd_only - whether to use SVD instead of JADE
transpose_spectrum - whether to transpose the spectrum prior to factorization
output:
xhat - the separated signals (M rows)
xhat_all - the M separated signals mixed (1 row)
Copyright (C) 2014 Michael A. Casey, Bregman Media Labs,
Dartmouth College All Rights Reserved
"""
def pre_func(x, a, b, c):
return a * np.exp(-b * x) + c
M = 20 if M is None else M
phs_rec = lambda rp,dp: (np.angle(rp)+np.tile(np.atleast_2d(dp).T,rp.shape[1])).cumsum(1)
F = LinearFrequencySpectrum(x, nfft=N, wfft=W, nhop=H)
U = F._phase_map()
XX = np.absolute(F.STFT)
if pre_emphasis:
xx = np.arange(F.X.shape[0])
yy = XX.mean(1)
popt, pcov = curve_fit(pre_func, xx, yy)
XX = (XX.T * (1/pre_func(xx,*popt))).T
# w = np.r_[np.ones(64), .05*xx[64:]]
# XX = (XX.T * w).T
if magnitude_only:
X = XX
else:
X = XX * np.exp(1j * np.array(F.dPhi)) # Relative phase STFT
if transpose_spectrum:
X = X.T
if svd_only:
u,s,v = svd(X.T)
A = np.dot(u[:,:M], np.diag(s)[:M,:M])
S = v[:M,:] # v = V.H in np.linalg.svd
AS = np.dot(A,S).T # Non Hermitian transpose avoids complex conjugation
else:
A,S = cjade(X.T, M, max_iter) # complex-domain JADE by J. F. Cardoso
AS = np.array(A*S).T # Non Hermitian transpose avoids complex conjugation
if transpose_spectrum:
AS = AS.T
X_hat = np.absolute(AS)
if pre_emphasis:
#X_hat = (XX.T / (w)).T
X_hat = (XX.T * pre_func(xx,*popt)).T
Phi_hat = phs_rec(AS, F.dphi)
    x_hat_all = F.inverse(X_hat=X_hat, Phi_hat=Phi_hat, usewin=True)
x_hat = []
for k in np.arange(M):
if svd_only:
AS = np.dot(A[:,k][:,np.newaxis],S[k,:][np.newaxis,:]).T
else:
            AS = np.array(A[:,k]*S[k,:]).T
if transpose_spectrum:
AS = AS.T
X_hat = np.absolute(AS)
if pre_emphasis:
#X_hat = (XX.T / (w)).T
X_hat = (XX.T * pre_func(xx,*popt)).T
Phi_hat = phs_rec(AS, F.dphi)
x_hat.append(F.inverse(X_hat=X_hat, Phi_hat=Phi_hat, usewin=True))
return x_hat, x_hat_all
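# --- Hedged usage sketch (not part of the original module) ---
# Assumes a mono float signal `x`; the loader and source count are
# illustrative.
#   x = ...  # e.g. a mixture loaded at the rate the Bregman STFT expects
#   x_hat, x_hat_all = cseparate(x, M=4)
#   # x_hat: list of 4 separated signals; x_hat_all: their remix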
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractJstranslationBlogspotCom.py
|
Python
|
bsd-3-clause
| 564
| 0.033688
|
def extractJstranslationBlogspotCom(item):
'''
    Parser for 'jstranslation.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
SymbiFlow/python-fpga-interchange
|
tests/example_netlist.py
|
Python
|
isc
| 13,854
| 0.000289
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" This file uses the fpga_interchange to create a very simple FPGA design.
This design is target the 7-series FPGA line, and the physical netlist is
suitable for a Artix 50T class fabric.
To test this flow:
- Invoke this script to output the logical netlist, physical netlist, and a
small XDC file to set the IOSTANDARD's on the ports.
- Use RapidWright's interchange branch to create a DCP using the entry point
com.xilinx.rapidwright.interchange.PhysicalNetlistToDcp
Example:
export RAPIDWRIGHT_PATH=~/RapidWright
$RAPIDWRIGHT_PATH/scripts/invoke_rapidwright.sh \
com.xilinx.rapidwright.interchange.PhysicalNetlistToDcp \
test.netlist test.phys test.xdc test.dcp
"""
import argparse
from fpga_interchange.interchange_capnp import Interchange, write_capnp_file
from fpga_interchange.logical_netlist import Library, Cell, Direction, CellInstance, LogicalNetlist
from fpga_interchange.physical_netlist import PhysicalNetlist, PhysicalBelPin, \
Placement, PhysicalPip, PhysicalSitePin, PhysicalSitePip, \
chain_branches, chain_pips, PhysicalNetType, PhysicalCellType
def example_logical_netlist():
hdi_primitives = Library('hdi_primitives')
cell = Cell('FDRE')
cell.add_port('D', Direction.Input)
cell.add_port('C', Direction.Input)
cell.add_port('CE', Direction.Input)
cell.add_port('R', Direction.Input)
cell.add_port('Q', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('IBUF')
cell.add_port('I', Direction.Input)
cell.add_port('O', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('OBUF')
cell.add_port('I', Direction.Input)
cell.add_port('O', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('BUFG')
cell.add_port('I', Direction.Input)
cell.add_port('O', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('VCC')
cell.add_port('P', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('GND')
cell.add_port('G', Direction.Output)
hdi_primitives.add_cell(cell)
top = Cell('top')
top.add_port('i', Direction.Input)
top.add_port('clk', Direction.Input)
top.add_port('o', Direction.Output)
top.add_cell_instance('ibuf', 'IBUF')
top.add_cell_instance('obuf', 'OBUF')
top.add_cell_instance('clk_ibuf', 'IBUF')
top.add_cell_instance('clk_buf', 'BUFG')
top.add_cell_instance('ff', 'FDRE')
top.add_cell_instance('VCC', 'VCC')
top.add_cell_instance('GND', 'GND')
top.add_net('i')
top.connect_net_to_cell_port('i', 'i')
top.connect_net_to_instance('i', 'ibuf', 'I')
top.add_net('i_buf')
top.connect_net_to_instance('i_buf', 'ibuf', 'O')
top.connect_net_to_instance('i_buf', 'ff', 'D')
top.add_net('o_buf')
top.connect_net_to_instance('o_buf', 'ff', 'Q')
top.connect_net_to_instance('o_buf', 'obuf', 'I')
top.add_net('o')
top.connect_net_to_instance('o', 'obuf', 'O')
top.connect_net_to_cell_port('o', 'o')
top.add_net('clk')
top.connect_net_to_cell_port('clk', 'clk')
top.connect_net_to_instance('clk', 'clk_ibuf', 'I')
top.add_net('clk_ibuf')
top.connect_net_to_instance('clk_ibuf', 'clk_ibuf', 'O')
top.connect_net_to_instance('clk_ibuf', 'clk_buf', 'I')
top.add_net('clk_buf')
top.connect_net_to_instance('clk_buf', 'clk_buf', 'O')
top.connect_net_to_instance('clk_buf', 'ff', 'C')
top.add_net('GLOBAL_LOGIC1')
top.connect_net_to_instance('GLOBAL_LOGIC1', 'VCC', 'P')
top.connect_net_to_instance('GLOBAL_LOGIC1', 'ff', 'CE')
top.add_net('GLOBAL_LOGIC0')
top.connect_net_to_instance('GLOBAL_LOGIC0', 'GND', 'G')
top.connect_net_to_instance('GLOBAL_LOGIC0', 'ff', 'R')
work = Library('work')
work.add_cell(top)
logical_netlist = LogicalNetlist(
name='top',
top_instance_name='top',
top_instance=CellInstance(
cell_name='top',
view='netlist',
property_map={},
),
property_map={},
libraries={
'work': work,
'hdi_primitives': hdi_primitives,
})
return logical_netlist
def example_physical_netlist():
phys_netlist = PhysicalNetlist(part='xc7a50tfgg484-1')
ibuf_placement = Placement(
cell_type='IBUF', cell_name='ibuf', site='IOB_X0Y12', bel='INBUF_EN')
ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='PAD', cell_pin='I')
ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(ibuf_placement)
phys_netlist.add_site_instance(site_name='IOB_X0Y12', site_type='IOB33')
obuf_placement = Placement(
cell_type='OBUF', cell_name='obuf', site='IOB_X0Y11', bel='OUTBUF')
obuf_placement.add_bel_pin_to_cell_pin(bel_pin='IN', cell_pin='I')
obuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(obuf_placement)
phys_netlist.add_site_instance(site_name='IOB_X0Y11', site_type='IOB33')
clk_ibuf_placement = Placement(
cell_type='IBUF',
cell_name='clk_ibuf',
site='IOB_X0Y24',
bel='INBUF_EN')
clk_ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='PAD', cell_pin='I')
clk_ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(clk_ibuf_placement)
    phys_netlist.add_site_instance(site_name='IOB_X0Y24', site_type='IOB33')
clk_buf_placement = Placement(
cell_type='BUFG',
cell_name='clk_buf',
site='BUFGCTRL_X0Y0',
bel='BUFG')
clk_buf_placement.add_bel_pin_to_cell_pin(bel_pin='I0', cell_pin='I')
clk_buf_placement.add_bel_pin_to_cell_pin(bel_pin='O', cell_pin='O')
phys_netlist.add_placement(clk_buf_placement)
phys_netlist.add_site_instance(site_name='BUFGCTRL_X0Y0', site_type='BUFG')
ff_placement = Placement(
cell_type='FDRE', cell_name='ff', site='SLICE_X1Y12', bel='AFF')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='SR', cell_pin='R')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='D', cell_pin='D')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='Q', cell_pin='Q')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='CE', cell_pin='CE')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='CK', cell_pin='C')
phys_netlist.add_placement(ff_placement)
phys_netlist.add_site_instance(site_name='SLICE_X1Y12', site_type='SLICEL')
i_root = chain_branches((PhysicalBelPin('IOB_X0Y12', 'PAD', 'PAD'),
PhysicalBelPin('IOB_X0Y12', 'INBUF_EN', 'PAD')))
phys_netlist.add_physical_net(net_name='i', sources=[i_root], stubs=[])
i_buf_root = chain_branches(
(PhysicalBelPin('IOB_X0Y12', 'INBUF_EN', 'OUT'),
PhysicalSitePip('IOB_X0Y12', 'IUSED', '0'),
PhysicalBelPin('IOB_X0Y12', 'I', 'I'),
PhysicalSitePin('IOB_X0Y12', 'I')) +
chain_pips('LIOI3_X0Y11', ('LIOI_IBUF0', 'LIOI_I0', 'LIOI_ILOGIC0_D',
'IOI_ILOGIC0_O', 'IOI_LOGIC_OUTS18_1')) +
(PhysicalPip('IO_INT_INTERFACE_L_X0Y12',
'INT_INTERFACE_LOGIC_OUTS_L_B18',
'INT_INTERFACE_LOGIC_OUTS_L18'),
PhysicalPip('INT_L_X0Y12', 'LOGIC_OUTS_L18', 'EE2BEG0'),
PhysicalPip('INT_L_X2Y12', 'EE2END0', 'BYP_ALT0'),
PhysicalPip('INT_L_X2Y12', 'BYP_ALT0', 'BYP_L0'),
PhysicalPip('CLBLL_L_X2Y12', 'CLBLL_BYP0', 'CLBLL_L_AX'),
PhysicalSitePin('SLICE_X1Y12', 'AX'),
PhysicalBelPin('SLICE_X1Y12', 'AX', 'AX'),
PhysicalSitePip('SLICE_X1Y12', 'AFFMUX', 'AX'),
PhysicalBelPin('SLICE_X1Y12', 'AFF', 'D')))
phys_netlist.add_physical_net(
net_name='i_buf', sources=[i_buf_root], stubs=[])
o_buf_root = chain_branches(
(PhysicalBelPin('SLICE_X1Y12', 'AFF', 'Q'),
PhysicalBelPin('SLICE_X1Y12
|
tomasjames/citsciportal
|
app/agentex/migrations/0004_lastlogins.py
|
Python
|
gpl-3.0
| 591
| 0.001692
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('agentex', '0003_auto_20150622_1101'),
]
operations = [
migrations.CreateModel(
name='LastLogins',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
('slug', models.SlugField(unique=True)),
],
),
]
|
dims/heat
|
heat/engine/clients/os/glance.py
|
Python
|
apache-2.0
| 4,221
| 0
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient import client as gc
from glanceclient import exc
from glanceclient.openstack.common.apiclient import exceptions
from heat.engine.clients import client_plugin
from heat.engine.clients import os as os_client
from heat.engine import constraints
CLIENT_NAME = 'glance'
class GlanceClientPlugin(client_plugin.ClientPlugin):
exceptions_module = [exceptions, exc]
service_types = [IMAGE] = ['image']
def _create(self):
con = self.context
endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
endpoint = self.url_for(service_type=self.IMAGE,
endpoint_type=endpoint_type)
args = {
'auth_url': con.auth_url,
'service_type': self.IMAGE,
'project_id': con.tenant_id,
'token': self.auth_token,
'endpoint_type': endpoint_type,
'cacert': self._get_client_option(CLIENT_NAME, 'ca_file'),
'cert_file': self._get_client_option(CLIENT_NAME, 'cert_file'),
'key_file': self._get_client_option(CLIENT_NAME, 'key_file'),
'insecure': self._get_client_option(CLIENT_NAME, 'insecure')
}
return gc.Client('1', endpoint, **args)
def _find_with_attr(self, entity, **kwargs):
"""Find a item for entity with attributes matching ``**kwargs``."""
matches = list(self._findall_with_attr(entity, **kwargs))
num_matches = len(matches)
if num_matches == 0:
msg = ("No %(name)s matching %(args)s.") % {
'name': entity,
'args': kwargs
}
raise exceptions.NotFound(msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch()
else:
return matches[0]
def _findall_with_attr(self, entity, **kwargs):
"""Find all items for entity with attributes matching ``**kwargs``."""
func = getattr(self.client(), entity)
filters = {'filters': kwargs}
return func.list(**filters)
def is_not_found(self, ex):
return isinstance(ex, (exceptions.NotFound, exc.HTTPNotFound))
def is_over_limit(self, ex):
return isinstance(ex, exc.HTTPOverLimit)
def is_conflict(self, ex):
return isinstance(ex, (exceptions.Conflict, exc.Conflict))
def find_image_by_name_or_id(self, image_identifier):
"""Return the ID for the specified image name or identifier.
:param image_identifier: image name or a UUID-like identifier
:returns: the id of the requested :image_identifier:
"""
return self._find_image_id(self.context.tenant_id,
image_identifier)
@os_client.MEMOIZE_FINDER
def _find_image_id(self, tenant_id, image_identifier):
# tenant id in the signature is used for the memoization key,
# that would differentiate similar resource names across tenants.
return self.get_image(image_identifier).id
def get_image(self, image_identifier):
"""Return the image object for the specified image name/id.
:param image_identifier: image name
:returns: an image object with name/id :image_identifier:
"""
try:
return self.client().images.get(image_identifier)
except exc.HTTPNotFound:
return self._find_with_attr('images', name=image_identifier)
class ImageConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exceptions.NotFound, exceptions.NoUniqueMatch)
resource_client_name = CLIENT_NAME
resource_getter_name = 'find_image_by_name_or_id'
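# A minimal standalone sketch of the name-or-id resolution order that
# get_image() above implements: an exact-id lookup first, then a filtered
# name search that must match exactly one image. `FakeImages` is a
# hypothetical stand-in for glanceclient's images manager, used only to make
# the sketch runnable; it is not Heat or glanceclient code.
class FakeImages(object):
    def __init__(self, images):
        self._by_id = dict((i['id'], i) for i in images)
        self._all = images

    def get(self, image_id):
        try:
            return self._by_id[image_id]
        except KeyError:
            raise LookupError(image_id)  # plays the role of exc.HTTPNotFound

    def list(self, filters=None):
        filters = filters or {}
        return [i for i in self._all
                if all(i.get(k) == v for k, v in filters.items())]


def _find_image_sketch(images, identifier):
    # Same order as GlanceClientPlugin.get_image: id first, then unique name.
    try:
        return images.get(identifier)
    except LookupError:
        matches = images.list(filters={'name': identifier})
        if len(matches) != 1:
            raise ValueError('expected exactly one match, got %d' % len(matches))
        return matches[0]


_images = FakeImages([{'id': 'abc-123', 'name': 'cirros'}])
assert _find_image_sketch(_images, 'abc-123')['name'] == 'cirros'
assert _find_image_sketch(_images, 'cirros')['id'] == 'abc-123'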
|
EmbeditElectronics/Python_for_PSoC
|
API_Python/setup.py
|
Python
|
mit
| 3,289
| 0.057464
|
#!/usr/bin/env python
#from distutils.core import setup
from setuptools import setup
import subprocess
import os
import platform
import re
def get_pi_version():
pi_versions = {
"0002" : "Model B Revision 1.0",
"0003" : "Model B Revision 1.0",
"0004" : "Model B Revision 2.0",
"0005" : "Model B Revision 2.0",
"0006" : "Model B Revision 2.0",
"0007" : "Model A",
"0008" : "Model A",
"0009" : "Model A",
"000d" : "Model B Revision 2.0",
"000e" : "Model B Revision 2.0",
"000f" : "Model B Revision 2.0",
"0010" : "Model B+",
"0011" : "Compute Module",
"0012" : "Model A+",
"a01041" : "Pi 2 Model B",
"a21041" : "Pi 2 Model B",
"900092" : "PiZero",
"a02082" : "Pi3 Model B",
"a22082" : "Pi3 Model B",
}
with open('/proc/cpuinfo', 'r') as cpuinfo:
info = cpuinfo.read()
soc = re.search('^Hardware\s+:\s+(\w+)$', info,flags=re.MULTILINE | re.IGNORECASE)
rev = re.search('^Revision\s+:\s+(\w+)$', info,flags=re.MULTILINE | re.IGNORECASE)
if not soc: #Not a Pi
return None
if soc.group(1).find("BCM") < 0: #Not a Pi
return None
if not rev: #What are the odds... Still not a pi.
return None
model = pi_versions.get(rev.group(1), "Unknown") #default of Unknown indicates it is likely a pi, but an unknown revision.
return model
DEPENDS_ON = []
if __name__ == "__main__":
backend = {
"Model B Revision 1.0" : "UART",
"Model B Revision 2.0" : "U
|
ART",
"Model A": "UART",
"Model B Revision 2.0": "UART",
"Model B+": "UART",
"Compute Module": "UART",
"Model A+": "UART",
"Pi 2 Model B": "UART",
"PiZero": "UART",
"Pi3 Model B" : "I2C",
"Unknown": "I2C",
"unresolved": "I2C"
}
plat = None
if platform.platform().find("Linux") >=0: #We ar
|
e on linux... Is it a pi?
if os.uname()[4][:3] == 'arm': #probably a pi
plat = get_pi_version()
if plat is None: #oh no! Maybe another SBC?
plat = "unresolved"
if plat is None: #Likely on a PC of some sort...
DEPENDS_ON.append("pyserial==2.7")
elif backend[plat] == "UART":
try:
import serial
except:
DEPENDS_ON.append("pyserial==2.6")
elif backend[plat] == "I2C":
try:
import smbus
except:
#pypi version of smbus does not appear to work. Lets get the version from the Raspbian package repository instead...
if os.geteuid() != 0:
print("Some dependencies should be installed via the Linux package manager but are unable to \
Either run this script with sudo privileges or apt-get the following packages before proceeding to use the pisoc package: \
\ni2c-tools \
\npython-smbus")
else:
proc = subprocess.Popen('apt-get install -y i2c-tools python-smbus', shell=True, stdin=None, stdout=subprocess.PIPE, bufsize = 1, executable = "/bin/bash")
for line in iter(proc.stdout.readline, b''):
print(line.rstrip())
proc.stdout.close()
proc.wait()
setup(name='pisoc',
version='2.0.1',
description='PiSoC Python API',
author='Brian Bradley',
license = 'MIT',
install_requires = DEPENDS_ON,
author_email='bradley@embeditelectronics.com',
packages=['pisoc']
)
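# A hedged illustration of the Hardware/Revision regexes that get_pi_version()
# applies to /proc/cpuinfo. The cpuinfo fragment below is made up for the
# example; revision "a01041" is listed in pi_versions above as "Pi 2 Model B",
# which the backend table maps to UART.
_sample_cpuinfo = "Hardware\t: BCM2709\nRevision\t: a01041\n"
_soc = re.search('^Hardware\s+:\s+(\w+)$', _sample_cpuinfo, flags=re.MULTILINE | re.IGNORECASE)
_rev = re.search('^Revision\s+:\s+(\w+)$', _sample_cpuinfo, flags=re.MULTILINE | re.IGNORECASE)
assert _soc.group(1).startswith("BCM")  # SoC name identifies a Pi
assert _rev.group(1) == "a01041"        # revision code keys into pi_versions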
|
ilyanesterov/browser-csp-compatibility
|
utils/server.py
|
Python
|
mit
| 4,620
| 0
|
import os
import subprocess
import requests
import time
from urlparse import urlparse
from config import config
class Server(object):
"""
    Simple helper to start/stop and interact with a test server
    TODO: add method to check what request has been sent last by the browser;
    might need this for connect-src testing, to make sure nothing is sent
    over the network
"""
def __init__(self, address, port):
        self.address = address
self.port = port
        self.logfile_name = config['server_log_filename']
self.log = None
self.log_pointer = 0
def start(self):
"""
Starts test server with stdout and stderr output to /dev/null
"""
FNULL = open(os.devnull, 'w')
command_line = ['python', 'server/server.py']
self.process = subprocess.Popen(command_line, shell=False,
stdout=FNULL, stderr=FNULL)
self.wait_for_server_to_start()
self.clean_server_log()
def stop(self):
"""
Shutdown test server child process
"""
self.process.terminate()
def wait_for_server_to_start(self, timeout=5):
"""
Waits for server process to start
Raises Exception if server didn't start
TODO: create exceptions class and raise smth like ServerError
"""
end_time = time.time() + timeout
while time.time() < end_time:
if self.server_is_running():
return True
else:
print('Waiting for start...')
time.sleep(1)
raise Exception('Cannot start server')
def server_is_running(self):
"""
Checks if server is running
"""
target_url = 'http://{0}:{1}/ping'.format(self.address, self.port)
try:
response = requests.get(target_url, timeout=1)
except Exception:
return False
if response.status_code == 200 and response.content == 'pong':
return True
else:
            print('Got unexpected response from server:')
            print('Status: {}\nContent: {}'.format(response.status_code,
                                                   response.content))
return False
def clean_server_log(self):
with open(self.logfile_name, 'w') as f:
f.write('')
f.close()
def is_request_received(self, method, url, ignore_query=False):
"""
        Method checks if a request to a specific url has been received by
        the server.
        If ignore_query is set to True, the query string will be ignored
        during comparison.
        Returns True if yes, otherwise returns False
"""
logs = self.get_new_log_messages()
parsed_logs = self._parse_logs(logs)
result = False
for message in parsed_logs:
if ignore_query:
msg_url = urlparse(message['url'].lower()).path
else:
msg_url = message['url'].lower()
if (method.lower() == message['method'].lower() and
url.lower() == msg_url):
result = True
return result
def update_log_pointer(self):
"""
Method to update log read position in case you want to get latest
logs. e.g. call it before your test to get server log's for test
"""
with open(self.logfile_name, 'r') as f:
f.seek(0, 2)
self.log_pointer = f.tell()
def get_new_log_messages(self):
"""
Method to get new log messages from server log
'new' means since last call for update_log_pointer
"""
with open(self.logfile_name, 'r') as f:
f.seek(self.log_pointer)
messages = f.readlines()
self.log_pointer = f.tell()
return messages
def _parse_logs(self, logs):
"""
Method to parse log messages
Returns array of dict for each log message, parsed by
_parse_log_message method
"""
parsed_logs = []
for log_message in logs:
parsed_logs.append(self._parse_log_message(log_message))
return parsed_logs
@staticmethod
def _parse_log_message(log_message):
"""
Method to parse log message from server log
returns dict {'method': 'method_from_log_message',
'url': 'url_from_log_message'}
"""
url = log_message.split(' ')[6]
method = log_message.split(' ')[5][1:]
return {'method': method,
'url': url}
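# A hedged illustration of the whitespace-split positions _parse_log_message
# relies on. The log line below is a made-up Common-Log-Format example; the
# actual format written by server/server.py is not shown here, so treat the
# field positions as an assumption documented by this sketch.
_line = '127.0.0.1 - - [10/Oct/2000:13:55:36 -0700] "GET /ping HTTP/1.0" 200 2'
_parts = _line.split(' ')
assert _parts[5][1:] == 'GET'   # field 5 is '"GET'; [1:] strips the quote
assert _parts[6] == '/ping'     # field 6 is the request URL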
|
cloudviz/agentless-system-crawler
|
crawler/plugins/applications/apache/feature.py
|
Python
|
apache-2.0
| 1,135
| 0.000881
|
from collections import namedtuple
def get_feature(stats):
feature_attributes = ApacheFeature(
stats['BusyWorkers'],
stats['IdleWorkers'],
stats['waiting_for_connection'],
stats['starting_up'],
stats['reading_request'],
stats['sending_reply'],
stats['keepalive_read'],
stats['dns_lookup'],
stats['closing_connection'],
stats['logging'],
stats['graceful_finishing'],
        stats['idle_worker_cleanup'],
stats['BytesPerSec'],
stats['BytesPerReq'],
stats['ReqPerSec'],
stats['Uptime'],
stats['Total_kBytes'],
stats['Total_Accesses']
)
return feature_attributes
ApacheFeature = namedtuple('ApacheFeature', [
'BusyWorkers',
'IdleWorkers',
'waiting_for_connection',
'starting_up',
'reading_request',
'sending_reply',
'keepalive_read',
'dns_lookup',
    'closing_connection',
'logging',
'graceful_finishing',
'idle_worker_cleanup',
'BytesPerSec',
'BytesPerReq',
'ReqPerSec',
'Uptime',
'Total_kBytes',
'Total_Accesses'
])
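# A minimal usage sketch with made-up scoreboard numbers; the key names match
# the fields get_feature() reads, but every value below is illustrative only.
_sample_stats = {
    'BusyWorkers': 2, 'IdleWorkers': 8,
    'waiting_for_connection': 8, 'starting_up': 0, 'reading_request': 1,
    'sending_reply': 1, 'keepalive_read': 0, 'dns_lookup': 0,
    'closing_connection': 0, 'logging': 0, 'graceful_finishing': 0,
    'idle_worker_cleanup': 0,
    'BytesPerSec': 1024.0, 'BytesPerReq': 512.0, 'ReqPerSec': 2.0,
    'Uptime': 3600, 'Total_kBytes': 3600, 'Total_Accesses': 7200,
}
_feature = get_feature(_sample_stats)
assert _feature.BusyWorkers == 2
assert _feature._fields[0] == 'BusyWorkers'  # namedtuple keeps field order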
|
urbn/kombu
|
kombu/connection.py
|
Python
|
bsd-3-clause
| 38,239
| 0.000026
|
"""Client (Connection)."""
from __future__ import absolute_import, unicode_literals
import os
import socket
import sys
from collections import OrderedDict
from contextlib import contextmanager
from itertools import count, cycle
from operator import itemgetter
try:
from ssl import CERT_NONE
ssl_available = True
except ImportError: # pragma: no cover
CERT_NONE = None
ssl_available = False
# jython breaks on relative import for .exceptions for some reason
# (Issue #112)
from kombu import exceptions
from .five import (
bytes_if_py2, python_2_unicode_compatible, reraise, string_t, text_t,
)
from .log import get_logger
from .resource import Resource
from .transport import get_transport_cls, supports_librabbitmq
from .utils.collections import HashedSeq
from .utils.functional import dictfilter, lazy, retry_over_time, shufflecycle
from .utils.objects import cached_property
from .utils.url import as_url, parse_url, quote, urlparse, maybe_sanitize_url
__all__ = ('Connection', 'ConnectionPool', 'ChannelPool')
logger = get_logger(__name__)
roundrobin_failover = cycle
resolve_aliases = {
'pyamqp': 'amqp',
'librabbitmq': 'amqp',
}
failover_strategies = {
'round-robin': roundrobin_failover,
'shuffle': shufflecycle,
}
_log_connection = os.environ.get('KOMBU_LOG_CONNECTION', False)
_log_channel = os.environ.get('KOMBU_LOG_CHANNEL', False)
@python_2_unicode_compatible
class Connection(object):
"""A connection to the broker.
Example:
>>> Connection('amqp://guest:guest@localhost:5672//')
>>> Connection('amqp://foo;amqp://bar',
... failover_strategy='round-robin')
>>> Connection('redis://', transport_options={
... 'visibility_timeout': 3000,
... })
>>> import ssl
>>> Connection('amqp://', login_method='EXTERNAL', ssl={
... 'ca_certs': '/etc/pki/tls/certs/something.crt',
... 'keyfile': '/etc/something/system.key',
... 'certfile': '/etc/something/system.cert',
... 'cert_reqs': ssl.CERT_REQUIRED,
... })
Note:
SSL currently only works with the py-amqp, and qpid
transports. For other transports you can use stunnel.
Arguments:
URL (str, Sequence): Broker URL, or a list of URLs.
Keyword Arguments:
ssl (bool): Use SSL to connect to the server. Default is ``False``.
May not be supported by the specified transport.
transport (Transport): Default transport if not specified in the URL.
connect_timeout (float): Timeout in seconds for connecting to the
server. May not be supported by the specified transport.
transport_options (Dict): A dict of additional connection arguments to
pass to alternate kombu channel implementations. Consult the
transport documentation for available options.
heartbeat (float): Heartbeat interval in int/float seconds.
Note that if heartbeats are enabled then the
:meth:`heartbeat_check` method must be called regularly,
around once per second.
Note:
The connection is established lazily when needed. If you need the
connection to be established, then force it by calling
:meth:`connect`::
>>> conn = Connection('amqp://')
>>> conn.connect()
and always remember to close the connection::
>>> conn.release()
These options have been replaced by the URL argument, but are still
supported for backwards compatibility:
:keyword hostname: Host name/address.
NOTE: You cannot specify both the URL argument and use the hostname
keyword argument at the same time.
:keyword userid: Default user name if not provided in the URL.
:keyword password: Default password if not provided in the URL.
:keyword virtual_host: Default virtual host if not provided in the URL.
:keyword port: Default port if not provided in the URL.
"""
port = None
virtual_host = '/'
connect_timeout = 5
_closed = None
_connection = None
_default_channel = None
_transport = None
_logger = False
uri_prefix = None
#: The cache of declared entities is per connection,
#: in case the server loses data.
declared_entities = None
#: Iterator returning the next broker URL to try in the event
#: of connection failure (initialized by :attr:`failover_strategy`).
cycle = None
#: Additional transport specific options,
#: passed on to the transport instance.
transport_options = None
#: Strategy used to select new hosts when reconnecting after connection
#: failure. One of "round-robin", "shuffle" or any custom iterator
#: constantly yielding new URLs to try.
failover_strategy = 'round-robin'
#: Heartbeat value, currently only supported by the py-amqp transport.
heartbeat = None
resolve_aliases = resolve_aliases
failover_strategies = failover_strategies
hostname = userid = password = ssl = login_method = None
def __init__(self, hostname='localhost', userid=None,
password=None, virtual_host=None, port=None, insist=False,
ssl=False, transport=None, connect_timeout=5,
transport_options=None, login_method=None, uri_prefix=None,
heartbeat=0, failover_strategy='round-robin',
alternates=None, **kwargs):
alt = [] if alternates is None else alternates
# have to spell the args out, just to get nice docstrings :(
params = self._initial_params = {
'hostname': hostname, 'userid': userid,
'password': password, 'virtual_host': virtual_host,
            'port': port, 'insist': insist, 'ssl': ssl,
            'transport': transport, 'connect_timeout': connect_timeout,
'login_method': login_method, 'heartbeat': heartbeat
}
if hostname and not isinstance(hostname, string_t):
alt.extend(hostname)
hostname = alt[0]
params.update(hostname=hostname)
if hostname:
if ';' in hostname:
alt = hostname.split(';') + alt
hostname = alt[0]
params.update(hostname=hostname)
if '://' in hostname and '+' in hostname[:hostname.index('://')]:
# e.g. sqla+mysql://root:masterkey@localhost/
params['transport'], params['hostname'] = \
hostname.split('+', 1)
self.uri_prefix = params['transport']
elif '://' in hostname:
transport = transport or urlparse(hostname).scheme
if not get_transport_cls(transport).can_parse_url:
# we must parse the URL
url_params = parse_url(hostname)
params.update(
dictfilter(url_params),
hostname=url_params['hostname'],
)
params['transport'] = transport
self._init_params(**params)
# fallback hosts
self.alt = alt
# keep text representation for .info
# only temporary solution as this won't work when
# passing a custom object (Issue celery/celery#3320).
self._failover_strategy = failover_strategy or 'round-robin'
self.failover_strategy = self.failover_strategies.get(
self._failover_strategy) or self._failover_strategy
if self.alt:
self.cycle = self.failover_strategy(self.alt)
next(self.cycle) # skip first entry
if transport_options is None:
transport_options = {}
self.transport_options = transport_options
if _log_connection: # pragma: no cover
self._logger = True
if uri_prefix:
self.uri_prefix = uri_prefix
self.declared_entities = set()
def switch(self, conn_str):
"""Switch connection parameters to use a new URL or hostname.
Note:
Does not reconnect!
Arguments:
conn_st
|
bewiwi/sauna
|
sauna/scheduler.py
|
Python
|
bsd-2-clause
| 3,120
| 0
|
import time
import fractions
from functools import reduce
import logging
class Scheduler:
def __init__(self, jobs):
"""
Create a new Scheduler.
        >>> s = Scheduler([Job(1, max, 100, 200)])
>>> for jobs in s:
... time.sleep(s.tick_duration)
:param jobs: Sequence of jobs to schedule
"""
        periodicities = {job.periodicity for job in jobs}
self.tick_duration = reduce(lambda x, y: fractions.gcd(x, y),
periodicities)
self._ticks = self.find_minimum_ticks_required(self.tick_duration,
periodicities)
self._jobs = jobs
self._current_tick = 0
logging.debug('Scheduler has {} ticks, each one is {} seconds'.
format(self._ticks, self.tick_duration))
@staticmethod
def find_minimum_ticks_required(tick_duration, periodicities):
"""Find the minimum number of ticks required to execute all jobs
at once."""
ticks = 1
for periodicity in reversed(sorted(periodicities)):
if ticks % periodicity != 0:
ticks *= int(periodicity / tick_duration)
return ticks
def __iter__(self):
return self
def __next__(self):
jobs = [job for job in self._jobs
if ((self._current_tick * self.tick_duration)
% job.periodicity) == 0
]
if jobs:
logging.debug('Tick {}, scheduled {}'.
format(self._current_tick, jobs))
self._current_tick += 1
if self._current_tick >= self._ticks:
self._current_tick = 0
for job in jobs:
job()
return jobs
def run(self):
"""Shorthand for iterating over all jobs forever.
>>> print_time = lambda: print(time.time())
>>> s = Scheduler([Job(1, print_time)])
>>> s.run()
1470146095.0748773
1470146096.076028
"""
for _ in self:
time.sleep(self.tick_duration)
class Job:
def __init__(self, periodicity, func, *func_args, **func_kwargs):
"""
Create a new Job to be scheduled and run periodically.
:param periodicity: Number of seconds to wait between job runs
:param func: callable that perform the job action
:param func_args: arguments of the callable
:param func_kwargs: keyword arguments of the callable
"""
if not callable(func):
raise ValueError('func attribute must be callable')
self.periodicity = periodicity
self.func = func
self.func_args = func_args
self.func_kwargs = func_kwargs
def __repr__(self):
try:
name = self.func.__name__
except AttributeError:
name = 'unknown'
return '<Job {} every {} seconds>'.format(name,
self.periodicity)
def __call__(self, *args, **kwargs):
self.func(*self.func_args, **self.func_kwargs)
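# A small usage sketch (the job functions are illustrative). Two jobs with 2 s
# and 3 s periods give a 1 s tick and a 6-tick cycle, so both fire together
# once per cycle; iterating manually avoids run()'s infinite loop. Note that
# fractions.gcd was removed in Python 3.9, so this assumes an interpreter
# where the import above still resolves.
def _fast():
    print('fast job')

def _slow():
    print('slow job')

_s = Scheduler([Job(2, _fast), Job(3, _slow)])
assert _s.tick_duration == 1
for _, _jobs in zip(range(6), _s):  # six ticks = one full schedule cycle
    pass                            # each iteration already ran the due jobs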
|
alexey-grom/django-userflow
|
userflow/forms/signin.py
|
Python
|
mit
| 2,222
| 0.00045
|
# encoding: utf-8
from django.contrib import auth
from django.contrib.auth import get_user_model
from django.core.exceptions import MultipleObjectsReturned
from django import forms
from django.utils.translation import ugettext_lazy as _
__all__ = ()
class SigninForm(forms.Form):
email = forms.EmailField(required=True, label=_('Email'))
password = forms.CharField(required=True, widget=forms.PasswordInput, label=_('Password'))
error_messages = {
'invalid_login': _('Please enter a correct email and password. '
'Note that both fields may be case-sensitive.'),
'inactive': _('This account is inactive.'),
'removed': _('This account is removed.'),
}
def __init__(self, *args, **kwargs):
self.user_cache = None
super(SigninForm, self).__init__(*args, **kwargs)
def clean(self):
data = self.cleaned_data
try:
self.user_cache = self.check_user(**data)
except forms.ValidationError as e:
self.add_error('email', e)
return data
    @property
def username_field(self):
model = get_user_model()
username_field = model.USERNAME_FIELD
return get_user_model()._meta.get_field(username_field)
def check_user(self, email=None, password=None, **kwargs):
credentials = {self.username_field.name: email,
'password': password}
try:
user = auth.authenticate(**credentials)
except MultipleObjectsReturned:
return
if user is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
if user.is_removed:
raise forms.ValidationError(
self.error_messages['removed'],
code='removed',
)
return user
@property
def user(self):
return self.user_cache
|
portfoliome/postpy
|
postpy/pg_encodings.py
|
Python
|
mit
| 627
| 0
|
from encodings import normalize_encoding, aliases
from types import MappingProxyType
from psycopg2.extensions import encodings as _PG_ENCODING_MAP
PG_ENCODING_MAP = MappingProxyType(_PG_ENCODING_MAP)
# python to postgres encoding map
_PYTHON_ENCODING_MAP = {
v: k for k, v in PG_ENCODING_MAP.items()
}
def get_postgres_encoding(python_encoding: str) -> str:
"""Python to postgres encoding map."""
encoding = normalize_encoding(python_encoding.lower())
    encoding_ = aliases.aliases[encoding.replace('_', '', 1)].upper()
pg_encoding = PG_ENCODING_MAP[encoding_.replace('_', '')]
return pg_encoding
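# A worked example of the normalization chain above for 'utf-8'. Only the
# stdlib steps are asserted; the final PG_ENCODING_MAP lookup depends on
# psycopg2's encodings table, so its result is deliberately left unasserted.
_step1 = normalize_encoding('utf-8'.lower())   # 'utf_8'
_step2 = _step1.replace('_', '', 1)            # 'utf8'
_step3 = aliases.aliases[_step2].upper()       # 'UTF_8'
_step4 = _step3.replace('_', '')               # 'UTF8' -- the key looked up
assert _step4 == 'UTF8'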
|
jlongever/redfish-client-python
|
on_http_redfish_1_0/models/power_1_0_0_power_supply.py
|
Python
|
apache-2.0
| 11,886
| 0.001599
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Power100PowerSupply(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Power100PowerSupply - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'line_input_voltage_type': 'Power100LineInputVoltageType',
'member_id': 'str',
'oem': 'ResourceOem',
'power_supply_type': 'Power100PowerSupplyType',
'redundancy': 'list[RedundancyRedundancy]',
'redundancyodata_count': 'Odata400Count',
'redundancyodata_navigation_link': 'Odata400IdRef',
'related_item': 'list[Odata400IdRef]',
'related_itemodata_count': 'Odata400Count',
'related_itemodata_navigation_link': 'Odata400IdRef',
'status': 'ResourceStatus'
}
self.attribute_map = {
'line_input_voltage_type': 'LineInputVoltageType',
'member_id': 'MemberId',
'oem': 'Oem',
'power_supply_type': 'PowerSupplyType',
'redundancy': 'Redundancy',
'redundancyodata_count': 'Redundancy@odata.count',
'redundancyodata_navigation_link': 'Redundancy@odata.navigationLink',
'related_item': 'RelatedItem',
'related_itemodata_count': 'RelatedItem@odata.count',
            'related_itemodata_navigation_link': 'RelatedItem@odata.navigationLink',
'status': 'Status'
}
self._line_input_voltage_type = None
self._member_id = None
self._oem = None
self._power_supply_type = None
self._redundancy = None
self._redundancyodata_count = None
self._redundancyodata_navigation_link = None
self._related_item = None
self._related_itemodata_count = None
self._related_itemodata_navigation_link = None
self._status = None
@property
def line_input_voltage_type(self):
"""
Gets the line_input_voltage_type of this Power100PowerSupply.
The line voltage type supported as an input to this Power Supply
:return: The line_input_voltage_type of this Power100PowerSupply.
:rtype: Power100LineInputVoltageType
"""
return self._line_input_voltage_type
@line_input_voltage_type.setter
def line_input_voltage_type(self, line_input_voltage_type):
"""
Sets the line_input_voltage_type of this Power100PowerSupply.
The line voltage type supported as an input to this Power Supply
:param line_input_voltage_type: The line_input_voltage_type of this Power100PowerSupply.
:type: Power100LineInputVoltageType
"""
self._line_input_voltage_type = line_input_voltage_type
@property
def member_id(self):
"""
Gets the member_id of this Power100PowerSupply.
This is the identifier for the member within the collection.
:return: The member_id of this Power100PowerSupply.
:rtype: str
"""
return self._member_id
@member_id.setter
def member_id(self, member_id):
"""
Sets the member_id of this Power100PowerSupply.
This is the identifier for the member within the collection.
:param member_id: The member_id of this Power100PowerSupply.
:type: str
"""
self._member_id = member_id
@property
def oem(self):
"""
Gets the oem of this Power100PowerSupply.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:return: The oem of this Power100PowerSupply.
:rtype: ResourceOem
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this Power100PowerSupply.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:param oem: The oem of this Power100PowerSupply.
:type: ResourceOem
"""
self._oem = oem
@property
def power_supply_type(self):
"""
Gets the power_supply_type of this Power100PowerSupply.
The Power Supply type (AC or DC)
:return: The power_supply_type of this Power100PowerSupply.
:rtype: Power100PowerSupplyType
"""
return self._power_supply_type
@power_supply_type.setter
def power_supply_type(self, power_supply_type):
"""
Sets the power_supply_type of this Power100PowerSupply.
The Power Supply type (AC or DC)
:param power_supply_type: The power_supply_type of this Power100PowerSupply.
:type: Power100PowerSupplyType
"""
self._power_supply_type = power_supply_type
@property
def redundancy(self):
"""
Gets the redundancy of this Power100PowerSupply.
This structure is used to show redundancy for fans. The Component ids will reference the members of the redundancy groups.
:return: The redundancy of this Power100PowerSupply.
:rtype: list[RedundancyRedundancy]
"""
return self._redundancy
@redundancy.setter
def redundancy(self, redundancy):
"""
Sets the redundancy of this Power100PowerSupply.
This structure is used to show redundancy for fans. The Component ids will reference the members of the redundancy groups.
:param redundancy: The redundancy of this Power100PowerSupply.
:type: list[RedundancyRedundancy]
"""
self._redundancy = redundancy
@property
def redundancyodata_count(self):
"""
Gets the redundancyodata_count of this Power100PowerSupply.
:return: The redundancyodata_count of this Power100PowerSupply.
:rtype: Odata400Count
"""
return self._redundancyodata_count
@redundancyodata_count.setter
def redundancyodata_count(self, redundancyodata_count):
"""
Sets the redundancyodata_count of this Power100PowerSupply.
:param redundancyodata_count: The redundancyodata_count of this Power100PowerSupply.
:type: Odata400Count
"""
self._redundancyodata_count = redundancyodata_count
@property
def redundancyodata_navigation_link(self):
"""
Gets the redundancyodata_navigation_link of this Power100PowerSupply.
:return: The redundancyodata_navigation_link of this Power100PowerSupply.
:rtype: Odata400IdRef
"""
return self._redundancyodata_navigation_link
@redundancyodata_navigation_link.setter
def redundancyodata_navigation_link(self, redundancyodata_navigation_link):
"""
Sets the redundancyodata_navigation_link of this Power100PowerSupply.
:param redundancyodata_navigation_link: The redundancyodata_navigation_link of this Power100PowerSupply.
:type: Odata400IdRef
"""
self._redundancyodata_navigation_link = redundancyodata_navigation_link
@property
def related_item(self):
"""
Gets the related_item of thi
|
vgrem/Office365-REST-Python-Client
|
office365/communications/callrecords/call_record.py
|
Python
|
mit
| 630
| 0.003175
|
from office365.directory.identities.identity_set import IdentitySet
from office365.entity import Entity
class CallRecord(Entity):
"""Represents a single peer-to-p
|
eer call or a group call between multiple participants,
sometimes referred to as an online meeting."""
@property
def join_web_url(self):
"""Meeting URL associated to the call. May not be available fo
|
r a peerToPeer call record type."""
return self.properties.get("joinWebUrl", None)
@property
def organizer(self):
"""The organizing party's identity.."""
return self.properties.get("organizer", IdentitySet())
|
will-Do/avocado-vt
|
scripts/regression.py
|
Python
|
gpl-2.0
| 20,876
| 0.002299
|
#!/usr/bin/python
"""
Program that parses standard format results,
computes and checks for regression bugs.
:copyright: Red Hat 2011-2012
:author: Amos Kong <akong@redhat.com>
"""
import os
import sys
import re
import commands
import warnings
import ConfigParser
import MySQLdb
def exec_sql(cmd, conf="../../global_config.ini"):
config = ConfigParser.ConfigParser()
config.read(conf)
user = config.get("AUTOTEST_WEB", "user")
passwd = config.get("AUTOTEST_WEB", "password")
db = config.get("AUTOTEST_WEB", "database")
db_type = config.get("AUTOTEST_WEB", "db_type")
if db_type != 'mysql':
print "regression.py: only support mysql database!"
sys.exit(1)
conn = MySQLdb.connect(host="localhost", user=user,
passwd=passwd, db=db)
cursor = conn.cursor()
cursor.execute(cmd)
rows = cursor.fetchall()
lines = []
for row in rows:
line = []
for c in row:
line.append(str(c))
lines.append(" ".join(line))
cursor.close()
conn.close()
return lines
def get_test_keyval(jobid, keyname, default=''):
idx = exec_sql("select job_idx from tko_jobs where afe_job_id=%s"
% jobid)[-1]
test_idx = exec_sql('select test_idx from tko_tests where job_idx=%s'
% idx)[3]
try:
return exec_sql('select value from tko_test_attributes'
' where test_idx=%s and attribute="%s"'
% (test_idx, keyname))[-1]
except:
return default
class Sample(object):
""" Collect test results in same environment to a sample """
def __init__(self, sample_type, arg):
def generate_raw_table(test_dict):
ret_dict = []
tmp = []
sample_type = category = None
for i in test_dict:
line = i.split('|')[1:]
if not sample_type:
sample_type = line[0:2]
if sample_type != line[0:2]:
ret_dict.append('|'.join(sample_type + tmp))
sample_type = line[0:2]
tmp = []
if "e+" in line[-1]:
tmp.append("%f" % float(line[-1]))
elif 'e-' in line[-1]:
tmp.append("%f" % float(line[-1]))
elif not (re.findall("[a-zA-Z]", line[-1]) or is_int(line[-1])):
tmp.append("%f" % float(line[-1]))
else:
tmp.append(line[-1])
if category != i.split('|')[0]:
category = i.split('|')[0]
ret_dict.append("Category:" + category.strip())
ret_dict.append(self.categories)
ret_dict.append('|'.join(sample_type + tmp))
return ret_dict
if sample_type == 'filepath':
files = arg.split()
self.files_dict = []
for i in range(len(files)):
fd = open(files[i], "r")
f = []
for l in fd.readlines():
l = l.strip()
if re.findall("^### ", l):
if "kvm-userspace-ver" in l:
self.kvmver = l.split(':')[-1]
elif "kvm_version" in l:
self.hostkernel = l.split(':')[-1]
elif "guest-kernel-ver" in l:
self.guestkernel = l.split(':')[-1]
elif "session-length" in l:
self.len = l.split(':')[-1]
else:
f.append(l.strip())
self.files_dict.append(f)
fd.close()
sysinfodir = os.path.join(os.path.dirname(files[0]), "../../sysinfo/")
sysinfodir = os.path.realpath(sysinfodir)
cpuinfo = commands.getoutput("cat %s/cpuinfo" % sysinfodir)
lscpu = commands.getoutput("cat %s/lscpu" % sysinfodir)
meminfo = commands.getoutput("cat %s/meminfo" % sysinfodir)
lspci = commands.getoutput("cat %s/lspci_-vvnn" % sysinfodir)
partitions = commands.getoutput("cat %s/partitions" % sysinfodir)
fdisk = commands.getoutput("cat %s/fdisk_-l" % sysinfodir)
status_path = os.path.join(os.path.dirname(files[0]), "../status")
status_file = open(status_path, 'r')
content = status_file.readlines()
self.testdata = re.findall("localtime=(.*)\t", content[-1])[-1]
cpunum = len(re.findall("processor\s+: \d", cpuinfo))
cpumodel = re.findall("Model name:\s+(.*)", lscpu)
socketnum = int(re.findall("Socket\(s\):\s+(\d+)", lscpu)[0])
corenum = int(re.findall("Core\(s\) per socket:\s+(\d+)", lscpu)[0]) * socketnum
threadnum = int(re.findall("Thread\(s\) per core:\s+(\d+)", lscpu)[0]) * corenum
numanodenum = int(re.findall("NUMA node\(s\):\s+(\d+)", lscpu)[0])
memnum = float(re.findall("MemTotal:\s+(\d+)", meminfo)[0]) / 1024 / 1024
nicnum = len(re.findall("\d+:\d+\.0 Ethernet", lspci))
disknum = re.findall("sd\w+\S", partitions)
fdiskinfo = re.findall("Disk\s+(/dev/sd.*\s+GiB),", fdisk)
elif sample_type == 'database':
jobid = arg
self.kvmver = get_test_keyval(jobid, "kvm-userspace-ver")
self.hostkernel = get_test_keyval(jobid, "kvm_version")
self.guestkernel = get_test_keyval(jobid, "guest-kernel-ver")
self.len = get_test_keyval(jobid, "session-length")
self.categories = get_test_keyval(jobid, "category")
idx = exec_sql("select job_idx from tko_jobs where afe_job_id=%s"
% jobid)[-1]
data = exec_sql("select test_idx,iteration_key,iteration_value"
" from tko_perf_view where job_idx=%s" % idx)
testidx = None
job_dict = []
test_dict = []
for l in data:
s = l.split()
if not testidx:
testidx = s[0]
if testidx != s[0]:
job_dict.append(generate_raw_table(test_dict))
test_dict = []
testidx = s[0]
test_dict.append(' | '.join(s[1].split('--')[0:] + s[-1:]))
job_dict.append(generate_raw_table(test_dict))
self.files_dict = job_dict
self.version = " userspace: %s\n host kernel: %s\n guest kernel: %s" % (
self.kvmver, self.hostkernel, self.guestkernel)
nrepeat = len(self.files_dict)
if nrepeat < 2:
print "`nrepeat' should be lar
|
ger than 1!"
sys.exit(1)
self.desc = """<hr>Machine Inf
|
o:
o CPUs(%s * %s), Cores(%s), Threads(%s), Sockets(%s),
o NumaNodes(%s), Memory(%.1fG), NICs(%s)
o Disks(%s | %s)
Please check sysinfo directory in autotest result to get more details.
(eg: http://autotest-server.com/results/5057-autotest/host1/sysinfo/)
<hr>""" % (cpunum, cpumodel, corenum, threadnum, socketnum, numanodenum, memnum, nicnum, fdiskinfo, disknum)
self.desc += """ - Every Avg line represents the average value based on *%d* repetitions of the same test,
and the following SD line represents the Standard Deviation between the *%d* repetitions.
- The Standard deviation is displayed as a percentage of the average.
- The significance of the differences between the two averages is calculated using unpaired T-test that
takes into account the SD of the averages.
- The paired t-test is computed for the averages of same category.
""" % (nrepeat, nrepeat)
def getAvg(self, avg_update=None):
return self._process_files(self.files_dict, self._get_list_avg,
avg_update=avg_update)
def getAvgPercent(self, avgs_dict):
return self._process_files(avgs_dict, self._get_augment_rate)
def getSD(self):
return self._process_files(self.files_dict, self._get_list_sd)
def getSDRate(self,
|
nrz/ylikuutio
|
external/bullet3/examples/pybullet/gym/pybullet_envs/deep_mimic/env/humanoid_stable_pd.py
|
Python
|
agpl-3.0
| 42,338
| 0.010345
|
from pybullet_utils import pd_controller_stable
from pybullet_envs.deep_mimic.env import humanoid_pose_interpolator
import math
import numpy as np
chest = 1
neck = 2
rightHip = 3
rightKnee = 4
rightAnkle = 5
rightShoulder = 6
rightElbow = 7
leftHip = 9
leftKnee = 10
leftAnkle = 11
leftShoulder = 12
leftElbow = 13
jointFrictionForce = 0
class HumanoidStablePD(object):
def __init__( self, pybullet_client, mocap_data, timeStep,
useFixedBase=True, arg_parser=None, useComReward=False):
self._pybullet_client = pybullet_client
self._mocap_data = mocap_data
self._arg_parser = arg_parser
print("LOADING humanoid!")
flags=self._pybullet_client.URDF_MAINTAIN_LINK_ORDER+self._pybullet_client.URDF_USE_SELF_COLLISION+self._pybullet_client.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS
self._sim_model = self._pybullet_client.loadURDF(
"humanoid/humanoid.urdf", [0, 0.889540259, 0],
globalScaling=0.25,
useFixedBase=useFixedBase,
flags=flags)
#self._pybullet_client.setCollisionFilterGroupMask(self._sim_model,-1,collisionFilterGroup=0,collisionFilterMask=0)
#for j in range (self._pybullet_client.getNumJoints(self._sim_model)):
# self._pybullet_client.setCollisionFilterGroupMask(self._sim_model,j,collisionFilterGroup=0,collisionFilterMask=0)
self._end_effectors = [5, 8, 11, 14] #ankle and wrist, both left and right
self._kin_model = self._pybullet_client.loadURDF(
"humanoid/humanoid.urdf", [0, 0.85, 0],
globalScaling=0.25,
useFixedBase=True,
flags=self._pybullet_client.URDF_MAINTAIN_LINK_ORDER)
self._pybullet_client.changeDynamics(self._sim_model, -1, lateralFriction=0.9)
for j in range(self._pybullet_client.getNumJoints(self._sim_model)):
self._pybullet_client.changeDynamics(self._sim_model, j, lateralFriction=0.9)
self._pybullet_client.changeDynamics(self._sim_model, -1, linearDamping=0, angularDamping=0)
self._pybullet_client.changeDynamics(self._kin_model, -1, linearDamping=0, angularDamping=0)
#todo: add feature to disable simulation for a particular object. Until then, disable all collisions
self._pybullet_client.setCollisionFilterGroupMask(self._kin_model,
-1,
collisionFilterGroup=0,
collisionFilterMask=0)
self._pybullet_client.changeDynamics(
self._kin_model,
-1,
activationState=self._pybullet_client.ACTIVATION_STATE_SLEEP +
self._pybullet_client.ACTIVATION_STATE_ENABLE_SLEEPING +
self._pybullet_client.ACTIVATION_STATE_DISABLE_WAKEUP)
alpha = 0.4
self._pybullet_client.changeVisualShape(self._kin_model, -1, rgbaColor=[1, 1, 1, alpha])
for j in range(self._pybullet_client.getNumJoints(self._kin_model)):
self._pybullet_client.setCollisionFilterGroupMask(self._kin_model,
j,
collisionFilterGroup=0,
collisionFilterMask=0)
self._pybullet_client.changeDynamics(
self._kin_model,
j,
activationState=self._pybullet_client.ACTIVATION_STATE_SLEEP +
self._pybullet_client.ACTIVATION_STATE_ENABLE_SLEEPING +
self._pybullet_client.ACTIVATION_STATE_DISABLE_WAKEUP)
self._pybullet_client.changeVisualShape(self._kin_model, j, rgbaColor=[1, 1, 1, alpha])
self._poseInterpolator = humanoid_pose_interpolator.HumanoidPoseInterpolator()
for i in range(self._mocap_data.NumFrames() - 1):
frameData = self._mocap_data._motion_data['Frames'][i]
self._poseInterpolator.PostProcessMotionData(frameData)
self._stablePD = pd_controller_stable.PDControllerStableMultiDof(self._pybullet_client)
self._timeStep = timeStep
self._kpOrg = [
0, 0, 0, 0, 0, 0, 0, 1000, 1000, 1000, 1000, 100, 100, 100, 100, 500, 500, 500, 500, 500,
400, 400, 400, 400, 400, 400, 400, 400, 300, 500, 500, 500, 500, 500, 400, 400, 400, 400,
400, 400, 400, 400, 300
]
self._kdOrg = [
0, 0, 0, 0, 0, 0, 0, 100, 100, 100, 100, 10, 10, 10, 10, 50, 50, 50, 50, 50, 40, 40, 40,
40, 40, 40,
40, 40, 30, 50, 50, 50, 50, 50, 40, 40, 40, 40, 40, 40, 40, 40, 30
]
self._jointIndicesAll = [
chest, neck, rightHip, rightKnee, rightAnkle, rightShoulder, rightElbow, leftHip, leftKnee,
leftAnkle,
leftShoulder, leftElbow
]
for j in self._jointIndicesAll:
#self._pybullet_client.setJointMotorControlMultiDof(self._sim_model, j, self._pybullet_client.POSITION_CONTROL, force=[1,1,1])
self._pybullet_client.setJointMotorControl2(self._sim_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=0,
positionGain=0,
targetVelocity=0,
force=jointFrictionForce)
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=[0, 0, 0, 1],
targetVelocity=[0, 0, 0],
positionGain=0,
velocityGain=1,
force=[jointFrictionForce, jointFrictionForce, jointFrictionForce])
self._pybullet_client.setJointMotorControl2(self._kin_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=0,
positionGain=0,
targetVelocity=0,
force=0)
self._pybullet_client.setJointMotorControlMultiDof(
self._kin_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=[0, 0, 0, 1],
targetVelocity=[0, 0, 0],
positionGain=0,
velocityGain=1,
force=[jointFrictionForce, jointFrictionForce, 0])
self._jointDofCounts = [4, 4, 4, 1, 4, 4, 1, 4, 1, 4, 4, 1]
#only those body parts/links are allowed to touch the ground, otherwise the episode terminates
fall_contact_bodies = []
if self._arg_parser is not None:
fall_contact_bodies = self._arg_parser.parse_ints("fall_contact_bodies")
self._fall_contact_body_parts = fall_contact_bodies
#[x,y,z] base position and [x,y,z,w] base orientation!
self._totalDofs = 7
for dof in self._jointDofCounts:
self._totalDofs += dof
self.setSimTime(0)
self._useComReward = useComReward
self.resetPose()
def resetPose(self):
#print("resetPose with self._frame=", self._frame, " and self._frameFraction=",self._frameFraction)
pose = self.computePose(self._frameFraction)
self.initializePose(self._poseInterpolator, self._sim_model, initBase=True)
self.initializePose(self._poseInterpolator, self._kin_model, initBase=False)
def initializePose(self, pose, phys_model, initBase, initializeVelocity=True):
useArray = True
if initializeVelocity:
if initBase:
self._pybullet_client.resetBasePositionAndOrientation(phys_model, pose._basePos,
pose._baseOrn)
self._pybullet_client.resetBaseVelocity(phys_model, pose._baseLinVel, pose._baseAngVel)
if useArray:
indices = [chest,neck,rightHip,rightKnee,
rightAnkle, rightShoulder, rightElbow,leftHip,
leftKnee, leftAnkle, leftShoulder,leftElbow]
jointPositions = [pose._chestRot, pose._neckRot, pose._rightHipRot, pose._rightKneeRot,
pose._rightAnkleRot, pose._rightShoulderRot, pose._rightElbowRot, pose
|
gr33ndata/rivellino
|
ruleset/__init__.py
|
Python
|
mit
| 4,470
| 0.003803
|
import os
import sys
import yaml
from etllib.conf import Conf
from etllib.yaml_helper import YAMLHelper
from plugins import PluginEngine
class RulesEngine(list):
def __init__(self):
self.rules_path = os.path.dirname(os.path.realpath(__file__))
self.conf = Conf()
self.load()
self.filter_recursion()
self.pe = PluginEngine()
def parse_rule_file(self, file_path):
yaml_data = YAMLHelper(file_path).read()
yaml_data['rule_name'] = os.path.split(file_path)[1]
if yaml_data['rule_type'] == 'group':
# Group Rule, i.e. with child rules
pass
else:
# Single Rule, i.e. with no child rules
# Get Data Nodes parameters from Config file
src = yaml_data['source_node']
dst = yaml_data['destination_node']
yaml_data['source_node'] = self.conf.get_data_nodes(src)
yaml_data['destination_node'] = self.conf.get_data_nodes(dst)
return yaml_data
def load(self):
rule_files = [os.path.join(self.rules_path, f)
for f in os.listdir(self.rules_path)
if os.path.isfile(os.path.join(self.rules_path, f))
and f.endswith('.yml')
]
for rule_file in rule_files:
self.append(self.parse_rule_file(rule_file))
def filter_recursion(self):
# Filter out group rules with members of type groups
for rule in self:
if rule['rule_type'] == 'group':
rule_members = [
child for child in rule['members']
if self.get_rule_by_name(child)['rule_type'] == 'single'
]
rule['members'] = rule_members
def get_rule_by_name(self, rule_name):
for rule in self:
if rule['rule_name'] == rule_name:
return rule
#print 'rule not found'
def expand_action(self, action):
if isinstance(action, str):
if action.startswith('$rule:'):
_, subrule_name, subrule_field = action.strip().split(':')
                subrule = self.get_rule_by_name(subrule_name)
return self.apply_rule_ingress(subrule)[subrule_field]
else:
return action
elif isinstance(action, dict):
for key, val in action.iteritems():
action[key] = self.expand_action(val)
return action
else:
return action
def apply_rule_ingress(self, rule):
ingress_plugin_name = rule['ingress_plugin']
ingress_plugin_runnable = self.pe[ingress_plugin_name].init(rule)
data = ingress_plugin_runnable.run(rule, None)
ingress_plugin_runnable.exit()
return data
def apply_rule_egress(self, rule, data):
egress_plugin_name = rule['egress_plugin']
egress_plugin_runnable = self.pe[egress_plugin_name].init(rule)
egress_plugin_runnable.run(rule, data)
egress_plugin_runnable.exit()
def apply_data_processors(self, rule, data):
if not rule.get('data_processors', False):
return data
if type(rule['data_processors']) is str:
data_processors = [rule['data_processors']]
else:
data_processors = rule['data_processors']
for processor_plugin_name in data_processors:
processor_plugin_runnable = self.pe[processor_plugin_name].init(rule)
data = processor_plugin_runnable.run(rule, data)
processor_plugin_runnable.exit()
return data
def apply_rule(self, rule):
print 'Applying {0}'.format(rule['rule_name'])
if rule['rule_type'] == 'single':
rule['action'] = self.expand_action(rule['action'])
data = self.apply_rule_ingress(rule)
data = self.apply_data_processors(rule, data)
self.apply_rule_egress(rule, data)
else:
for child_rule_name in rule['members']:
self.apply_rule_by_name(child_rule_name)
def apply_rule_by_name(self, rule_name):
for rule in self:
if rule['rule_name'] == rule_name:
self.apply_rule(rule)
break
else:
sys.exit('Error! Rule not found')
def apply_rules(self):
for rule in self:
if rule['active']:
self.apply_rule(rule)
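# A hedged illustration of the '$rule:' action syntax that expand_action()
# parses above; the rule and field names below are made up for the example.
_action = '$rule:fetch_users:payload'
_, _subrule_name, _subrule_field = _action.strip().split(':')
assert (_subrule_name, _subrule_field) == ('fetch_users', 'payload')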
|
JonnyWong16/plexpy
|
lib/pyparsing/__init__.py
|
Python
|
gpl-3.0
| 9,095
| 0.001649
|
# module pyparsing.py
#
# Copyright (c) 2003-2021 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print(hello, "->", greet.parse_string(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of :class:`'+'<And>`,
:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
"Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
:class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
class
- see how to create repetitive expressions using :class:`ZeroOrMore`
and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
and :class:`'&'<Each>` operators to combine simple expressions into
more complex ones
- associate names with your parsed results using
:class:`ParserElement.setResultsName`
- access the parsed data, which is returned as a :class:`ParseResults`
object
- find some helpful expression short-cuts like :class:`delimitedList`
and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
namespace class
"""
from typing import NamedTuple
class version_info(NamedTuple):
major: int
minor: int
micro: int
releaselevel: str
serial: int
@property
def __version__(self):
return "{}.{}.{}".format(self.major, self.minor, self.micro) + (
"{}{}{}".format(
"r" if self.releaselevel[0] == "c" else "",
self.releaselevel[0],
self.serial,
),
"",
)[self.releaselevel == "final"]
def __str__(self):
return "{} {} / {}".format(__name__, self.__version__, __version_time__)
def __repr__(self):
return "{}.{}({})".format(
__name__,
type(self).__name__,
", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
)
__version_info__ = version_info(3, 0, 6, "final", 0)
__version_time__ = "12 Nov 2021 16:06 UTC"
__version__ = __version_info__.__version__
__versionTime__ = __version_time__
__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
from .util import *
from .exceptions import *
from .actions import *
from .core import __diag__, __compat__
from .results import *
from .core import *
from .core import _builtin_exprs as core_builtin_exprs
from .helpers import *
from .helpers import _builtin_exprs as helper_builtin_exprs
from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
from .testing import pyparsing_test as testing
from .common import (
pyparsing_common as common,
_builtin_exprs as common_builtin_exprs,
)
# define backward compat synonyms
if "pyparsing_unicode" not in globals():
pyparsing_unicode = unicode
if "pyparsing_common" not in globals():
pyparsing_common = common
if "pyparsing_test" not in globals():
pyparsing_test = testing
core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
__all__ = [
"__version__",
"__version_time__",
"__author__",
"__compat__",
"__diag__",
"And",
"AtLineStart",
"AtStringStart",
"CaselessKeyword",
"CaselessLiteral",
"CharsNotIn",
"Combine",
"Dict",
"Each",
"Empty",
"FollowedBy",
"Forward",
"GoToColumn",
"Group",
"IndentedBlock",
"Keyword",
"LineEnd",
"LineStart",
"Literal",
"Located",
"PrecededBy",
"MatchFirst",
"NoMatch",
"NotAny",
"OneOrMore",
"OnlyOnce",
"OpAssoc",
"Opt",
"Optional",
"Or",
"ParseBaseException",
"ParseElementEnhance",
"ParseException",
"ParseExpression",
"ParseFatalException",
"ParseResults",
"ParseSyntaxException",
"ParserElement",
"PositionToken",
"QuotedString",
"RecursiveGrammarException",
"Regex",
"SkipTo",
"StringEnd",
"StringStart",
"Suppress",
"Token",
"TokenConverter",
"White",
"Word",
"WordEnd",
"WordStart",
"ZeroOrMore",
"Char",
"alphanums",
"alphas",
"alphas8bit",
"any_close_tag",
"any_open_tag",
"c_style_comment",
"col",
"common_html_entity",
"counted_array",
"cpp_style_comment",
"dbl_quoted_string",
"dbl_slash_comment",
"delimited_list",
"dict_of",
"empty",
"hexnums",
"html_comment",
"identchars",
"identbodychars",
"java_style_comment",
"line",
"line_end",
"line_start",
"lineno",
"make_html_tags",
"make_xml_tags",
"match_only_at_col",
"match_previous_expr",
"match_previous_literal",
"nested_expr",
"null_debug_action",
"nums",
"one_of",
"printables",
"punc8bit",
"python_style_comment",
"quoted_string",
"remove_quotes",
"replace_with",
"replace_html_entity",
"rest_of_line",
"sgl_quoted
|
_string",
"srange",
"string_end",
"string_start",
"trace_parse_action",
"unicode_string",
"with_attribute",
"indentedBlock",
"original_text_for",
"ungroup",
"infix_notation",
"locatedExpr",
"with_class",
"CloseMatch",
"token_map",
"pyparsing_common",
"pyparsing_unicode",
"unicode_set",
"condition_as_parse_action",
"pyparsing_test",
# pre-PEP8 compatibility names
|
guaix-ucm/numina
|
numina/util/objimport.py
|
Python
|
gpl-3.0
| 1,031
| 0.00097
|
#
# Copyright 2011-2019 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
"""Import objects by name"""
import importlib
import inspect
import warnings
def import_object(path):
"""Import an object given its fully qualified name."""
spl = path.split('.')
if len(spl) == 1:
return importlib.import_module(path)
# avoid last part for the moment
cls = spl[-1]
mods = '.'.join(spl[:-1])
mm = importlib.import_module(mods)
# try to get the last part as an attribute
try:
obj = getattr(mm, cls)
return obj
except AttributeError:
pass
# Try to import the last part
rr = importlib.import_module(path)
return rr
def fully_qualified_name(obj, sep='.'):
warnings.warn(
"use numina.util.fqn.fully_qualified_name instead",
DeprecationWarning, stacklevel=2
)
import numina.util.fqn as fqn
return fqn.fully_qualified_name(obj, sep)
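# Illustrative usage sketch (not part of the original module): a dotted
# path may name either a module or an attribute inside one.
if __name__ == '__main__':  # pragma: no cover
    assert import_object('math').pi == import_object('math.pi')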
|
jskDr/jamespy_py3
|
kmath.py
|
Python
|
mit
| 3,201
| 0.000312
|
# Python3
import numpy as np
import math
def nCr(n,r):
f = math.factorial
return f(n) // f(r) // f(n-r)
def long_to_int64_array(val, ln):
    # integer division: number of 64-bit limbs needed for an ln-bit value
    sz = ln // 64 + 1
    # unsigned dtype so limbs with the top bit set still fit
    ar = np.zeros(sz, dtype=np.uint64)
    i64 = 2**64 - 1
    for ii in range(sz):
        ar[ii] = int(val & i64)
        val = val >> 64
    return ar
def int64_array_ro_long(ar):
    val = 0  # Python 3: plain int replaces long
    for ii in range(ar.shape[0]):
        # int() keeps the accumulator a Python int, so << never wraps
        val = val | int(ar[-ii - 1])
        if ii < ar.shape[0] - 1:
            val = val << 64
    return val
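# Illustrative round-trip sketch (assumed 100-bit value): packing into
# 64-bit limbs and reassembling recovers the original integer.
#   v = (1 << 100) + 12345
#   assert int64_array_ro_long(long_to_int64_array(v, 100)) == v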
def count(a_l, a, inverse=False):
    """
    Return the number of elements equal (or, with inverse=True, not
    equal) to the target value.
    Conversion through np.array resolves the case where a_l has more
    than one dimension.
    """
    if inverse is False:
        x = np.where(np.array(a_l) == a)
    else:
        x = np.where(np.array(a_l) != a)
    # np.where returns a tuple of index arrays; the first one's length is the count
    return len(x[0])
def autocorr(x):
result = np.correlate(x, x, mode='full')
return result[int(result.size / 2):]
def autocorr_kmlee_k(y, k):
ybar = np.mean(y)
N = len(y)
# print( N)
# cross_sum = np.zeros((N-k,1))
cross_sum = np.zeros(N - k)
# print( cross_sum.shape, N, k)
# Numerator, unscaled covariance
# [Matlab] for i = (k+1):N
for i in range(k, N):
# [Matlab] cross_sum(i) = (y(i)-ybar)*(y(i-k)-ybar) ;
# print( cross_sum.shape, i, k, N)
# print( cross_sum[i-k])
# print( (y[i]-ybar)*(y[i-k]-ybar))
# cross_sum[i] = (y[i]-ybar)*(y[i-k]-ybar)
cross_sum[i - k] = (y[i] - ybar) * (y[i - k] - ybar)
# Denominator, unscaled variance
yvar = np.dot(y - ybar, y - ybar)
ta2 = np.sum(cross_sum) / yvar
return ta2
def autocorr_kmlee(y, p=None):
if p is None:
p = len(y)
# The results
# ta = np.zeros((p,1))
ta = np.zeros(p)
# global N
N = len(y)
ybar = np.mean(y)
# Generate ACFs at each lag i
for i in range(p):
ta[i] = autocorr_kmlee_k(y, i)
return ta
def autocorrelate(x, method='numpy'):
"""
Multiple approaches are considered.
# kmlee method
function ta2 = acf_k(y,k)
% ACF_K - Autocorrelation at Lag k
% acf(y,k)
%
% Inputs:
% y - series to compute acf for
% k - which lag to compute acf
%
global ybar
global N
cross_sum = zeros(N-k,1) ;
% Numerator, unscaled covariance
for i = (k+1):N
cross_sum(i) = (y(i)-ybar)*(y(i-k)-ybar) ;
end
% Denominator, unscaled variance
yvar = (y-ybar)'*(y-ybar) ;
ta2 = sum(cross_sum) / yvar ;
"""
if method == 'numpy':
return autocorr(x)
elif method == 'zeropadding':
return np.correlate(x, x, mode='full')
elif method == 'kmlee':
return autocorr_kmlee(x)
def autocorrelate_m(X_org, method):
"""
autocorrelate_m(X_org, method)
Inputs
======
method, string
'numpy', 'zeropadding', 'kmlee'
"""
X_l = []
for i in range(X_org.shape[0]):
x_org = X_org[i, :]
x_ac = autocorrelate(x_org, method=method)
X_l.append(x_ac)
X = np.array(X_l)
return X
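# Illustrative end-to-end sketch (assumed data, not part of the original
# file): 'numpy' and 'kmlee' both estimate autocorrelation, up to scaling.
if __name__ == '__main__':
    x = np.sin(np.linspace(0, 8 * np.pi, 64))
    print(autocorrelate(x, method='numpy')[:3])
    print(autocorrelate(x, method='kmlee')[:3])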
|
eapearson/eapearson_TestRichReports
|
test/eapearson_TestRichReports_server_test.py
|
Python
|
mit
| 5,023
| 0.004181
|
# -*- coding: utf-8 -*-
import unittest
import os # noqa: F401
import json # noqa: F401
import time
import requests
from os import environ
try:
from ConfigParser import ConfigParser # py2
except ImportError:
from configparser import ConfigParser # py3
from pprint import pprint # noqa: F401
from biokbase.workspace.client import Workspace as workspaceService
from eapearson_TestRichReports.eapearson_TestRichReportsImpl import eapearson_TestRichReports
from eapearson_TestRichReports.eapearson_TestRichReportsServer import MethodContext
class eapearson_TestRichReportsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = environ.get('KB_AUTH_TOKEN', None)
user_id = requests.post(
            'https://kbase.us/services/authorization/Sessions/Login',
data='token={}&fields=user_id'.format(token)).json()['user_id']
        # WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'eapearson_TestRichReports',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('eapearson_TestRichReports'):
cls.cfg[nameval[0]] = nameval[1]
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL, token=token)
cls.serviceImpl = eapearson_TestRichReports(cls.cfg)
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_eapearson_TestRichReports_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
# NOTE: According to Python unittest naming rules test method names should start from 'test'. # noqa
def test_filter_contigs_ok(self):
obj_name = "contigset.1"
contig1 = {'id': '1', 'length': 10, 'md5': 'md5', 'sequence': 'agcttttcat'}
contig2 = {'id': '2', 'length': 5, 'md5': 'md5', 'sequence': 'agctt'}
contig3 = {'id': '3', 'length': 12, 'md5': 'md5', 'sequence': 'agcttttcatgg'}
obj1 = {'contigs': [contig1, contig2, contig3], 'id': 'id', 'md5': 'md5', 'name': 'name',
'source': 'source', 'source_id': 'source_id', 'type': 'type'}
self.getWsClient().save_objects({'workspace': self.getWsName(), 'objects':
[{'type': 'KBaseGenomes.ContigSet', 'name': obj_name, 'data': obj1}]})
ret = self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
'contigset_id': obj_name, 'min_length': '10'})
obj2 = self.getWsClient().get_objects([{'ref': self.getWsName()+'/'+obj_name}])[0]['data']
self.assertEqual(len(obj2['contigs']), 2)
self.assertTrue(len(obj2['contigs'][0]['sequence']) >= 10)
self.assertTrue(len(obj2['contigs'][1]['sequence']) >= 10)
self.assertEqual(ret[0]['n_initial_contigs'], 3)
self.assertEqual(ret[0]['n_contigs_removed'], 1)
self.assertEqual(ret[0]['n_contigs_remaining'], 2)
def test_filter_contigs_err1(self):
with self.assertRaises(ValueError) as context:
self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
'contigset_id': 'fake', 'min_length': 10})
self.assertTrue('Error loading original ContigSet object' in str(context.exception))
def test_filter_contigs_err2(self):
with self.assertRaises(ValueError) as context:
self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
'contigset_id': 'fake', 'min_length': '-10'})
self.assertTrue('min_length parameter shouldn\'t be negative' in str(context.exception))
def test_filter_contigs_err3(self):
with self.assertRaises(ValueError) as context:
self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
'contigset_id': 'fake', 'min_length': 'ten'})
self.assertTrue('Cannot parse integer from min_length parameter' in str(context.exception))
|
anntzer/scipy
|
scipy/special/utils/convert.py
|
Python
|
bsd-3-clause
| 3,448
| 0.00058
|
# This script is used to parse BOOST special function test data into something
# we can easily import in numpy.
import re
import os
# Where to put the data (directory will be created)
DATA_DIR = 'scipy/special/tests/data/boost'
# Where to pull out boost data
BOOST_SRC = "boostmath/test"
CXX_COMMENT = re.compile(r'^\s+//')
DATA_REGEX = re.compile(r'^\s*/*\{*\s*SC_')
ITEM_REGEX = re.compile(r'[+-]?\d*\.?\d+(?:[eE][+-]?\d+)?')
HEADER_REGEX = re.compile(
r'const boost::array\<boost::array\<.*, (\d+)\>, (\d+)\> ([a-zA-Z_\d]+)')
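# Illustrative header line HEADER_REGEX is written to match (assumed input):
#   const boost::array<boost::array<T, 3>, 100> erf_small_data = {{
# yielding d = 3 (values per row), n = 100 (rows) and the dataset name.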
IGNORE_PATTERNS = [
# Makes use of ldexp and casts
"hypergeometric_1F1_big_double_limited.ipp",
"hypergeometric_1F1_big_unsolved.ipp",
# Makes use of numeric_limits and ternary operator
"beta_small_data.ipp",
# Doesn't contain any data
"almost_equal.ipp",
# Derivatives functions don't exist
"bessel_y01_prime_data.ipp",
"bessel_yn_prime_data.ipp",
"sph_bessel_prime_data.ipp",
"sph_neumann_prime_data.ipp",
# Data files not needed by scipy special tests.
"ibeta_derivative_",
r"ellint_d2?_",
"jacobi_",
"heuman_lambda_",
"hypergeometric_",
"nct_",
r".*gammap1m1_",
"trig_",
"powm1_data.ipp",
]
def _raw_data(line):
items = line.split(',')
l = []
for item in items:
m = ITEM_REGEX.search(item)
if m:
q = m.group(0)
l.append(q)
return l
def parse_ipp_file(filename):
print(filename)
with open(filename, 'r') as a:
lines = a.readlines()
data = {}
i = 0
while (i < len(lines)):
line = lines[i]
m = HEADER_REGEX.search(line)
if m:
d = int(m.group(1))
n = int(m.group(2))
print(f"d = {d}, n = {n}")
cdata = []
i += 1
line = lines[i]
# Skip comments
while CXX_COMMENT.match(line):
i += 1
line = lines[i]
while DATA_REGEX.match(line):
cdata.append(_raw_data(line))
i += 1
line = lines[i]
# Skip comments
while CXX_COMMENT.match(line):
i += 1
line = lines[i]
            if len(cdata) != n:
raise ValueError(f"parsed data: {len(cdata)}, expected {n}")
data[m.group(3)] = cdata
else:
i += 1
return data
def dump_dataset(filename, data):
    with open(filename, 'w') as fid:
        for line in data:
            fid.write("%s\n" % " ".join(line))
def dump_datasets(filename):
base, ext = os.path.splitext(os.path.basename(filename))
base += '_%s' % ext[1:]
datadir = os.path.join(DATA_DIR, base)
os.makedirs(datadir)
datasets = parse_ipp_file(filename)
for k, d in datasets.items():
print(k, len(d))
dfilename = os.path.join(datadir, k) + '.txt'
dump_dataset(dfilename, d)
if __name__ == '__main__':
for filename in sorted(os.listdir(BOOST_SRC)):
# Note: Misses data in hpp files (e.x. powm1_sqrtp1m1_test.hpp)
if filename.endswith(".ipp"):
if any(re.match(pattern, filename) for pattern in IGNORE_PATTERNS):
continue
path = os.path.join(BOOST_SRC, filename)
print(f"================= {path} ===============")
dump_datasets(path)
|
RobMcZag/python-algorithms3
|
graph/tests/bfs_test.py
|
Python
|
apache-2.0
| 2,306
| 0.000434
|
import unittest
import graph
class BreadthFirstSearchTest(unittest.TestCase):
__runSlowTests = False
def testTinyGraph(self):
g = graph.Graph.from_file('tinyG.txt')
bfs = graph.BreadthFirstSearch(g, 0)
        self.assertEqual(7, bfs.count())
self.assertFalse(bfs.connected(7))
self.assertIsNone(bfs.path_to(7))
self.assertFalse(bfs.connected(8))
self.assertIsNone(bfs.path_to(8))
self.assertFalse(bfs.connected(9))
self.assertIsNone(bfs.path_to(9))
self.assertFalse(bfs.connected(12))
self.assertIsNone(bfs.path_to(12))
self.assertEqual([2, 0], bfs.path_to(2))
self.assertEqual(1, bfs.distance(2))
self.assertEqual([3, 5, 0], bfs.path_to(3))
self.assertEqual(2, bfs.distance(3))
self.assertEqual([4, 5, 0], bfs.path_to(4))
self.assertEqual(2, bfs.distance(4))
self.assertEqual([5, 0], bfs.path_to(5))
self.assertEqual(1, bfs.distance(5))
def testMedGraph(self):
g = graph.Graph.from_file('mediumG.txt')
bfs = graph.BreadthFirstSearch(g, 0)
self.assertEqual(250, bfs.count())
self.assertTrue(bfs.connected(123))
self.assertEqual(9, bfs.distance(123))
self.assertEqual([123, 246, 244, 207, 122, 92, 171, 165, 68, 0], bfs.path_to(123))
def testTinyDG(self):
g = graph.Graph.from_file('tinyDG.txt', directed=True)
bfs = graph.BreadthFirstSearch(g, 0)
self.assertEqual(6, bfs.count())
self.assertTrue(bfs.connected(4))
self.assertIsNotNone(bfs.path_to(4))
self.assertFalse(bfs.connected(7))
self.assertIsNone(bfs.path_to(7))
self.assertEqual([2, 4, 5, 0], bfs.path_to(2))
self.assertEqual(3, bfs.distance(2))
def testTinyDAG(self):
g = graph.Graph.from_file('tinyDAG.txt', directed=True)
bfs = graph.BreadthFirstSearch(g, 0)
self.assertEqual(9, bfs.count())
self.assertTrue(bfs.connected(4))
self.assertIsNotNone(bfs.path_to(4))
self.assertFalse(bfs.connected(7))
self.assertIsNone(bfs.path_to(7))
self.assertEqual([12, 9, 6, 0], bfs.path_to(12))
self.assertEqual(3, bfs.distance(12))
if __name__ == '__main__':
unittest.main()
|
danielfrg/datasciencebox
|
datasciencebox/cli/install.py
|
Python
|
apache-2.0
| 6,874
| 0.004946
|
from __future__ import absolute_import
import click
from datasciencebox.cli.main import cli, default_options
@cli.group(short_help='Install packages, applications and more')
@click.pass_context
def install(ctx):
pass
@install.command('miniconda', short_help='Install miniconda in the instances')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@click.option('--target', '-t', required=False, help='Wildcard matching salt minions')
@default_options
@click.pass_context
def install_miniconda(ctx, ssh, target):
project = ctx.obj['project']
out = project.salt('state.sls', args=['miniconda'], target=target, ssh=ssh)
click.echo(out)
if not ssh:
out = project.salt('saltutil.sync_all', target=target)
click.echo(out)
@install.command('salt', short_help='Install salt master and minion(s) via salt-ssh')
@default_options
@click.pass_context
def install_salt(ctx):
project = ctx.obj['project']
click.echo('Installing salt (master mode)')
out = project.salt('state.sls', args=['salt.cluster'], target='*', ssh=True)
click.echo(out)
click.echo('Syncing formulas')
from datasciencebox.cli.base import sync
ctx.invoke(sync)
@install.command('pkg', short_help='Install a package using system package manager')
@click.argument('pkg', required=True)
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@click.option('--target', '-t', required=False, help='Wildcard matching salt minions')
@default_options
@click.pass_context
def install_pkg(ctx, pkg, ssh, target):
project = ctx.obj['project']
args = [pkg]
out = project.salt('pkg.install', args=args, target=target, ssh=ssh)
click.echo(out)
@install.command('conda', short_help='Install conda package')
@click.argument('pkg', required=True)
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@click.option('--target', '-t', required=False, help='Wildcard matching salt minions')
@default_options
@click.pass_context
def install_conda(ctx, pkg, ssh, target):
project = ctx.obj['project']
out = project.salt('conda.install',
args=[pkg],
kwargs={'user': project.settings['USERNAME']},
target=target,
ssh=ssh)
click.echo(out)
@install.command('cloudera-manager', short_help='Install Cloudera Manager in the cluster')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_cloudera_manager(ctx, ssh):
project = ctx.obj['project']
click.echo('Step 1/1: Cloudera Manager')
out = project.salt('state.sls', args=['cdh5.manager.cluster'], target='*', ssh=ssh)
click.echo(out)
@install.command('notebook', short_help='Install Jupyter notebook in the head node')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_notebook(ctx, ssh):
project = ctx.obj['project']
click.echo('Step 1/2: Conda (head only)')
out = project.salt('state.sls', args=['miniconda'], target='head', ssh=ssh)
click.echo(out)
if not ssh:
out = project.salt('saltutil.sync_all', target='head')
click.echo(out)
click.echo('Step 2/2: Jupyter Notebook')
out = project.salt('state.sls', args=['ipython.notebook'], target='head', ssh=ssh)
click.echo(out)
@install.command('hdfs', short_help='Install hdfs in the cluster')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_hdfs(ctx, ssh):
project = ctx.obj['project']
click.echo('Step 1/1: HDFS')
out = project.salt('state.sls', args=['cdh5.hdfs.cluster'], target='*', ssh=ssh)
click.echo(out)
@install.command('mesos', short_help='Install mesos in the cluster')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_mesos(ctx, ssh):
project = ctx.obj['project']
click.echo('Step 1/2: Zookeeper')
out = project.salt('state.sls', args=['cdh5.zookeeper.cluster'], target='head', ssh=ssh)
click.echo(out)
click.echo('Step 2/2: Mesos')
out = project.salt('state.sls', args=['mesos.cluster'], target='*', ssh=ssh)
click.echo(out)
@install.command('marathon', short_help='Install marathon in the cluster')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_marathon(ctx, ssh):
project = ctx.obj['project']
click.echo('Step 1/3: Zookeeper')
out = project.salt('state.sls', args=['cdh5.zookeeper.cluster'], target='head', ssh=ssh)
click.echo(out)
click.echo('Step 2/3: Mesos')
out = project.salt('state.sls', args=['mesos.cluster'], target='*', ssh=ssh)
click.echo(out)
click.echo('Step 3/3: Marathon')
out = project.salt('state.sls', args=['mesos.marathon'], target='head', ssh=ssh)
click.echo(out)
@install.command('spark', short_help='Install spark (on Mesos)')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_spark(ctx, ssh):
project = ctx.obj['project']
click.echo('Step 1/4: Zookeeper')
out = project.salt('state.sls', args=['cdh5.zookeeper.cluster'], target='head', ssh=ssh)
click.echo(out)
click.echo('Step 2/4: HDFS')
out = project.salt('state.sls', args=['cdh5.hdfs.cluster'], target='*', ssh=ssh)
click.echo(out)
    click.echo('Step 3/4: Mesos')
    out = project.salt('state.sls', args=['mesos.cluster'], target='*', ssh=ssh)
click.echo(out)
click.echo('Step 4/4: Spark on Mesos')
out = project.salt('state.sls', args=['mesos.spark'], target='head', ssh=ssh)
click.echo(out)
@install.command('impala', short_help='Install Impala')
@click.option('--ssh', is_flag=True, required=False, show_default=True, help='Whether to use ssh')
@default_options
@click.pass_context
def install_impala(ctx, ssh):
project = ctx.obj['project']
click.echo('Step 1/4: Zookeeper')
out = project.salt('state.sls', args=['cdh5.zookeeper.cluster'], target='head', ssh=ssh)
click.echo(out)
click.echo('Step 2/4: HDFS')
out = project.salt('state.sls', args=['cdh5.hdfs.cluster'], target='*', ssh=ssh)
click.echo(out)
click.echo('Step 3/4: Hive Metastore')
out = project.salt('state.sls', args=['cdh5.hive.metastore'], target='head', ssh=ssh)
click.echo(out)
click.echo('Step 4/4: Impala')
out = project.salt('state.sls', args=['cdh5.impala.cluster'], target='*', ssh=ssh)
click.echo(out)
|
zhangwenyu/packages
|
volt/volt/openstack/common/sslutils.py
|
Python
|
apache-2.0
| 2,842
| 0
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from oslo.config import cfg
from volt.openstack.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients."),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely."),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely."),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23,
"sslv3": ssl.PROTOCOL_SSLv3
}
try:
_SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
pass
def validate_ssl_version(version):
key = version.lower()
try:
return _SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)
|
pombredanne/core-serializers
|
core_serializers/renderers.py
|
Python
|
bsd-2-clause
| 2,026
| 0
|
from jinja2 import Environment, PackageLoader
import json
env = Environment(loader=PackageLoader('core_serializers', 'templates'))
class FormRenderer:
template_name = 'form.html'
def render_field(self, field_result, **options):
field, value, error = field_result
class_name = field.__class__.__name__
layout = options.get('layout', 'vertical')
context = {}
if class_name == 'BooleanField':
base = 'checkbox.html'
elif class_name == 'IntegerField':
base = 'input.html'
context = {'input_type': 'number'}
elif class_name == 'ChoiceField':
if field.style.get('type') == 'radio':
base = 'select_radio.html'
else:
base = 'select.html'
elif class_name == 'MultipleChoiceField':
if field.style.get('type') == 'checkbox':
base = 'select_checkbox.html'
else:
base = 'select_multiple.html'
else:
# CharField, and anything unknown
if field.style.get('type') == 'textarea' and layout != 'inline':
                base = 'textarea.html'
else:
base = 'input.html'
context = {'input_type': 'text'}
template_name = 'fields/' + layout + '/' + base
        template = env.get_template(template_name)
return template.render(field=field, value=value, **context)
def render(self, form, **options):
style = getattr(getattr(form, 'Meta', None), 'style', {})
layout = style.get('layout', 'vertical')
template = env.get_template(self.template_name)
return template.render(form=form, renderer=self, layout=layout)
class JSONRenderer:
indent = None
def __init__(self, indent=None):
self.indent = self.indent if (indent is None) else indent
def render(self, data, **options):
indent = options.get('indent', self.indent)
return json.dumps(data, indent=indent)
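# Illustrative usage sketch (assumed data, not part of the original module):
if __name__ == '__main__':
    print(JSONRenderer(indent=2).render({'a': 1}))    # constructor default
    print(JSONRenderer().render({'a': 1}, indent=4))  # per-call override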
|
2mv/raapija
|
last_transactions_parser.py
|
Python
|
isc
| 884
| 0.015837
|
import csv
import tempfile
import os
from transaction import Transaction
class LastTransactionsParser:
LAST_TRANSACTIONS_FILENAME = os.path.join(tempfile.gettempdir(), 'raapija_transactions_last.csv')
@staticmethod
def read():
try:
with open(LastTransactionsParser.LAST_TRANSACTIONS_FILENAME, 'r', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
return [Transaction(**transaction_dict) for transaction_dict in reader]
except FileNotFoundError:
return None
@staticmethod
def write(transactions):
        with open(LastTransactionsParser.LAST_TRANSACTIONS_FILENAME, 'w', encoding='utf-8') as csvfile:
csv_fieldnames = transactions[0].__dict__.keys()
            writer = csv.DictWriter(csvfile, csv_fieldnames)
writer.writeheader()
for transaction in transactions:
writer.writerow(transaction.__dict__)
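# Illustrative round-trip sketch (field names assumed; Transaction must
# accept as keyword arguments the attributes it exposes):
#   LastTransactionsParser.write([Transaction(amount='1.00', payee='Shop')])
#   print(LastTransactionsParser.read())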
|
mprinc/McMap
|
src/scripts/CSN_Archive2/parse_csvn.py
|
Python
|
mit
| 7,899
| 0.019243
|
#!/usr/bin/env python
# Copyright (c) 2015, Scott D. Peckham
#------------------------------------------------------
# S.D. Peckham
# July 9, 2015
#
# Tool to break CSDMS Standard Variable Names into
# all of their component parts, then save results in
# various formats. (e.g. Turtle TTL format)
#
# Example of use at a Unix prompt:
#
# % ./parse_csvn.py CSN_VarNames_v0.83.txt
#------------------------------------------------------
#
# Functions:
# parse_names()
#
#------------------------------------------------------
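# Illustrative parse (assumed CSN name, matching the logic below):
#
#   atmosphere_air__temperature
#   -> object_fullname = 'atmosphere_air' (object1 = 'atmosphere',
#      object2 = 'air', root_object = 'air'),
#      quantity_fullname = 'temperature', root_quantity = 'temperature'
#
#------------------------------------------------------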
import os.path
import sys
#------------------------------------------------------
def parse_names( in_file='CSN_VarNames_v0.83.txt' ):
#--------------------------------------------------
# Open input file that contains copied names table
#--------------------------------------------------
    try:
        in_unit = open( in_file, 'r' )
    except:
        print 'SORRY: Could not open TXT file named:'
        print '   ' + in_file
        return
#-------------------------
# Open new CSV text file
#-------------------------
## pos = in_file.rfind('.')
## prefix = in_file[0:pos]
## out_file = prefix + '.ttl'
out_file = 'CSN_VarNames_v0.83.ttl'
#-------------------------------------------
OUT_EXISTS = os.path.exists( out_file )
if (OUT_EXISTS):
print 'SORRY, A TTL file with the name'
print ' ' + out_file
print ' already exists.'
return
out_unit = open( out_file, 'w' )
#------------------------
# Write TTL file header
#------------------------
out_unit.write( '@prefix dc: <http://purl.org/dc/elements/1.1/> .' + '\n' )
out_unit.write( '@prefix ns: <http://example.org/ns#> .' + '\n' )
out_unit.write( '@prefix vcard: <http://www.w3.org/2001/vcard-rdf/3.0#> .' + '\n')
out_unit.write( '@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .' + '\n' )
out_unit.write( '@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .' + '\n' )
out_unit.write( '@prefix owl: <http://www.w3.org/2002/07/owl#> .' + '\n' )
out_unit.write( '@prefix csn: <http://ecgs.ncsa.illinois.edu/2015/csn#> .' + '\n' )
out_unit.write( '\n' ) # (blank line)
root_quan_list = list() # (list to save all root quantities)
#---------------------------
# Parse all variable names
#---------------------------
indent = ' ' # (four spaces)
n_names = 0
while (True):
#------------------------------
# Read data line from in_file
#------------------------------
line = in_unit.readline()
if (line == ''):
break
#-----------------------------------------
# Write entire variable name to TTL file
#-----------------------------------------
line = line.strip() # (strip leading/trailing white space)
out_unit.write( '<csn:' + line + '>\n' )
#--------------------------------------------------
# Write object and quantity fullnames to TTL file
#--------------------------------------------------
main_parts = line.split('__')
object_part = main_parts[0]
quantity_part = main_parts[1]
out_unit.write( indent + 'a csn:name ;\n' )
out_unit.write( indent + "csn:object_fullname '" + object_part + "' ;\n" )
out_unit.write( indent + "csn:quantity_fullname '" + quantity_part + "' ;\n" )
#---------------------------------------------
# Write parts of object_fullname to TTL file
#---------------------------------------------
object_list = object_part.split('_')
n_objects = len( object_list )
for k in xrange( n_objects ):
object = object_list[k]
obj_string = " '" + object + "' "
obj_prefix = indent + "csn:object" + str(k+1)
out_unit.write( obj_prefix + obj_string + ";\n")
adj_list = object.split('~')
n_adjectives = len(adj_list) - 1 # (first one in list is the object)
for j in xrange( n_adjectives ):
adj_string = " '" + adj_list[j+1] + "' "
adj_prefix = obj_prefix + "_adjective" + str(j+1)
out_unit.write( adj_prefix + adj_string + ";\n" )
#-------------------------------------
# Write root object name to TTL file
#-------------------------------------
root_object = object_list[-1] # (last object in list)
root_obj_string = " '" + root_object + "' "
root_obj_prefix = indent + "csn:root_object"
out_unit.write( root_obj_prefix + root_obj_string + ";\n" )
#--------------------------------------------------------
# Write all operations in quantity_fullname to TTL file
#--------------------------------------------------------
operation_list = quantity_part.split('_of_')
n_operations = len(operation_list) - 1 # (last one in list is the quantity)
for k in xrange( n_operations ):
operation = operation_list[k]
op_string = " '" + operation + "' "
op_prefix = indent + "csn:operation" + str(k+1)
out_unit.write( op_prefix + op_string + ";\n" )
#----------------------------------
# Write quantity name to TTL file
#----------------------------------
quantity = operation_list[-1]
quan_string = " '" + quantity + "' "
quan_prefix = indent + "csn:quantity"
out_unit.write( quan_prefix + quan_string + ";\n" )
#---------------------------------------
# Write root quantity name to TTL file
#---------------------------------------
        quantity_parts = quantity.split('_')
        root_quantity = quantity_parts[-1]
        root_quan_string = " '" + root_quantity + "' "
root_quan_prefix = indent + "csn:root_quantity"
out_unit.write( root_quan_prefix + root_quan_string + ".\n" ) # (Notice "." vs. ";" here.)
out_unit.write( '\n' ) # (blank line)
root_quan_list.append( root_quantity ) # (save in root_quan_list)
n_names += 1
#----------------------
# Close the input file
#----------------------
in_unit.close()
#----------------------------
# Close the TXT output file
#----------------------------
out_unit.close()
print 'Finished writing CSN var names as TTL.'
print 'Number of names =', n_names, '.'
print ' '
#-----------------------------------------
# Write unique root quantities to a file
#-----------------------------------------
uniq_root_quan_list = sorted( set(root_quan_list) )
n_uniq_root_quans = len( uniq_root_quan_list )
root_quan_unit = open( 'Root_Quantities.txt', 'w' )
for k in xrange( n_uniq_root_quans ):
root_quantity = uniq_root_quan_list[k]
root_quan_unit.write( root_quantity + '\n' )
root_quan_unit.close()
print 'Number of root quantities =', n_uniq_root_quans, '.'
print ' '
# parse_names()
#------------------------------------------------------
if (__name__ == "__main__"):
#-----------------------------------------------------
# Note: First arg in sys.argv is the command itself.
#-----------------------------------------------------
n_args = len(sys.argv)
if (n_args < 2):
print 'ERROR: This tool requires an input'
print ' text file argument.'
print 'sys.argv =', sys.argv
print ' '
elif (n_args == 2):
parse_names( sys.argv[1] )
else:
print 'ERROR: Invalid number of arguments.'
#-----------------------------------------------------------------------
|
teoreteetik/api-snippets
|
client/response-twiml/response-twiml.5.x.py
|
Python
|
mit
| 383
| 0
|
from flask import Flask, Response
import twilio.twiml
app = Flask(__name__)
@app.route("/voice", methods=['POST'])
def get_voice_twiml():
"""Respond to incoming calls with a simple text mes
|
sage."""
resp =
|
twilio.twiml.Response()
resp.say("Thanks for calling!")
return Response(str(resp), mimetype='text/xml')
if __name__ == "__main__":
app.run(debug=True)
|
VinnieJohns/ggrc-core
|
src/ggrc_workflows/migrations/versions/20140715214934_26d9c9c91542_add_cycletaskgroupobject_object.py
|
Python
|
apache-2.0
| 1,141
| 0.006135
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Add CycleTaskGroupObject.object
Revision ID: 26d9c9c91542
Revises: 19a67dc67c3
Create Date: 2014-07-15 21:49:34.073412
"""
# revision identifiers, used by Alembic.
revision = '26d9c9c91542'
down_revision = '19a67dc67c3'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('cycle_task_group_objects', sa.Column('object_id', sa.Integer(), nullable=False))
op.add_column('cycle_task_group_objects', sa.Column('object_type', sa.String(length=250), nullable=False))
op.execute('''
UPDATE cycle_task_group_objects
JOIN task_group_objects
ON cycle_task_group_objects.task_group_object_id = task_group_objects.id
SET
        cycle_task_group_objects.object_id = task_group_objects.object_id,
cycle_task_group_objects.object_type = task_group_objects.object_type;
''')
def downgrade():
op.drop_column('cycle_task_group_objects', 'object_type')
op.drop_column('cycle_task_group_objects', 'object_id')
|
linkinwong/word2vec
|
src/crf-paper-script/preprocessor6_ssr_rep_increase_5_scale.py
|
Python
|
apache-2.0
| 8,443
| 0.008923
|
# coding: utf-8
__author__ = 'linlin'
import os
import logging
import re
import pdb
logger = logging.getLogger(__name__)
################################################################
root_dir = '/home/linlin/time/0903_classify_false_start/1003_raw_features/'
separator = '\t\t'
################################################################
def MakeNewFolderVersionHigher(data_directory, dir_name):
    ## Create a folder with a higher version number inside the selected
    ## directory. data_directory - can be a relative directory
    ## dir_name - the new folder name you want to create
abs_data_directory = os.path.abspath(os.path.dirname(data_directory))
version_number = 1
dirs = os.listdir(abs_data_directory)
for dir in dirs:
if dir_name in dir:
version_str = re.findall(r'Dir_\d+',dir)
number_str =''.join((version_str[-1])[4:])
if True == number_str.isdigit():
number= int (number_str)
if number>version_number:
version_number = number
new_folder_name = dir_name + "_%d" %(version_number+1)
folderFullPath = os.path.join(abs_data_directory,new_folder_name )
os.makedirs(folderFullPath)
return folderFullPath
#########################################################
output_root_dir = MakeNewFolderVersionHigher(root_dir, 'processDir' )
data_dir = root_dir + 'data1'
code_dir = root_dir + 'src/'
##############################################################
def DirProcessing(source_path, dest_path):
path = source_path
for root, dirs, files in os.walk(path):
for filespath in files:
abs_file_path = os.path.join(root, filespath)
logger.debug("Visited one file!")
Standardize(abs_file_path, dest_path, ' ')
def DirProcessingForSSR(source_path, dest_path):
path = source_path
for root, dirs, files in os.walk(path):
for filespath in files:
abs_file_path = os.path.join(root, filespath)
logger.debug("Visited one file!")
GetSsrFeature(abs_file_path, dest_path, '\t')
def GetAttributes(source_path, dest_path):
################################################################
script_file = code_dir + 'chunker6_only_ssr_repetition.py'
################################################################
path = source_path
for root, dirs, files in os.walk(path):
for filespath in files:
abs_file_path = os.path.join(root, filespath)
logger.debug("Visited one file!")
crf_path = dest_path + '/' + os.path.basename(abs_file_path) + '.crfsuite'
os.system('cat ' + abs_file_path +' | python ' + script_file + " > " + crf_path )
def RunClassifier(source_path, dest_path):
path = source_path
for root, dirs, files in os.walk(path):
for filespath in files:
if 'tr.txt' in filespath:
train_path = os.path.join(root, filespath)
            elif 'te.txt' in filespath:
test_path = os.path.join(root, filespath)
#pdb.set_trace()
result_path = dest_path + '/' + 'result.txt'
os.system('crfsuite learn -e2 ' + train_path + " " + test_path + " > " + result_path )
def FindNeighborTokenSubscript(first_token_list, current_pos, up_or_down):
pos = current_pos
ind = up_or_down
li = first_token_list
if ind == 1:
i = 1
while len(li[pos+i]) < 1:
i += 1
return pos+i
if ind == -1:
i = 1
while len(li[pos-i]) < 1:
i += 1
return pos-i
def Standardize(path, dest_dir, sep):
#####################################
scale_str = ':5'
#####################################
output_path = dest_dir+ '/' + os.path.basename(path) + '.standard'
output_file_obj = open(output_path,'w')
file_obj = open(path)
line_list = file_obj.readlines()
token_list = []
for j in range(len(line_list)):
word_list = line_list[j].split()
if len(word_list) < 2:
token_list.append('')
else:
token_list.append(word_list[0])
repetition_vec_list = []
for i in range(len(line_list)):
if len(token_list[i]) == 0:
repetition_vec_list.append('')
else:
if i < 4 or i > len(line_list)- 5:
repetition_vec_list.append(['diff', 'diff','diff', 'diff'])
else:
previous_subscript = FindNeighborTokenSubscript(token_list, i, -1)
prev_prev_subscript = FindNeighborTokenSubscript(token_list, previous_subscript, -1)
next_subscript = FindNeighborTokenSubscript(token_list, i, 1)
next_next_subscript = FindNeighborTokenSubscript(token_list, next_subscript, 1)
prev_prev_label = 'same'+scale_str if (token_list[i] == token_list[prev_prev_subscript]) else "diff"
prev_label = 'same'+scale_str if (token_list[i] == token_list[previous_subscript]) else "diff"
next_label = 'same'+scale_str if (token_list[i] == token_list[next_subscript]) else "diff"
                next_next_label = 'same'+scale_str if (token_list[i] == token_list[next_next_subscript]) else "diff"
                repetition_vec_list.append([prev_prev_label, prev_label, next_label, next_next_label])
for k in range(len(line_list)):
line = line_list[k]
if len(line)<13:
label = ''
else:
word_list = line.split()
if 'filler' in word_list[4]:
label = 'filler'
elif 'repeat' in word_list[4] or 'nsert' in word_list[4]:
label = 'repeat'
elif 'restart' in word_list[4] or 'extraneou' in word_list[4]:
label = 'false_start'
elif 'elete' in word_list[4]:
label = 'other'
else:
label = 'OK'
if '-' in word_list[0]:
patial = 'patial'+scale_str
else:
patial = 'nonpatial'
label = label
token = word_list[0]
pos = word_list[1]
word = word_list[2]
sem = word_list[3]
patial = patial
#pdb.set_trace()
pp = repetition_vec_list[k][0]
p = repetition_vec_list[k][1]
n = repetition_vec_list[k][2]
nn = repetition_vec_list[k][3]
#pdb.set_trace()
if len(line)<13:
line_format = ''
else:
line_format = (
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
%(label, sep, token,sep,pos, sep,word,sep,sem, sep, patial, sep,
pp, sep, p, sep, n,sep, nn))
output_file_obj.write(line_format)
output_file_obj.write('\n')
output_file_obj.close()
file_obj.close()
def GetSsrFeature(path, dest_dir, sep):
output_path = dest_dir+ '/' + os.path.basename(path) + '.noSpace'
output_file_obj = open(output_path,'w')
file_obj = open(path)
for line in file_obj:
if len(line)<3:
newLine = ''
else:
word_list = line[54:].split()
newLine = '_'.join(word_list)
token = line[:15].strip()
pos = line[15:25].strip()
word = line[25:40].strip()
sem = line[40:54].strip()
label = newLine
if len(line)<3:
line_format = ''
else:
line_format = "%s%s%s%s%s%s%s%s%s%s" %(token,sep,pos,sep,word,sep,sem, sep, label, sep)
output_file_obj.write(line_format)
output_file_obj.write('\n')
output_file_obj.close()
file_obj.close()
if __name__ == '__main__':
logFile = output_root_dir + "/logFile.txt"
logging.basicConfig(filename=logFile, level = logging.DEBUG)
os.makedirs(output_root_dir + "/standardStep1")
dest_dir = output_root_dir + "/standardStep1"
DirProcessing(data_dir, dest_dir)
# os.makedirs(output_root_dir + "/standardStep2") #
# dest_dir = output_root_dir + "/standardStep2"
# DirProcessing(data_dir, dest_dir) #
os.makedirs(output_root_dir + "/attributesStep3")
    attr_dir = output_root_dir + "/attributesStep3"
    GetAttributes(dest_dir, attr_dir)
|
orangeduck/PyMark
|
tests/test3.py
|
Python
|
bsd-2-clause
| 247
| 0
|
import pymark
pets_mod = pymark.unpack_file("pets_two.pmk")
print "TypeID: %i" %
|
pets_mod["pets"]["catherine"]["typ
|
e"]
print "Name: %s" % pets_mod["pets"]["catherine"]["name"]
print "Color: (%i, %i, %i)" % pets_mod["pets"]["catherine"]["color"]
|
MungoRae/home-assistant
|
homeassistant/components/sensor/mvglive.py
|
Python
|
apache-2.0
| 6,071
| 0
|
"""
Support for real-time departure information for public transport in Munich.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.mvglive/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, ATTR_ATTRIBUTION, STATE_UNKNOWN
)
REQUIREMENTS = ['PyMVGLive==1.1.4']
_LOGGER = logging.getLogger(__name__)
CONF_NEXT_DEPARTURE = 'nextdeparture'
CONF_STATION = 'station'
CONF_DESTINATIONS = 'destinations'
CONF_DIRECTIONS = 'directions'
CONF_LINES = 'lines'
CONF_PRODUCTS = 'products'
CONF_TIMEOFFSET = 'timeoffset'
DEFAULT_PRODUCT = ['U-Bahn', 'Tram', 'Bus', 'S-Bahn']
ICONS = {
'U-Bahn': 'mdi:subway',
'Tram': 'mdi:tram',
'Bus': 'mdi:bus',
'S-Bahn': 'mdi:train',
'SEV': 'mdi:checkbox-blank-circle-outline',
'-': 'mdi:clock'
}
ATTRIBUTION = "Data provided by MVG-live.de"
SCAN_INTERVAL = timedelta(seconds=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NEXT_DEPARTURE): [{
        vol.Required(CONF_STATION): cv.string,
vol.Optional(CONF_DESTINATIONS, default=['']): cv.ensure_list_csv,
vol.Optional(CONF_DIRECTIONS, default=['']): cv.ensure_list_csv,
vol.Optional(CONF_LINES, default=['']): cv.ensure_list_csv,
vol.Optional(CONF_PRODUCTS, default=DEFAULT_PRODUCT):
cv.ensure_list_csv,
        vol.Optional(CONF_TIMEOFFSET, default=0): cv.positive_int,
vol.Optional(CONF_NAME): cv.string}]
})
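# Illustrative configuration.yaml entry accepted by this schema
# (station and line values assumed):
#
#   sensor:
#     - platform: mvglive
#       nextdeparture:
#         - station: Hauptbahnhof
#           lines: ['U4', 'U5']
#           timeoffset: 2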
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the MVGLive sensor."""
sensors = []
for nextdeparture in config.get(CONF_NEXT_DEPARTURE):
sensors.append(
MVGLiveSensor(
nextdeparture.get(CONF_STATION),
nextdeparture.get(CONF_DESTINATIONS),
nextdeparture.get(CONF_DIRECTIONS),
nextdeparture.get(CONF_LINES),
nextdeparture.get(CONF_PRODUCTS),
nextdeparture.get(CONF_TIMEOFFSET),
nextdeparture.get(CONF_NAME)))
add_devices(sensors, True)
# pylint: disable=too-few-public-methods
class MVGLiveSensor(Entity):
"""Implementation of an MVG Live sensor."""
def __init__(self, station, destinations, directions,
lines, products, timeoffset, name):
"""Initialize the sensor."""
self._station = station
self._name = name
self.data = MVGLiveData(station, destinations, directions,
lines, products, timeoffset)
self._state = STATE_UNKNOWN
self._icon = ICONS['-']
@property
def name(self):
"""Return the name of the sensor."""
if self._name:
return self._name
return self._station
@property
def state(self):
"""Return the next departure time."""
return self._state
@property
def state_attributes(self):
"""Return the state attributes."""
return self.data.departures
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return "min"
def update(self):
"""Get the latest data and update the state."""
self.data.update()
if not self.data.departures:
self._state = '-'
self._icon = ICONS['-']
else:
self._state = self.data.departures.get('time', '-')
self._icon = ICONS[self.data.departures.get('product', '-')]
class MVGLiveData(object):
"""Pull data from the mvg-live.de web page."""
def __init__(self, station, destinations, directions,
lines, products, timeoffset):
"""Initialize the sensor."""
import MVGLive
self._station = station
self._destinations = destinations
self._directions = directions
self._lines = lines
self._products = products
self._timeoffset = timeoffset
        self._include_ubahn = 'U-Bahn' in self._products
        self._include_tram = 'Tram' in self._products
        self._include_bus = 'Bus' in self._products
        self._include_sbahn = 'S-Bahn' in self._products
self.mvg = MVGLive.MVGLive()
self.departures = {}
def update(self):
"""Update the connection data."""
try:
_departures = self.mvg.getlivedata(
station=self._station, ubahn=self._include_ubahn,
tram=self._include_tram, bus=self._include_bus,
sbahn=self._include_sbahn)
except ValueError:
self.departures = {}
_LOGGER.warning("Returned data not understood")
return
for _departure in _departures:
# find the first departure meeting the criteria
if ('' not in self._destinations[:1] and
_departure['destination'] not in self._destinations):
continue
elif ('' not in self._directions[:1] and
_departure['direction'] not in self._directions):
continue
elif ('' not in self._lines[:1] and
_departure['linename'] not in self._lines):
continue
elif _departure['time'] < self._timeoffset:
continue
# now select the relevant data
_nextdep = {ATTR_ATTRIBUTION: ATTRIBUTION}
for k in ['destination', 'linename', 'time', 'direction',
'product']:
_nextdep[k] = _departure.get(k, '')
_nextdep['time'] = int(_nextdep['time'])
self.departures = _nextdep
break
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_language_tools/inspect_getsource_method.py
|
Python
|
apache-2.0
| 76
| 0
|
import inspect
import example
print(inspect.getsource(example.A.get_name))
|
rsimba/cloudelements
|
tests/__init__.py
|
Python
|
mit
| 197
| 0.010152
|
'''
cloudelements: tests module.
Meant for use with py.test.
Organize tests into files, each named xxx_test.py
Read more here: http://pytest.org/
Copyright 2015, LeadGenius
Licensed under MIT
'''
|
IfcOpenShell/IfcOpenShell
|
src/ifcopenshell-python/ifcopenshell/api/profile/add_parameterized_profile.py
|
Python
|
lgpl-3.0
| 1,101
| 0
|
# IfcOpenShell - IFC toolkit and geometry engine
# Copyright (C) 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of IfcOpenShell.
#
# IfcOpenShell is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IfcOpenShell is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with IfcOpenShell. If not, see <http://www.gnu.org/licenses/>.
class Usecase:
def __init__(self, file, **settings):
self.file = file
self.settings = {"ifc_class": None}
for key, value in settings.items():
self.settings[key] = value
def execute(self):
return self.file.create_entity(self.settings["ifc_class"])
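# Illustrative usage sketch (assumes an open ifcopenshell file object):
#   profile = Usecase(ifc_file, ifc_class="IfcRectangleProfileDef").execute()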
|