repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
deepmind/acme | acme/utils/loggers/base.py | Python | apache-2.0 | 1,965 | 0.006616 | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base logger."""
import abc
from typing import Any, Mapping
import numpy as np
import tree
LoggingData = Mapping[str, Any]
class Logger(abc.ABC):
"""A logger has a `write` method."""
@abc.abstractmethod
def write(self, data: LoggingData) -> None:
"""Writes `data` to destination (file, terminal, database, etc)."""
@abc.abstractmethod
def close(self) -> None:
"""Closes the logger, not expecting any further write."""
class NoOpLogger(Logger):
"""Simple Logger which does nothing and outputs no logs.
This should be used sparingly, but it can prove useful if we want to quiet an
individual component and have it produce no logging whatsoever.
"""
def write(self, data: LoggingData):
pass
def c | lose(self):
pass
def tensor_to_numpy(value: Any):
if hasattr(value, 'numpy'):
return value.numpy() # tf.Tensor (TF2).
if hasattr(value, 'device_buffer'):
return np.asarray(value) # jnp.De | viceArray.
return value
def to_numpy(values: Any):
"""Converts tensors in a nested structure to numpy.
Converts tensors from TensorFlow to Numpy if needed without importing TF
dependency.
Args:
values: nested structure with numpy and / or TF tensors.
Returns:
Same nested structure as values, but with numpy tensors.
"""
return tree.map_structure(tensor_to_numpy, values)
|
StackOps/python-automationclient | automationclient/tests/test_shell.py | Python | apache-2.0 | 2,750 | 0.000727 | # Copyright 2011 OpenStack LLC.
# Copyright 2012-2013 STACKOPS TECHNOLOGIES S.L.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import fixtures
from six import moves
from testtools import matchers
from automationclient import exceptions
import automationclient.shell
from automationclient.tests import utils
class ShellTest(utils.TestCase):
FAKE_ENV = {
'OS_USERNAME': 'username',
'OS_PASSWORD': 'password',
'OS_TENANT_NAME': 'tenant_name',
'OS_ | AUTH_URL': 'http://no.where',
}
# Patch os.environ to avoid required auth info.
def setUp(self):
super(ShellTest, self).setUp()
for var in self.FAKE_ENV:
self.useFixture(fixtures.EnvironmentVariable(var,
| self.FAKE_ENV[var]))
def shell(self, argstr):
orig = sys.stdout
try:
sys.stdout = moves.StringIO()
_shell = automationclient.shell.StackopsAutomationShell()
_shell.main(argstr.split())
except SystemExit:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(exc_value.code, 0)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = orig
return out
def test_help_unknown_command(self):
self.assertRaises(exceptions.CommandError, self.shell, 'help foofoo')
def test_help(self):
required = [
'.*?^usage: ',
'.*?(?m)^\s+architecture-create\s+Add a new architecture.',
'.*?(?m)^See "automation help COMMAND" for help on a '
'specific command',
]
help_text = self.shell('help')
for r in required:
self.assertThat(help_text,
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
def test_help_on_subcommand(self):
required = [
'.*?^usage: automation device-list',
'.*?(?m)^List all the devices.',
]
help_text = self.shell('help device-list')
for r in required:
self.assertThat(help_text,
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
|
Garrett-R/scikit-learn | sklearn/tests/test_grid_search.py | Python | bsd-3-clause | 26,766 | 0.000224 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import distributions
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def test_parameter_grid():
"""Test basic properties of ParameterGrid."""
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
# Special case: empty grid (useful to get default estimator settings)
empty | = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
has_empty = ParameterGrid([{'C': [1, 10]}, {}])
assert_equal(len(has_empty), 3)
assert_equal(list(has_empty), [ | {'C': 1}, {'C': 10}, {}])
def test_grid_search():
"""Test that the best estimator contains the right value for foo_param"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
"""Test search over a "grid" with only one point.
Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]})
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
"""Test that grid search can be used for model selection only"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_ |
rpedigoni/fbads | tests/test_creative.py | Python | apache-2.0 | 1,345 | 0.002974 | # coding: utf-8
from fbads import FBAds
from tests import BaseTestCase
class FBAdsCreativeTestCase(BaseTestCase):
def test_add_creative(self):
with self.replay():
fbads = FBAds(account_id='1378857852381224', access_token='a_valid_token')
creative_id = fbads.creative.add(
title=u'Test creative',
body=u'Testing creative creation! Lorem ipsum here.',
link_url='http://fbads.readthedocs.org/en/latest/index.html',
image_url='https://d1dhh18vvfes41.cloudfront.net/417x300/051057500.jpg',
)
self.assertEqual(creative_id, '6015403647082')
def test_list_creatives(self):
with self.replay():
fbads = FBAds(account_id='1378857852381224', access_token='a_valid_token')
creatives = fbads.creative.list(fields=['title'], limit=10)
self.assertEqual(le | n(creatives), 1)
creative = creatives.pop()
self.assertEqual(creati | ve.id, '6014479876682')
self.assertEqual(creative.title, 'Luizalabs.com')
def test_delete_creative(self):
with self.replay():
fbads = FBAds(account_id='1378857852381224', access_token='a_valid_token')
deleted = fbads.creative.delete('6015403647082')
self.assertTrue(deleted)
|
s-pearce/glider-utilities | glider_utils/parsers/dbd_parsers.py | Python | gpl-2.0 | 6,823 | 0.001905 | #!/usr/bin/env python
"""
@package glider_utils
@file glider_utils.py
@author Stuart Pearce & Chris Wingard
@brief Module containing glider utiliities
"""
__author__ = 'Stuart Pearce & Chris Wingard'
__license__ = 'Apache 2.0'
import numpy as np
import warnings
#import pdb
import re
#import pygsw.vectors as gsw
class DbaDataParser(object):
"""
A class that parses a glider data file and holds it in dictionaries.
GliderParsedData parses a Slocum Electric Glider data file that has
been converted to ASCII from binary, and holds the self describing
header data in a header dictionary and the data in a data dictionary
using the column labels as the dictionary keys.
Construct an instance of GliderParsedData using the filename of the
ASCII file containing the glider data.
E.g.:
glider_data = GliderParsedData('glider_data_file.mbd')
glider_data.hdr_dict holds the header dictionary with the self
describing ASCII tags from the file as keys.
data_dict holds a data dictionary with the variable names (column
labels) as keys.
A sub-dictionary holds the name of the variable (same as the key),
the data units, the number of binary bytes used to store each
variable type, the name of the variable, and the data using the
keys:
'Name'
'Units'
'Number_of_Bytes'
'Data'
For example, to retrieve the data for 'variable_name':
vn_data = glider_data.data_dict['variable_name]['Data']
"""
def __init__(self, filename):
self._fid = open(filename, 'r')
self.hdr_dict = {}
self.data_dict = {}
self._read_header()
self._read_data()
self._fid.close()
def _read_header(self):
"""
Read in the self describing header lines of an ASCII glider data
| file.
"""
# There are usually 14 header lines, start with 14,
# and check the 'num_ascii_tags' line.
num_hdr_lines = 14
header_pattern = r'( | .*): (.*)$'
header_re = re.compile(header_pattern)
#pdb.set_trace()
hdr_line = 1
while hdr_line <= num_hdr_lines:
line = self._fid.readline()
match = header_re.match(line)
if match:
key = match.group(1)
value = match.group(2)
value = value.strip()
if 'num_ascii_tags' in key:
num_hdr_lines = int(value)
self.hdr_dict[key] = value
hdr_line += 1
def _read_data(self):
"""
Read in the column labels, data type, number of bytes of each
data type, and the data from an ASCII glider data file.
"""
column_labels = self._fid.readline().split()
column_type = self._fid.readline().split()
column_num_bytes = self._fid.readline().split()
# read each row of data & use np.array's ability to grab a
# column of an array
data = []
#pdb.set_trace()
for line in self._fid.readlines():
data.append(line.split())
data_array = np.array(data, dtype=np.float) # NOTE: this is an array of strings
# warn if # of described data rows != to amount read in.
num_columns = int(self.hdr_dict['sensors_per_cycle'])
if num_columns != data_array.shape[1]:
warnings.warn('Glider data file does not have the same' +
'number of columns as described in header.\n' +
'described %d, actual %d' % (num_columns,
data_array.shape[1])
)
# extract data to dictionary
for ii in range(num_columns):
units = column_type[ii]
data_col = data_array[:, ii]
self.data_dict[column_labels[ii]] = {
'Name': column_labels[ii],
'Units': units,
'Number_of_Bytes': int(column_num_bytes[ii]),
'Data': data_col
}
# change ISO lat or lon format to decimal degrees
if units == 'lat' or units == 'lon':
min_d100, deg = np.modf(data_col/100.)
deg_col = deg + (min_d100*100.)/60.
self.data_dict[column_labels[ii]]['Data_deg'] = deg_col
self.data_keys = column_labels
class DataVizDataParser(DbaDataParser):
"""
A class that parses a glider data file and holds it in dictionaries.
GliderParsedData parses a Slocum Electric Glider data file that has
been converted to ASCII from binary, and holds the self describing
header data in a header dictionary and the data in a data dictionary
using the column labels as the dictionary keys.
Construct an instance of GliderParsedData using the filename of the
ASCII file containing the glider data.
E.g.:
glider_data = GliderParsedData('glider_data_file.mbd')
glider_data.hdr_dict holds the header dictionary with the self
describing ASCII tags from the file as keys.
data_dict holds a data dictionary with the variable names (column
labels) as keys.
A sub-dictionary holds the name of the variable (same as the key),
the data units, the number of binary bytes used to store each
variable type, the name of the variable, and the data using the
keys:
'Name'
'Units'
'Number_of_Bytes'
'Data'
For example, to retrieve the data for 'variable_name':
vn_data = glider_data.data_dict['variable_name]['Data']
"""
def _read_header(self):
pass
def _read_data(self):
"""
Read in the column labels, data type/units, and the data from an Data Visualizer data file.
"""
filename_hdr = self._fid.readline()
column_labels = self._fid.readline().split()
column_type = self._fid.readline().split()
#column_num_bytes = self._fid.readline().split()
# read each row of data & use np.array's ability to grab a
# column of an array
data = []
for line in self._fid.readlines():
data.append(line.split())
data_array = np.array(data) # NOTE: can't make floats because of lat & lon
num_columns = len(column_labels)
# extract data to dictionary
for ii in range(num_columns):
self.data_dict[column_labels[ii]] = {
'Name': column_labels[ii],
'Units': column_type[ii],
#'Number_of_Bytes': int(column_num_bytes[ii]),
'Data': data_array[:, ii]
}
self.data_keys = column_labels
class GliderData(dict):
""" An object specifically to store Slocum glider data.
"""
def __init__():
dict.__init__ |
plotly/plotly.py | packages/python/plotly/plotly/validators/choroplethmapbox/legendgrouptitle/_text.py | Python | mit | 452 | 0 | i | mport _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="text",
parent_name="choroplethmapbox.legendgrouptitle",
**kwargs
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs |
)
|
anchore/anchore-engine | tests/integration/services/policy_engine/test_cve_updates.py | Python | apache-2.0 | 7,228 | 0.000415 | """
This is an integration-level test for checking CVE updates at fine granularity.
"""
import sqlalchemy.exc
from anchore_engine.db import (
FixedArtifact,
Image,
ImagePackage,
Vulnerability,
VulnerableArtifact,
)
from anchore_engine.db import get_thread_scoped_session as get_session
from anchore_engine.services.policy_engine.engine.feeds import feeds
from anchore_engine.subsys import logger
logger.enable_test_logging(level="DEBUG")
def test_cve_updates(test_data_env):
test_env = test_data_env
test_env.init_feeds()
test_user_id = "test1"
test_img_id = "img1"
test_image = Image(
user_id=test_user_id, id=test_img_id, distro_name="centos", distro_version="7"
)
test_image.familytree_json = [test_img_id]
test_image.layers_json = [test_img_id]
test_image.layer_info_json = ["somelayer_here"]
test_image.like_distro = "centos"
test_image.state = "analyzed"
test_image.digest = "digest1"
test_image.anchore_type = "undefined"
test_image.dockerfile_mode = "Guessed"
test_image.docker_history_json = ["line1", "line2"]
test_image.docker_data_json = {"Config": {}, "ContainerConfig": {}}
test_image.dockerfile_contents = "FROM BLAH"
test_package = ImagePackage(
image_user_id=test_user_id,
image_id=test_img_id,
name="testpackage",
version="1.0",
pkg_type="RPM",
)
test_package.src_pkg = "testpackage"
test_package.distro_name = "centos"
test_package.distro_version = "7"
test_package.like_distro = "centos"
test_package.license = "apache2"
test_package.fullversion = "1.0"
test_package.normalized_src_pkg = "1.0"
test_package.release = ""
test_package.size = 1000
test_package.origin = "upstream"
test_package.arch = "x86_64"
test_package.image = test_image
test_cve = Vulnerability(id="CVE123", namespace_name="centos:7")
test_cve.severity = "High"
test_cve.description = "some test cve"
test_cve.cvss2_score = "1.0"
test_cve.metadata_json = {}
test_cve.cvss2_vectors = ""
test_cve.link = "http://mitre.com/cve123"
test_fixedin = FixedArtifact(vulnerability_id=test_cve.id)
test_fixedin.name = "testpackage"
test_fixedin.version = "1.1"
test_fixedin.version_format = "rpm"
test_fixedin.epochless_version = "1.1"
test_fixedin.include_later_versions = True
test_fixedin.parent = test_cve
test_cve.fixed_in = [test_fixedin]
test_vulnin = VulnerableArtifact(vulnerability_ | id=test_cve.id)
test_vulnin.name = "testpackage"
test_vulnin.version = "0.9"
test_vulnin.epochless_version = "0.9"
test_vulnin.namespace_name = "centos:7"
test_vulnin.version_format = "rpm | "
test_vulnin.include_previous_versions = False
test_vulnin.parent = test_cve
test_cve.vulnerable_in = [test_vulnin]
db = get_session()
try:
db.add(test_image)
db.add(test_package)
db.commit()
except sqlalchemy.exc.IntegrityError:
db.rollback()
except Exception:
logger.exception("Unexpected failure")
raise
db = get_session()
try:
db.add(test_cve)
feeds.process_updated_vulnerability(db, test_cve)
db.commit()
except sqlalchemy.exc.IntegrityError:
logger.exception("Failed!")
db.rollback()
finally:
db = get_session()
i = db.query(Image).get((test_img_id, test_user_id))
print(("Vulns: {}".format(i.vulnerabilities())))
db.commit()
test_cve2 = Vulnerability(id="CVE123", namespace_name="centos:7")
test_cve2.severity = "Medium"
test_cve2.description = "some test cve"
test_cve2.cvss2_score = "1.0"
test_cve2.metadata_json = {}
test_cve2.cvss2_vectors = ""
test_cve2.link = "http://mitre.com/cve123"
fix2 = FixedArtifact(name="pkg2", version="1.2", epochless_version="1.2")
fix2.namespace_name = "centos:7"
fix2.vulnerability_id = test_cve2.id
test_cve2.fixed_in = [fix2]
db = get_session()
try:
t2 = db.merge(test_cve2)
db.add(t2)
feeds.process_updated_vulnerability(db, t2)
db.commit()
except sqlalchemy.exc.IntegrityError:
logger.exception("Failed!")
db.rollback()
finally:
db = get_session()
i = db.query(Image).get((test_img_id, test_user_id))
print(("Vulns: {}".format(i.vulnerabilities())))
db.commit()
def test_github_advisory_fixed_in(test_data_env):
test_env = test_data_env
test_env.init_feeds()
test_user_id = "test1"
test_img_id = "img1"
test_image = Image(
user_id=test_user_id, id=test_img_id, distro_name="centos", distro_version="7"
)
test_image.familytree_json = [test_img_id]
test_image.layers_json = [test_img_id]
test_image.layer_info_json = ["somelayer_here"]
test_image.like_distro = "centos"
test_image.state = "analyzed"
test_image.digest = "digest1"
test_image.anchore_type = "undefined"
test_image.dockerfile_mode = "Guessed"
test_image.docker_history_json = ["line1", "line2"]
test_image.docker_data_json = {"Config": {}, "ContainerConfig": {}}
test_image.dockerfile_contents = "FROM BLAH"
test_package = ImagePackage(
image_user_id=test_user_id,
image_id=test_img_id,
name="testpackage",
version="1.0",
pkg_type="python",
)
test_package.src_pkg = "testpackage"
test_package.distro_name = "centos"
test_package.distro_version = "7"
test_package.like_distro = "centos"
test_package.license = "apache2"
test_package.fullversion = "1.0"
test_package.normalized_src_pkg = "1.0"
test_package.release = ""
test_package.size = 1000
test_package.origin = "upstream"
test_package.arch = "x86_64"
test_package.image = test_image
test_cve = Vulnerability(id="GHSA-rpch-cqj9-h65r", namespace_name="github:python")
test_cve.severity = "High"
test_cve.description = "some advisory ghsa"
test_cve.link = "http://mitre.com/cve123"
test_fixedin = FixedArtifact(vulnerability_id=test_cve.id)
test_fixedin.name = "testpackage"
test_fixedin.version = "None"
test_fixedin.fix_metadata = {"first_patched_version": "1.2"}
test_fixedin.version_format = "semver"
test_fixedin.parent = test_cve
test_cve.fixed_in = [test_fixedin]
db = get_session()
try:
db.add(test_image)
db.add(test_package)
db.commit()
except sqlalchemy.exc.IntegrityError:
db.rollback()
except Exception:
logger.exception("Unexpected failure")
raise
db = get_session()
# XXX This needs to be a fixture
try:
db.add(test_cve)
feeds.process_updated_vulnerability(db, test_cve)
db.commit()
except sqlalchemy.exc.IntegrityError:
logger.exception("Failed!")
db.rollback()
db = get_session()
image_vuln = db.query(Image).get((test_img_id, test_user_id))
# should be one vulnerability
vulnerabilities = image_vuln.vulnerabilities()
assert len(vulnerabilities) == 1
img_pkg_vuln = vulnerabilities[0]
assert img_pkg_vuln.fixed_in() == "1.2"
|
spr/OggifyOSX | mutagen/tests/test_mp4.py | Python | gpl-2.0 | 19,301 | 0.001917 | import os
import shutil
import struct
from cStringIO import StringIO
from tempfile import mkstemp
from tests import TestCase, add
from mutagen.mp4 import MP4, Atom, Atoms, MP4Tags, MP4Info, \
delete, MP4Cover, MP4MetadataError
from mutagen._util import cdata
try: from os.path import devnull
except ImportError: devnull = "/dev/null"
class TAtom(TestCase):
uses_mmap = False
def test_no_children(self):
fileobj = StringIO("\x00\x00\x00\x08atom")
atom = Atom(fileobj)
self.failUnlessRaises(KeyError, atom.__getitem__, "test")
def test_length_1(self):
fileobj = StringIO("\x00\x00\x00\x01atom"
"\x00\x00\x00\x00\x00\x00\x00\x08" + "\x00" * 8)
self.failUnlessEqual(Atom(fileobj).length, 8)
def test_render_too_big(self):
class TooBig(str):
def __len__(self):
return 1L << 32
data = TooBig("test")
try: len(data)
except OverflowError:
# Py_ssize_t is still only 32 bits on this system.
self.failUnlessRaises(OverflowError, Atom.render, "data", data)
else:
data = Atom.render("data", data)
self.failUnlessEqual(len(data), 4 + 4 + 8 + 4)
def test_length_0(self):
fileobj = StringIO("\x00\x00\x00\x00atom")
Atom(fileobj)
self.failUnlessEqual(fileobj.tell(), 8)
add(TAtom)
class TAtoms(TestCase):
uses_mmap = False
filename = os.path.join("tests", "data", "has-tags.m4a")
def setUp(self):
self.atoms = Atoms(file(self.filename, "rb"))
def test___contains__(self):
self.failUnless(self.atoms["moov"])
self.failUnless(self.atoms["moov.udta"])
self.failUnlessRaises(KeyError, self.atoms.__getitem__, "whee")
def test_name(self):
self.failUnlessEqual(self.atoms.atoms[0].name, "ftyp")
def test_children(self):
self.failUnless(self.atoms.atoms[2].children)
def test_no_children(self):
self.failUnless(self.atoms.atoms[0].children is None)
def test_extra_trailing_data(self):
data = StringIO(Atom.render("data", "whee") + "\x00\x00")
self.failUnless(Atoms(data))
def test_repr(self):
repr(self.atoms)
add(TAtoms)
class TMP4Info(TestCase):
uses_mmap = False
def test_no_soun(self):
self.failUnlessRaises(
IOError, self.test_mdhd_version_1, "vide")
def test_mdhd_version_1(self, soun="soun"):
mdhd = Atom.render("mdhd", ("\x01\x00\x00\x00" + "\x00" * 16 +
"\x00\x00\x00\x02" + # 2 Hz
"\x00\x00\x00\x00\x00\x00\x00\x10"))
hdlr = Atom.render("hdlr", "\x00" * 8 + soun)
mdia = Atom.render("mdia", mdhd + hdlr)
trak = Atom.render("trak", mdia)
moov = Atom.render("moov", trak)
fileobj = StringIO(moov)
atoms = Atoms(fileobj)
info = MP4Info(atoms, fileobj)
self.failUnlessEqual(info.length, 8)
def test_multiple_tracks(self):
hdlr = Atom.render("hdlr", "\x00" * 8 + "whee")
mdia = Atom.render("mdia", hdlr)
trak1 = Atom.render("trak", mdia)
mdhd = Atom.render("mdhd", ("\x01\x00\x00\x00" + "\x00" * 16 +
"\x00\x00\x00\x02" + # 2 Hz
"\x00\x00\x00\x00\x00\x00\x00\x10"))
hdlr = Atom.render("hdlr", "\x00" * 8 + "soun")
mdia = Atom.render("mdia", mdhd + hdlr)
trak2 = Atom.render("trak", mdia)
moov = Atom.render("moov", trak1 + trak2)
fileobj = StringIO(moov)
atoms = Atoms(fileobj)
info = MP4Info(atoms, fileobj)
self.failUnlessEqual(info.length, 8)
add(TMP4Info)
class TMP4Tags(TestCase):
uses_mmap = False
def wrap_ilst(self, data):
ilst = Atom.render("ilst", data)
meta = Atom.render("meta", " | \x00" * 4 + ilst)
data = Atom.render("moov", Atom.render("udta", meta))
fileobj = StringIO(data)
return MP4Tags(Atoms(fileobj), fileobj)
| def test_genre(self):
data = Atom.render("data", "\x00" * 8 + "\x00\x01")
genre = Atom.render("gnre", data)
tags = self.wrap_ilst(genre)
self.failIf("gnre" in tags)
self.failUnlessEqual(tags["\xa9gen"], ["Blues"])
def test_empty_cpil(self):
cpil = Atom.render("cpil", Atom.render("data", "\x00" * 8))
tags = self.wrap_ilst(cpil)
self.failUnless("cpil" in tags)
self.failIf(tags["cpil"])
def test_genre_too_big(self):
data = Atom.render("data", "\x00" * 8 + "\x01\x00")
genre = Atom.render("gnre", data)
tags = self.wrap_ilst(genre)
self.failIf("gnre" in tags)
self.failIf("\xa9gen" in tags)
def test_strips_unknown_types(self):
data = Atom.render("data", "\x00" * 8 + "whee")
foob = Atom.render("foob", data)
tags = self.wrap_ilst(foob)
self.failIf(tags)
def test_bad_covr(self):
data = Atom.render("foob", "\x00\x00\x00\x0E" + "\x00" * 4 + "whee")
covr = Atom.render("covr", data)
self.failUnlessRaises(MP4MetadataError, self.wrap_ilst, covr)
def test_covr_blank_format(self):
data = Atom.render("data", "\x00\x00\x00\x00" + "\x00" * 4 + "whee")
covr = Atom.render("covr", data)
tags = self.wrap_ilst(covr)
self.failUnlessEqual(MP4Cover.FORMAT_JPEG, tags["covr"][0].format)
def test_render_bool(self):
self.failUnlessEqual(MP4Tags()._MP4Tags__render_bool('pgap', True),
"\x00\x00\x00\x19pgap\x00\x00\x00\x11data"
"\x00\x00\x00\x15\x00\x00\x00\x00\x01")
self.failUnlessEqual(MP4Tags()._MP4Tags__render_bool('pgap', False),
"\x00\x00\x00\x19pgap\x00\x00\x00\x11data"
"\x00\x00\x00\x15\x00\x00\x00\x00\x00")
def test_render_text(self):
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_text('purl', ['http://foo/bar.xml'], 0),
"\x00\x00\x00*purl\x00\x00\x00\"data\x00\x00\x00\x00\x00\x00"
"\x00\x00http://foo/bar.xml")
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_text('aART', [u'\u0041lbum Artist']),
"\x00\x00\x00$aART\x00\x00\x00\x1cdata\x00\x00\x00\x01\x00\x00"
"\x00\x00\x41lbum Artist")
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_text('aART', [u'Album Artist', u'Whee']),
"\x00\x00\x008aART\x00\x00\x00\x1cdata\x00\x00\x00\x01\x00\x00"
"\x00\x00Album Artist\x00\x00\x00\x14data\x00\x00\x00\x01\x00"
"\x00\x00\x00Whee")
def test_render_data(self):
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_data('aART', 1, ['whee']),
"\x00\x00\x00\x1caART"
"\x00\x00\x00\x14data\x00\x00\x00\x01\x00\x00\x00\x00whee")
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_data('aART', 2, ['whee', 'wee']),
"\x00\x00\x00/aART"
"\x00\x00\x00\x14data\x00\x00\x00\x02\x00\x00\x00\x00whee"
"\x00\x00\x00\x13data\x00\x00\x00\x02\x00\x00\x00\x00wee")
def test_bad_text_data(self):
data = Atom.render("datA", "\x00\x00\x00\x01\x00\x00\x00\x00whee")
data = Atom.render("aART", data)
self.failUnlessRaises(MP4MetadataError, self.wrap_ilst, data)
def test_render_freeform(self):
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_freeform(
'----:net.sacredchao.Mutagen:test', ['whee', 'wee']),
"\x00\x00\x00a----"
"\x00\x00\x00\"mean\x00\x00\x00\x00net.sacredchao.Mutagen"
"\x00\x00\x00\x10name\x00\x00\x00\x00test"
"\x00\x00\x00\x14data\x00\x00\x00\x01\x00\x00\x00\x00whee"
"\x00\x00\x00\x13data\x00\x00\x00\x01\x00\x00\x00\x00wee")
def test_bad_freeform(self):
mean = Atom.render("mean", "net.sacredchao.Mutagen")
name = Atom.render("name", "empty test key")
bad_freeform = Atom.render("----", "\x00" * 4 + mean |
user01/love-letter | loveletter/env.py | Python | mit | 7,457 | 0.000805 | from operator import itemgetter
import gym
from gym import spaces
from gym.utils import seeding
from .game import Game
from .card import Card
from .player import PlayerAction, PlayerTools
from .agents.random import AgentRandom
class LoveLetterEnv(gym.Env):
    """OpenAI Gym environment wrapping a four-player Love Letter card game.

    The learning agent always controls the player at seat 0; the other
    seats are driven by ``agent_other`` (an ``AgentRandom`` when ``None``
    is given). Actions are indices into the fixed 15-entry action list
    built by :meth:`actions_set`; observations are the vectors returned by
    ``Game.state()``.

    Rewards (see :meth:`advance_game`): -1 for an action that is invalid
    in the current game state, +15 for winning the game, -5 for losing,
    0 otherwise.
    """

    def __init__(self, agent_other, seed=451):
        # 15 discrete actions -- one per entry of actions_set().
        self.action_space = spaces.Discrete(15)
        # NOTE(review): shape (24,) presumably matches len(Game.state());
        # confirm against the Game implementation.
        self.observation_space = spaces.Box(low=0, high=1, shape=(24,))
        # Fall back to a random opponent when none was supplied.
        self._agent_other = AgentRandom(
            seed) if agent_other is None else agent_other
        self._seed(seed)
        self._reset()
        # Fresh 4-player game with a randomly drawn game seed.
        self._game = Game.new(4, self.np_random.random_integers(5000000))

    def _seed(self, seed=None):
        """Seed this environment's RNG; return the list of seeds used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _step(self, action):
        """Apply the agent's action index; return (obs, reward, done, info)."""
        assert self.action_space.contains(action)
        player_action = self.action_from_index(action)
        # Action index not valid in the current state: penalize, don't advance.
        if player_action is None:
            return self._game.state(), -1, False, {"round": self._game.round()}
        self._game, reward = LoveLetterEnv.advance_game(
            self._game, player_action, self._agent_other)
        # Episode ends when the game is over or seat 0 was eliminated.
        done = self._game.over() or not PlayerTools.is_playing(
            self._game.players()[0])
        return self._game.state(), reward, done, {"round": self._game.round()}

    def _reset(self):
        """Start a brand-new 4-player game and return its initial state."""
        self._game = Game.new(4, self.np_random.random_integers(5000000))
        return self._game.state()

    def force(self, game):
        """Force the environment to a certain game state"""
        self._game = game
        return game.state()

    @staticmethod
    def advance_game(game, action, agent):
        """Advance a game with an action

        * Play an action
        * Advance the game using the agent
        * Return the game pending for the same player turn _unless_ the game ends

        returns <game, reward>
        """
        if not game.is_action_valid(action):
            return game, -1
        player_idx = game.player_turn()
        game_current, _ = game.move(action)
        # Let the opponent agent play (and skip eliminated seats) until it is
        # player_idx's turn again or the game stops being active.
        while game_current.active():
            if not game_current.is_current_player_playing():
                game_current = game_current.skip_eliminated_player()
            elif game_current.player_turn() != player_idx:
                game_current, _ = game_current.move(agent.move(game_current))
            else:
                break
        # print("Round", game.round(), '->', game_current.round(), ':', 'OVER' if game_current.over() else 'RUNN')
        if game_current.over():
            if game_current.winner() == player_idx:
                return game_current, 15
            else:
                return game_current, -5
        return game_current, 0

    def action_by_score(self, scores, game=None):
        """
        Returns best action based on assigned scores

        return (action, score, idx)
        """
        if len(scores) != 15:
            raise Exception("Invalid scores length: {}".format(len(scores)))
        game = self._game if game is None else game
        assert game.active()
        actions_possible = self.actions_set(game)
        # Keep only the actions that are valid in the current game state.
        actions = [(action, score, idx) for action, score, idx in
                   zip(actions_possible,
                       scores,
                       range(len(actions_possible)))
                   if game.is_action_valid(action)]
        # NOTE(review): itemgetter(2) selects the *idx* element of the
        # (action, score, idx) tuples, so this returns the valid action with
        # the highest index, not the highest score. The docstring suggests
        # itemgetter(1) was intended -- confirm before changing.
        action = max(actions, key=itemgetter(2))
        return action

    def action_from_index(self, action_index, game=None):
        """Returns valid action based on index and game"""
        game = self._game if game is None else game
        action_candidates = self.actions_set(game)
        # Pair every candidate with its index and keep the one matching
        # action_index, provided it is valid right now.
        actions = [(idx, action) for idx, action in
                   enumerate(action_candidates)
                   if game.is_action_valid(action) and idx == action_index]
        return actions[0][1] if len(actions) == 1 else None

    def actions_possible(self, game=None):
        """Returns valid (idx, actions) based on a current game"""
        game = self._game if game is None else game
        action_candidates = self.actions_set(game)
        actions = [(idx, action) for idx, action in
                   enumerate(action_candidates)
                   if game.is_action_valid(action)]
        return actions

    def actions_set(self, game=None):
        """Returns all actions for a game"""
        game = self._game if game is None else game
        player_self = game.player_turn()
        opponents = game.opponent_turn()
        # Fixed 15-entry action list; targeted actions pick a random
        # opponent, so the mapping index -> target is not deterministic.
        actions_possible = [
            PlayerAction(Card.guard,
                         self.np_random.choice(opponents),
                         Card.priest,
                         Card.noCard),
            PlayerAction(Card.guard,
                         self.np_random.choice(opponents),
                         Card.baron,
                         Card.noCard),
            PlayerAction(Card.guard,
                         self.np_random.choice(opponents),
                         Card.handmaid,
                         Card.noCard),
            PlayerAction(Card.guard,
                         self.np_random.choice(opponents),
                         Card.prince,
                         Card.noCard),
            PlayerAction(Card.guard,
                         self.np_random.choice(opponents),
                         Card.king,
                         Card.noCard),
            PlayerAction(Card.guard,
                         self.np_random.choice(opponents),
                         Card.countess,
                         Card.noCard),
            PlayerAction(Card.guard,
                         self.np_random.choice(opponents),
                         Card.princess,
                         Card.noCard),
            PlayerAction(Card.priest,
                         self.np_random.choice(opponents),
                         Card.noCard,
                         Card.noCard),
            PlayerAction(Card.baron,
                         self.np_random.choice(opponents),
                         Card.noCard,
                         Card.noCard),
            PlayerAction(Card.king,
                         self.np_random.choice(opponents),
                         Card.noCard,
                         Card.noCard),
            PlayerAction(Card.prince,
                         self.np_random.choice(opponents),
                         Card.noCard,
                         Card.noCard),
            PlayerAction(Card.prince, player_self, Card.noCard, Card.noCard),
            PlayerAction(Card.handmaid, player_self, Card.noCard, Card.noCard),
            PlayerAction(Card.countess, player_self, Card.noCard, Card.noCard),
            PlayerAction(Card.princess, player_self, Card.noCard, Card.noCard)
        ]
        return actions_possible
|
UTSA-ICS/python-keystoneclient-SID | keystoneclient/tests/v2_0/test_access.py | Python | apache-2.0 | 5,660 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import testresources
from keystoneclient import access
from keystoneclient.openstack.common import timeutils
from keystoneclient.tests import client_fixtures as token_data
from keystoneclient.tests.v2_0 import client_fixtures
from keystoneclient.tests.v2_0 import utils
class AccessInfoTest(utils.TestCase, testresources.ResourcedTestCase):
    """Tests building v2 AccessInfo objects from token fixtures."""

    # Shared token examples, managed (and cached) by testresources.
    resources = [('examples', token_data.EXAMPLES_RESOURCE)]

    def test_building_unscoped_accessinfo(self):
        # An unscoped token carries user identity but no tenant/catalog data.
        token = client_fixtures.unscoped_token()
        auth_ref = access.AccessInfo.factory(body=token)
        self.assertTrue(auth_ref)
        self.assertIn('token', auth_ref)
        self.assertEqual(auth_ref.auth_token,
                         '3e2813b7ba0b4006840c3825860b86ed')
        self.assertEqual(auth_ref.username, 'exampleuser')
        self.assertEqual(auth_ref.user_id, 'c4da488862bd435c9e6c0275a0d0e49a')
        self.assertEqual(auth_ref.role_names, [])
        self.assertIsNone(auth_ref.tenant_name)
        self.assertIsNone(auth_ref.tenant_id)
        self.assertIsNone(auth_ref.auth_url)
        self.assertIsNone(auth_ref.management_url)
        self.assertFalse(auth_ref.scoped)
        self.assertFalse(auth_ref.domain_scoped)
        self.assertFalse(auth_ref.project_scoped)
        self.assertFalse(auth_ref.trust_scoped)
        self.assertIsNone(auth_ref.project_domain_id)
        self.assertIsNone(auth_ref.project_domain_name)
        self.assertEqual(auth_ref.user_domain_id, 'default')
        self.assertEqual(auth_ref.user_domain_name, 'Default')
        self.assertEqual(auth_ref.expires, token.expires)

    def test_will_expire_soon(self):
        # Token expiring in 5 minutes (300s): "soon" depends on the
        # stale_duration threshold.
        token = client_fixtures.unscoped_token()
        expires = timeutils.utcnow() + datetime.timedelta(minutes=5)
        token.expires = expires
        auth_ref = access.AccessInfo.factory(body=token)
        self.assertFalse(auth_ref.will_expire_soon(stale_duration=120))
        self.assertTrue(auth_ref.will_expire_soon(stale_duration=300))
        self.assertFalse(auth_ref.will_expire_soon())

    def test_building_scoped_accessinfo(self):
        # A project-scoped token adds tenant info and a service catalog;
        # tenant_* and project_* accessors must agree.
        auth_ref = access.AccessInfo.factory(
            body=client_fixtures.project_scoped_token())
        self.assertTrue(auth_ref)
        self.assertIn('token', auth_ref)
        self.assertIn('serviceCatalog', auth_ref)
        self.assertTrue(auth_ref['serviceCatalog'])
        self.assertEqual(auth_ref.auth_token,
                         '04c7d5ffaeef485f9dc69c06db285bdb')
        self.assertEqual(auth_ref.username, 'exampleuser')
        self.assertEqual(auth_ref.user_id, 'c4da488862bd435c9e6c0275a0d0e49a')
        self.assertEqual(auth_ref.role_names, ['Member'])
        self.assertEqual(auth_ref.tenant_name, 'exampleproject')
        self.assertEqual(auth_ref.tenant_id,
                         '225da22d3ce34b15877ea70b2a575f58')
        self.assertEqual(auth_ref.tenant_name, auth_ref.project_name)
        self.assertEqual(auth_ref.tenant_id, auth_ref.project_id)
        self.assertEqual(auth_ref.auth_url, ('http://public.com:5000/v2.0',))
        self.assertEqual(auth_ref.management_url, ('http://admin:35357/v2.0',))
        self.assertEqual(auth_ref.project_domain_id, 'default')
        self.assertEqual(auth_ref.project_domain_name, 'Default')
        self.assertEqual(auth_ref.user_domain_id, 'default')
        self.assertEqual(auth_ref.user_domain_name, 'Default')
        self.assertTrue(auth_ref.scoped)
        self.assertTrue(auth_ref.project_scoped)
        self.assertFalse(auth_ref.domain_scoped)

    def test_diablo_token(self):
        # Diablo-era tokens lack a separate project name; it mirrors the id.
        diablo_token = self.examples.TOKEN_RESPONSES[
            self.examples.VALID_DIABLO_TOKEN]
        auth_ref = access.AccessInfo.factory(body=diablo_token)
        self.assertTrue(auth_ref)
        self.assertEqual(auth_ref.username, 'user_name1')
        self.assertEqual(auth_ref.project_id, 'tenant_id1')
        self.assertEqual(auth_ref.project_name, 'tenant_id1')
        self.assertEqual(auth_ref.project_domain_id, 'default')
        self.assertEqual(auth_ref.project_domain_name, 'Default')
        self.assertEqual(auth_ref.user_domain_id, 'default')
        self.assertEqual(auth_ref.user_domain_name, 'Default')
        self.assertEqual(auth_ref.role_names, ['role1', 'role2'])
        self.assertFalse(auth_ref.scoped)

    def test_grizzly_token(self):
        grizzly_token = self.examples.TOKEN_RESPONSES[
            self.examples.SIGNED_TOKEN_SCOPED_KEY]
        auth_ref = access.AccessInfo.factory(body=grizzly_token)
        self.assertEqual(auth_ref.project_id, 'tenant_id1')
        self.assertEqual(auth_ref.project_name, 'tenant_name1')
        self.assertEqual(auth_ref.project_domain_id, 'default')
        self.assertEqual(auth_ref.project_domain_name, 'Default')
        self.assertEqual(auth_ref.user_domain_id, 'default')
        self.assertEqual(auth_ref.user_domain_name, 'Default')
        self.assertEqual(auth_ref.role_names, ['role1', 'role2'])
def load_tests(loader, tests, pattern):
    """unittest ``load_tests`` hook.

    Wrap the discovered tests in an OptimisingTestSuite so testresources
    can reorder them and share the class-level resources between tests.
    """
    return testresources.OptimisingTestSuite(tests)
|
chrisswk/reddit_comment_scraper | setup.py | Python | mit | 1,062 | 0.000942 | import os, sys
from setuptools import setup
setup(
name='reddit_comment_scraper',
version='2.0.5',
description='A simple Reddit-scrapin | g script',
url='https: | //github.com/jfarmer/reddit_comment_scraper',
author='Jesse Farmer',
author_email='jesse@20bits.com',
license='MIT',
packages=['reddit_comment_scraper'],
install_requires=[
'unicodecsv==0.9.4',
'praw==2.1.19'
],
entry_points={
'console_scripts': [
'scrape_comments=reddit_comment_scraper:main'
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Natural Language :: English',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Environment :: Console',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows :: Windows 7',
'Operating System :: Microsoft :: Windows :: Windows Vista',
],
)
|
pinax/pinax-referrals | pinax/referrals/apps.py | Python | mit | 211 | 0 | from django.apps import AppCo | nfig as BaseAppConfig
from django.utils.translation import ugettext_lazy as _
class A | ppConfig(BaseAppConfig):
name = "pinax.referrals"
verbose_name = _("Pinax Referrals")
|
yeongseon/django_beautifulseodang | home/forms.py | Python | mit | 899 | 0.003654 | from allauth.account.forms import *
class MyLoginForm(LoginForm):
    """allauth login form with Korean user-facing field labels."""

    def __init__(self, *args, **kwargs):
        super(MyLoginForm, self).__init__(*args, **kwargs)
        # Override default allauth labels; the strings are user-facing
        # Korean ("email", "password", "keep me signed in").
        self.fields['login'].label = "이메일"
        self.fields['password'].label = "비밀번호"
        self.fields['remember'].label = "로그인 상태 유지"
        #self.fields['submit'].label = "로그인 상태 유지"
        # You don't want the `remember` field?
        # if 'remember' in self.fields.keys():
        #     del self.fields['remember']
class MySignupForm(SignupForm):
    """allauth signup form with Korean user-facing field labels."""

    def __init__(self, *args, **kwargs):
        super(MySignupForm, self).__init__(*args, **kwargs)
        # Labels: "username", "email", "password", "password (confirm)".
        self.fields['username'].label = "사용자이름"
        self.fields['email'].label = "이메일"
        self.fields['password1'].label = "비밀번호"
        self.fields['password2'].label = "비밀번호확인"
|
ElectroCode/lurklib | lurklib/connection.py | Python | gpl-3.0 | 13,948 | 0.001506 | # This file is part of Lurklib.
# Copyright (C) 2011 LK-
#
# Lurklib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lurklib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Lurklib. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
class _Connection(object):
    def _connect(self, server, port, tls=True, tls_verify=True, proxy=False,
                 proxy_type='SOCKS5', proxy_server=None,
                 proxy_port=None, proxy_username=None, proxy_password=None):
        """
        Connects the socket to an IRC server.
        Required arguments:
        * server - Server to connect to.
        * port - Port to use.
        Optional arguments:
        * tls=True - Should we use TLS/SSL?
        * tls_verify=True - Verify the TLS certificate?
            Only works with Python 3.
        * proxy=False - Should we use a proxy?
        * proxy_type='SOCKS5' - Proxy type: SOCKS5, SOCKS4 or HTTP
        * proxy_server=None - Proxy server's address
        * proxy_port=None - Proxy server's port
        * proxy_username=None - If SOCKS5 is used,
            a proxy username/password can be specified.
        * proxy_password=None - If SOCKS5 is used,
            a proxy username/password can be specified.
        """
        with self.lock:
            if proxy:
                # Map the textual proxy type onto the SocksiPy constant and
                # replace the plain socket with a proxied one.
                if proxy_type == 'SOCKS5':
                    proxy_type = self._m_proxy.PROXY_TYPE_SOCKS5
                elif proxy_type == 'SOCKS4':
                    proxy_type = self._m_proxy.PROXY_TYPE_SOCKS4
                elif proxy_type == 'HTTP':
                    proxy_type = self._m_proxy.PROXY_TYPE_HTTP
                self._socket = self._m_proxy.socksocket()
                self._socket.setproxy(proxytype=proxy_type, \
                        addr=proxy_server, \
                        port=proxy_port, \
                        username=proxy_username, \
                        password=proxy_password)
            if tls:
                if tls_verify:
                    # Write the bundled CA certificates to a temp file so the
                    # TLS layer can verify the server certificate against it.
                    # NOTE(review): the NamedTemporaryFile object is discarded
                    # immediately (only .name is kept) -- on some platforms the
                    # file may be deleted/locked before it is reopened; confirm.
                    ca_bundle = self._m_tempfile.NamedTemporaryFile().name
                    with open(ca_bundle, 'w') as bundle_file:
                        bundle_file.write(self._ca_bundle)
                    cert_required = self._m_tls.CERT_REQUIRED
                    self._socket = \
                        self._m_tls.wrap_socket(self._socket, \
                            cert_reqs=cert_required, \
                            ca_certs=ca_bundle)
                    self._socket.connect((server, port))
                    # Hostname check happens after the handshake completes.
                    self._m_tls.match_hostname(self._socket.getpeercert(), \
                                               server)
                    # Verified-TLS path is fully connected; skip the plain
                    # connect below.
                    return None
                else:
                    # Encrypted but unverified TLS.
                    self._socket = self._m_tls.wrap_socket(self._socket)
            self._socket.connect((server, port))
    def _register(self, nick, user, real_name, password=None):
        """
        Register the connection with the IRC server.
        Required arguments:
        * nick - Nick to use. If a tuple/list is specified -
            it will try to use the first,
            and if the first is already used -
            it will try to use the second and so on.
        * user - Username to use.
        * real_name - Real name to use.
        Optional arguments:
        * password=None - IRC server password.
        """
        with self.lock:
            # PASS (when given) must be sent before NICK/USER.
            if password:
                self._password(password)
            self.nick(nick)
            self._user(user, real_name)
def _init(self, server, nick, user, real_name, password, port=None,
tls=True, tls_verify=True,
proxy=False, proxy_type='SOCKS5', proxy_server=None,
proxy_port=None, proxy_username=None, proxy_password=None):
"""
Connect and register with the IRC server and -
set server-related information variables.
Required arguments:
* server - Server to connect to.
* nick - Nick to use.
If a tuple/list is specified it will try to use the first,
and if the first is already used -
it will try to use the second and so on.
* user - Username to use.
* real_name - Real name to use.
* password=None - IRC server password.
Optional arguments:
* port - Port to use.
* tls=True - Should we use TLS/SSL?
* tls_verify=True - Verify the TLS certificate?
Only works with Python 3.
* proxy=False - Should we use a proxy?
* proxy_type='SOCKS5' - Proxy type: SOCKS5, SOCKS4 or HTTP
* proxy_server=None - Proxy server's address
* proxy_port=None - Proxy server's port
* proxy_username=None - If SOCKS5 is used,
a proxy username/password can be specified.
* proxy_password=None - If SOCKS5 is used,
a proxy username/password can be specified.
"""
with self.lock:
self.current_nick = nick
if tls:
if not port:
port = 6697
self._connect(server, port, tls, tls_verify, proxy, \
| proxy_type, proxy_server, proxy_port, \
proxy_username, proxy_password)
else:
if not port:
port = 6667
self._connect(server, port, tls, tls_verify, proxy, \
proxy_type, proxy_server, proxy_port, \
proxy_username, proxy_password)
while self.readable(2):
dat | a = self.recv()
if data[0] == 'NOTICE':
self.server = data[1][0]
self.con_msg.append(data)
self._register(nick, user, real_name, password)
while self.readable(timeout=4):
rdata = self.recv()
if rdata[0] == 'UNKNOWN':
data = rdata[1][3].replace(':', '', 1)
ncode = rdata[1][1]
if ncode == '004':
info = data.split()
self.server = info[0]
self.ircd = info[1]
self.umodes = info[2]
self.cmodes = info[3]
elif ncode == '005':
version = rdata[1][3].replace(':are supported' + \
'by this server', '')
version = version.split()
for info in version:
try:
info = info.split('=')
name = info[0]
value = info[1]
self.version[name] = value
if name == 'CHARSET':
self.encoding = value
except IndexError:
self.version[info[0]] = True
elif ncode == '376':
self.con_msg.append(rdata)
break
elif ncode == '422':
self.con_msg.append(rdata)
break
else:
if rdata[0] == 'NOTICE':
self.server = rdata[1][0]
self.con_msg.append(rdata[1])
self.motd = tuple(self.motd)
self.con_msg = tuple(self.con_msg)
self.connected = True
self.keep_go |
zqqf16/Peanut | peanut/template.py | Python | mit | 1,770 | 0.000565 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Template"""
from os import path
import jinja2
from jinja2 import FileSystemLoader, ChoiceLoader
from jinja2.exceptions import TemplateNotFound
import peanut
from peanut.utils import get_resource
class SmartLoader(FileSystemLoader):
    """File-system loader that resolves extension-less template names.

    A name containing a dot is looked up verbatim; otherwise each entry of
    ``available_extension`` is tried, in order, before giving up.
    """

    # Extensions probed, in order, for extension-less template names.
    available_extension = ['.html', '.xml']

    def get_source(self, environment, template):
        """Return the template source, probing known extensions if needed."""
        if template is None:
            raise TemplateNotFound(template)
        parent = super(SmartLoader, self)
        if '.' in template:
            # The name already carries an extension: delegate unchanged.
            return parent.get_source(environment, template)
        for suffix in SmartLoader.available_extension:
            try:
                return parent.get_source(environment, template + suffix)
            except TemplateNotFound:
                continue
        raise TemplateNotFound(template)
class Template(object):
    """Thin wrapper around a jinja2 Environment.

    Templates are looked up first under *path* and then in the bundled
    default theme. *filters* (a dict) extends the environment's filters;
    any extra keyword arguments are installed as template globals.
    """

    def __init__(self, path, filters=None, **kwargs):
        search_chain = ChoiceLoader([
            SmartLoader(path),
            SmartLoader(get_resource('themes/default')),
        ])
        self.env = jinja2.Environment(
            loader=search_chain,
            lstrip_blocks=True,
            trim_blocks=True,
        )
        # Install custom filters, if any were supplied.
        if isinstance(filters, dict):
            self.env.filters.update(filters)
        # Seed the global template namespace.
        self.env.globals.update(kwargs)

    def update_context(self, **kwargs):
        """Merge *kwargs* into the environment's global context."""
        self.env.globals.update(kwargs)

    def render(self, name, **context):
        """Render the template called *name* with *context*."""
        return self.env.get_template(name).render(**context)
|
deshi-basara/coovie2 | coovie2/scan.py | Python | mit | 1,799 | 0.000556 | import os
import re
class Scan(object):
    """Helpers for identifying movie files and extracting title/year data."""

    def __init__(self, endings, size_limit):
        """
        :param endings: tuple of accepted file extensions, e.g. ('.mkv', '.avi').
        :param size_limit: minimum size in bytes for a file to count as large.
        """
        self.movie_endings = endings
        self.movie_size_limit = size_limit

    def is_movie(self, file_name):
        """Return True if *file_name* ends with one of the movie extensions."""
        return file_name.endswith(self.movie_endings)

    def is_large(self, file_path):
        """Return True if the file at *file_path* is at least
        ``movie_size_limit`` bytes on disk."""
        return os.stat(file_path).st_size >= self.movie_size_limit

    def extract_file_data(self, file, folder):
        """
        Extract a movie name and year from folder or file.

        The longer of *folder* and *file* (ignoring the 4-character file
        extension) is used as the base name. The first run of 4 digits is
        taken as the year; the text before it (minus the separating dot)
        becomes the title, with dots replaced by spaces.

        :returns: ``[name, year]`` where *year* is a 4-character string,
            or ``-1`` when no 4-digit year is found.
        """
        base_name = folder
        if len(base_name) < (len(file) - 4):
            base_name = file
        # Raw string avoids the invalid '\d' escape warning on Python 3.12+.
        match = re.search(r'\d{4}', base_name)
        if match:
            first_digit = match.start()
            # Slice off the separator character before the year as well.
            name = base_name[:first_digit - 1].replace('.', ' ')
            year = base_name[first_digit:first_digit + 4]
        else:
            name = base_name.replace('.', ' ')
            year = -1
        return [name, year]
|
genonfire/portality | giza/forms.py | Python | mit | 412 | 0 | # -*- coding: utf-8 -*-
from | django import forms
from giza.models import Giza
class GizaEditForm(f | orms.ModelForm):
"""Giza edit form"""
class Meta:
"""Meta for GizaEditForm"""
model = Giza
exclude = ('user',)
def __init__(self, *args, **kwargs):
"""Init"""
self.user = kwargs.pop('user', None)
super(GizaEditForm, self).__init__(*args, **kwargs)
|
LynxyssCZ/Flexget | flexget/plugins/output/utorrent.py | Python | mit | 6,154 | 0.00195 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import os
from logging import getLogger
from flexget import plugin
from flexget.event import event
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.template import RenderError
log = getLogger('utorrent')
class PluginUtorrent(object):
    """
    Parse task content or url for hoster links and adds them to utorrent.

    Example::

        utorrent:
          url: http://localhost:8080/gui/
          username: my_username
          password: my_password
          path: Series

    """

    __author__ = 'Nil'
    __version__ = '0.1'

    schema = {
        'type': 'object',
        'properties': {
            'url': {'type': 'string', 'format': 'url'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'path': {'type': 'string'}
        },
        'required': ['username', 'password', 'url'],
        'additionalProperties': False
    }

    @plugin.priority(120)
    def on_task_download(self, task, config):
        """
        Call download plugin to generate the temp files we will load
        into deluge then verify they are valid torrents
        """
        # If the download plugin is not enabled, we need to call it to get
        # our temp .torrent files
        if 'download' not in task.config:
            download = plugin.get_plugin_by_name('download')
            for _ in task.accepted:
                download.instance.get_temp_files(task, handle_magnets=True, fail_html=True)

    @plugin.priority(135)
    # @plugin.internet(log)
    def on_task_output(self, task, config):
        """Push every accepted entry to the uTorrent WebUI.

        Logs in to fetch the CSRF token, resolves the configured download
        path against the WebUI's allowed directories, then adds each entry
        either as an uploaded .torrent file or as a magnet/URL.
        """
        if not config.get('enabled', True):
            return
        if not task.accepted:
            return
        # don't add when learning
        if task.options.learn:
            return

        session = requests.Session()
        url = config['url']
        if not url.endswith('/'):
            url += '/'
        auth = (config['username'], config['password'])

        # Login: the WebUI hands out a CSRF token that must accompany
        # every subsequent request.
        try:
            response = session.get(url + 'token.html', auth=auth)
        except requests.RequestException as e:
            # BUGFIX: status_code is an int; the old comparison against the
            # string '401' could never match. Also guard against a missing
            # response (connection-level failures set e.response to None).
            if getattr(e, 'response', None) is not None and e.response.status_code == 401:
                raise plugin.PluginError('Invalid credentials, check your utorrent webui username and password.', log)
            raise plugin.PluginError('%s' % e, log)
        token = get_soup(response.text).find('div', id='token').text

        # Map each allowed download directory (normalized) to its index.
        result = session.get(url, auth=auth, params={'action': 'list-dirs', 'token': token}).json()
        download_dirs = dict((os.path.normcase(dir['path']), i) for i, dir in enumerate(result['download-dirs']))

        for entry in task.accepted:
            # Resolve the per-entry path template; fall back to the default
            # download directory (folder index 0) on render failure.
            folder = 0
            path = entry.get('path', config.get('path', ''))
            try:
                path = os.path.expanduser(entry.render(path))
            except RenderError as e:
                log.error('Could not render path for `%s` downloading to default directory.' % entry['title'])
                # Add to default folder
                path = ''

            if path:
                path_normcase = os.path.normcase(path)
                for dir in download_dirs:
                    if path_normcase.startswith(dir.lower()):
                        folder = download_dirs[dir]
                        # Keep only the sub-path below the allowed directory.
                        path = path[len(dir):].lstrip('\\')
                        break
                else:
                    log.error('path `%s` (or one of its parents)is not added to utorrent webui allowed download '
                              'directories. You must add it there before you can use it from flexget. '
                              'Adding to default download directory instead.' % path)
                    path = ''
            if task.options.test:
                log.info('Would add `%s` to utorrent' % entry['title'])
                continue

            # Magnet links are added by URL; everything else needs the
            # downloaded temp .torrent file.
            downloaded = not entry['url'].startswith('magnet:')

            # Check that file is downloaded
            if downloaded and 'file' not in entry:
                entry.fail('file missing?')
                continue

            # Verify the temp file exists
            if downloaded and not os.path.exists(entry['file']):
                tmp_path = os.path.join(task.manager.config_base, 'temp')
                log.debug('entry: %s' % entry)
                log.debug('temp: %s' % ', '.join(os.listdir(tmp_path)))
                entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
                continue

            # Add torrent
            if downloaded:
                # HTTP://[IP]:[PORT]/GUI/?ACTION=ADD-FILE
                # BUGFIX: close the torrent file handle after the upload
                # instead of leaking it.
                data = {'action': 'add-file', 'token': token, 'download_dir': folder, 'path': path}
                with open(entry['file'], 'rb') as torrent_file:
                    files = {'torrent_file': torrent_file}
                    result = session.post(url, params=data, auth=auth, files=files)
            else:
                # http://[IP]:[PORT]/gui/?action=add-url&s=[TORRENT URL]
                data = {'action': 'add-url', 's': entry['url'], 'token': token, 'download_dir': folder, 'path': path}
                result = session.get(url, params=data, auth=auth)

            # A successful WebUI reply contains a 'build' key.
            if 'build' in result.json():
                log.info('Added `%s` to utorrent' % entry['url'])
                log.info('in folder %s ' % folder + path)
            else:
                entry.fail('Fail to add `%s` to utorrent' % entry['url'])

    def on_task_learn(self, task, config):
        """ Make sure all temp files are cleaned up when entries are learned """
        # If download plugin is enabled, it will handle cleanup.
        if 'download' not in task.config:
            download = plugin.get_plugin_by_name('download')
            download.instance.cleanup_temp_files(task)

    on_task_abort = on_task_learn
@event('plugin.register')
def register_plugin():
    """Register the utorrent output plugin with FlexGet (plugin API v2)."""
    plugin.register(PluginUtorrent, 'utorrent', api_ver=2)
|
pgermain/pbda | pbda_classify.py | Python | bsd-2-clause | 2,793 | 0.006087 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
PAC-BAYESIAN DOMAIN ADAPTATION (aka PBDA)
Executable script to use the classifier (to be used after the learning process).
@author: Pascal Germain -- http://researchers.lille.inria.fr/pgermain/
'''
import common
from pbda import *
from dataset import *
from kernel import *
import sys
import pickle
import argparse
common.print_header('CLASSIFICATION')
# Arguments parser
parser = argparse.ArgumentParser(description="", formatter_class=common.custom_formatter, epilog="")
parser.add_argument("--format", "-f", dest="format", choices=['matrix', 'svmlight'], default='matrix', help='Datasets format. Default: matrix (each line defines an example, the first column defines the label in {-1, 1}, and the next columns represent the real-valued features)')
parser.add_argument("--model", "-m", dest="model_file", default='model.bin', help="Model file name. Default: model.bin")
parser.add_argument("--pred", "-p", dest="prediction_file", default='predictions.out', help="Save predictions into files. Default: predictions.out")
parser.add_argument("test_file", help="Defines the file containing the dataset to classify.")
args = parser.parse_args()
# Main program
###############################################################################
print('... Loading model file ...')
###############################################################################
try:
with open(args.model_file, 'rb') as model:
classifier = pickle.load(model)
except:
print('ERROR: Unable to load model file "' + args.model_file + '".')
sys.exit(-1)
print('File "' + args.model_file + '" loaded.')
###############################################################################
print('\n... Loading dataset file ...')
###############################################################################
try:
if args.format == 'matrix':
test_data = dataset_from_matrix_file(args.test_file)
elif args.format == 'svmlight':
test_data = dataset_from_svmlight_file(args.test_file, classifier.X1_shape[1])
except:
print('ERROR: Unable to load test file "' + args.test_file + '".')
sys.exit(-1)
print(str(test_data.get_nb_examples()) + ' test examples loaded.')
###############################################################################
print('\n... Prediction ...')
######################################################################### | ######
predictions = classifier.predict(test_data.X)
try:
predictions.tofile(args.prediction_file, '\n')
print('File "' + args.prediction_file + '" created.')
except:
print('ERROR | : Unable to write prediction file "' + args.prediction_file + '".')
risk = classifier.calc_risk(test_data.Y, predictions=predictions)
print('Test risk = ' + str(risk))
|
thomas-schmid-ubnt/avocado | avocado/core/dispatcher.py | Python | gpl-2.0 | 8,715 | 0.000688 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2015
# Author: Cleber Rosa <cleber@redhat.com>
"""Extensions/plugins dispatchers."""
import copy
import logging
import sys
from stevedore import EnabledExtensionManager
from .settings import settings
from ..utils import stacktrace
class Dispatcher(EnabledExtensionManager):

    """
    Base dispatcher for various extension types
    """

    #: Default namespace prefix for Avocado extensions
    NAMESPACE_PREFIX = 'avocado.plugins.'

    def __init__(self, namespace, invoke_kwds=None):
        """
        :param namespace: entry point namespace to load extensions from
        :param invoke_kwds: keyword arguments passed to each extension's
                            constructor (optional)
        """
        # BUGFIX: a mutable default argument ({}) would be shared across
        # all Dispatcher instances; use None as the sentinel instead.
        if invoke_kwds is None:
            invoke_kwds = {}
        #: list of (entrypoint, exception) pairs for plugins that failed to load
        self.load_failures = []
        super(Dispatcher, self).__init__(namespace=namespace,
                                         check_func=self.enabled,
                                         invoke_on_load=True,
                                         invoke_kwds=invoke_kwds,
                                         on_load_failure_callback=self.store_load_failure,
                                         propagate_map_exceptions=True)

    def plugin_type(self):
        """
        Subset of entry points namespace for this dispatcher

        Given an entry point `avocado.plugins.foo`, plugin type is `foo`. If
        entry point does not conform to the Avocado standard prefix, it's
        returned unchanged.
        """
        if self.namespace.startswith(self.NAMESPACE_PREFIX):
            return self.namespace[len(self.NAMESPACE_PREFIX):]
        else:
            return self.namespace

    def fully_qualified_name(self, extension):
        """
        Returns the Avocado fully qualified plugin name

        :param extension: an Stevedore Extension instance
        :type extension: :class:`stevedore.extension.Extension`
        """
        return "%s.%s" % (self.plugin_type(), extension.entry_point.name)

    def settings_section(self):
        """
        Returns the config section name for the plugin type handled by itself
        """
        return "plugins.%s" % self.plugin_type()

    def enabled(self, extension):
        """Check whether an extension is not in the configured disable list."""
        disabled = settings.get_value('plugins', 'disable', key_type=list)
        return self.fully_qualified_name(extension) not in disabled

    def names(self):
        """
        Returns the names of the discovered extensions

        This differs from :func:`stevedore.extension.ExtensionManager.names`
        in that it returns names in a predictable order, by using standard
        :func:`sorted`.
        """
        return sorted(super(Dispatcher, self).names())

    def _init_plugins(self, extensions):
        """Load extensions, then order them: configured order first, then
        alphabetically for any extension not listed in the configuration."""
        super(Dispatcher, self)._init_plugins(extensions)
        self.extensions.sort(key=lambda x: x.name)
        configured_order = settings.get_value(self.settings_section(), "order",
                                              key_type=list, default=[])
        ordered = []
        for name in configured_order:
            for ext in self.extensions:
                if name == ext.name:
                    ordered.append(ext)
        for ext in self.extensions:
            if ext not in ordered:
                ordered.append(ext)
        self.extensions = ordered

    @staticmethod
    def store_load_failure(manager, entrypoint, exception):
        """Stevedore callback: record a plugin that failed to load."""
        manager.load_failures.append((entrypoint, exception))
class CLIDispatcher(Dispatcher):

    """
    Dispatcher for command line extensions (configure/run hooks).

    Loads every extension registered under the 'avocado.plugins.cli'
    entry point namespace.
    """

    def __init__(self):
        namespace = 'avocado.plugins.cli'
        super(CLIDispatcher, self).__init__(namespace)
class CLICmdDispatcher(Dispatcher):

    """
    Dispatcher for command line sub-command extensions (configure/run).

    Loads every extension registered under the 'avocado.plugins.cli.cmd'
    entry point namespace.
    """

    def __init__(self):
        namespace = 'avocado.plugins.cli.cmd'
        super(CLICmdDispatcher, self).__init__(namespace)
class JobPrePostDispatcher(Dispatcher):

    """
    Calls extensions before/after Job execution.

    Automatically adds all the extensions with entry points registered
    under 'avocado.plugins.job.prepost'.
    """

    def __init__(self):
        super(JobPrePostDispatcher, self).__init__('avocado.plugins.job.prepost')

    def map_method(self, method_name, job):
        """
        Invoke `method_name(job)` on every loaded extension defining it.

        A failing plugin is logged to the job log and does not stop the
        remaining plugins; SystemExit and KeyboardInterrupt always
        propagate.
        """
        for extension in self.extensions:
            try:
                if hasattr(extension.obj, method_name):
                    getattr(extension.obj, method_name)(job)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:  # plugins must not break the job pylint: disable=W0702
                job.log.error('Error running method "%s" of plugin "%s": %s',
                              method_name, extension.name, sys.exc_info()[1])
class ResultDispatcher(Dispatcher):

    """
    Dispatcher for result-generation plugins ('avocado.plugins.result').
    """

    def __init__(self):
        super(ResultDispatcher, self).__init__('avocado.plugins.result')

    def map_method(self, method_name, result, job):
        """
        Invoke `method_name(result, job)` on every extension defining it;
        plugin errors are logged to the job log and do not propagate.
        """
        for extension in self.extensions:
            try:
                if hasattr(extension.obj, method_name):
                    getattr(extension.obj, method_name)(result, job)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:  # plugins must not break result generation pylint: disable=W0702
                job.log.error('Error running method "%s" of plugin "%s": %s',
                              method_name, extension.name, sys.exc_info()[1])
class ResultEventsDispatcher(Dispatcher):

    """
    Dispatcher for live result-event plugins
    ('avocado.plugins.result_events'); each plugin's constructor
    receives the parsed command line arguments.
    """

    def __init__(self, args):
        super(ResultEventsDispatcher, self).__init__(
            'avocado.plugins.result_events',
            invoke_kwds={'args': args})
        self.log = logging.getLogger("avocado.app")

    def map_method(self, method_name, *args):
        """
        Invoke `method_name(*args)` on every extension defining it;
        plugin errors are logged to the application log.
        """
        for extension in self.extensions:
            try:
                if hasattr(extension.obj, method_name):
                    getattr(extension.obj, method_name)(*args)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:  # plugins must not break event delivery pylint: disable=W0702
                self.log.error('Error running method "%s" of plugin "%s": %s',
                               method_name, extension.name, sys.exc_info()[1])
class VarianterDispatcher(Dispatcher):

    """
    Dispatcher for varianter plugins ('avocado.plugins.varianter').

    Instances support (fragile) pickling so variants can be shipped to
    another process/machine -- see __getstate__/__setstate__.
    """

    def __init__(self):
        super(VarianterDispatcher, self).__init__('avocado.plugins.varianter')

    def __getstate__(self):
        """
        Very fragile pickle which works when all Varianter plugins are
        available on both machines.
        TODO: Replace this with per-plugin-refresh-mechanism
        """
        return {"extensions": getattr(self, "extensions")}

    def __setstate__(self, state):
        """
        Very fragile pickle which works when all Varianter plugins are
        available on both machines.
        TODO: Replace this with per-plugin-refresh-mechanism
        """
        # Re-run __init__ to rebuild the manager, then restore the
        # previously pickled extensions on top of it.
        self.__init__()
        self.extensions = state.get("extensions")

    def _map_method(self, method_name, deepcopy=False, *args, **kwargs):
        """
        Call `method_name` on every extension that defines it, collecting
        the per-plugin results in `ret`.

        :warning: **kwargs are not supported for deepcopy=True
        """
        # NOTE(review): no return statement is visible in this chunk;
        # upstream this method ends with `return ret` -- confirm the
        # trailing line was not lost.
        ret = []
        for ext in self.extensions:
            try:
                if hasattr(ext.obj, method_name):
                    method = getattr(ext.obj, method_name)
                    if deepcopy:
                        # Deep-copy positional args so plugins cannot
                        # mutate the caller's objects.
                        copied_args = [copy.deepcopy(arg) for arg in args]
                        ret.append(method(*copied_args))
                    else:
                        ret.append(method(*args, **kwargs))
            except SystemExit:
                raise
            except KeyboardInterrupt:
                raise
            except:  # catch any exception pylint: disable=W0702
                stacktrace.log_exc_info(sys.exc_info(), logger='avocado.debug')
|
rogerscristo/BotFWD | env/lib/python3.6/site-packages/pytests/test_inlinequeryresultgame.py | Python | mit | 3,198 | 0.001251 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import json
import pytest
from telegram import (InlineKeyboardButton, InlineQueryResultGame,
InlineQueryResultVoice, InlineKeyboardMarkup)
@pytest.fixture(scope='class')
def inline_query_result_game():
    """Class-scoped InlineQueryResultGame built from the test constants."""
    params = TestInlineQueryResultGame
    return InlineQueryResultGame(params.id,
                                 params.game_short_name,
                                 reply_markup=params.reply_markup)
class TestInlineQueryResultGame:
    """Tests for telegram.InlineQueryResultGame."""

    id = 'id'
    type = 'game'
    game_short_name = 'game short name'
    reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])

    def test_expected_values(self, inline_query_result_game):
        """The constructor must store every field it was given."""
        result = inline_query_result_game
        assert result.type == self.type
        assert result.id == self.id
        assert result.game_short_name == self.game_short_name
        assert result.reply_markup.to_dict() == self.reply_markup.to_dict()

    def test_to_json(self, inline_query_result_game):
        """to_json() must produce parseable JSON."""
        json.loads(inline_query_result_game.to_json())

    def test_to_dict(self, inline_query_result_game):
        """to_dict() must mirror every attribute of the result object."""
        result_dict = inline_query_result_game.to_dict()
        assert isinstance(result_dict, dict)
        assert result_dict['type'] == inline_query_result_game.type
        assert result_dict['id'] == inline_query_result_game.id
        assert result_dict['game_short_name'] == inline_query_result_game.game_short_name
        assert result_dict['reply_markup'] == inline_query_result_game.reply_markup.to_dict()

    def test_equality(self):
        """Equality/hash must ignore non-identifying fields but respect id."""
        a = InlineQueryResultGame(self.id, self.game_short_name)
        b = InlineQueryResultGame(self.id, self.game_short_name)
        c = InlineQueryResultGame(self.id, "")
        d = InlineQueryResultGame("", self.game_short_name)
        e = InlineQueryResultVoice(self.id, "", "")

        assert a == b
        assert hash(a) == hash(b)
        assert a is not b

        # same id -> equal even though game_short_name differs
        assert a == c
        assert hash(a) == hash(c)

        assert a != d
        assert hash(a) != hash(d)

        assert a != e
        assert hash(a) != hash(e)
|
RedhawkSDR/integration-gnuhawk | components/tagged_file_sink_f/tests/test_tagged_file_sink_f.py | Python | gpl-3.0 | 4,079 | 0.006864 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify is under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. | If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
    """Test for all component implementations in tagged_file_sink_f"""

    def testScaBasicBehavior(self):
        """Launch the component and verify SCA-mandated basic behavior."""
        # Launch the component using its default execparam values.
        execparam_props = self.getPropertySet(kinds=("execparam",),
                                              modes=("readwrite", "writeonly"),
                                              includeNil=False)
        execparams = dict((prop.id, any.from_any(prop.value)) for prop in execparam_props)
        self.launch(execparams)

        # The component reference must exist and implement CF::Resource.
        self.assertNotEqual(self.comp, None)
        self.assertEqual(self.comp.ref._non_existent(), False)
        self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)

        # A query of '[]' must return at least the expected property set.
        expectedProps = []
        expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"),
                                                 modes=("readwrite", "readonly"),
                                                 includeNil=True))
        expectedProps.extend(self.getPropertySet(kinds=("allocate",),
                                                 action="external",
                                                 includeNil=True))
        props = dict((prop.id, any.from_any(prop.value)) for prop in self.comp.query([]))
        # Query may return more than expected, but never less.
        for expectedProp in expectedProps:
            self.assertEqual(expectedProp.id in props, True)

        # Every uses/provides port declared in the SCD must be reachable
        # and implement the advertised interface.
        for uses in self.scd.get_componentfeatures().get_ports().get_uses():
            port_ref = self.comp.getPort(str(uses.get_usesname()))
            self.assertNotEqual(port_ref, None)
            self.assertEqual(port_ref._non_existent(), False)
            self.assertEqual(port_ref._is_a("IDL:CF/Port:1.0"), True)
        for provides in self.scd.get_componentfeatures().get_ports().get_provides():
            port_ref = self.comp.getPort(str(provides.get_providesname()))
            self.assertNotEqual(port_ref, None)
            self.assertEqual(port_ref._non_existent(), False)
            self.assertEqual(port_ref._is_a(provides.get_repid()), True)

        # start() and stop() must not raise.
        self.comp.start()
        self.comp.stop()

        # Simulate a regular component shutdown.
        self.comp.releaseObject()

    # TODO Add additional tests here
    #
    # See:
    #   ossie.utils.bulkio.bulkio_helpers,
    #   ossie.utils.bluefile.bluefile_helpers
    # for modules that will assist with testing components with BULKIO ports
# Entry point: run the SCA component test suite against the softpkg descriptor.
if __name__ == "__main__":
    ossie.utils.testing.main("../tagged_file_sink_f.spd.xml") # By default tests all implementations
|
percyfal/luigi | test/dynamic_import_test.py | Python | apache-2.0 | 1,810 | 0.001657 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest, LuigiTestCase
import luigi
import luigi.interface
import tempfile
import re
class | ExtraArgs(luigi.Task):
blah = luigi.Parameter(default=444)
class CmdlineTest(LuigiTestCase):
    """Tests command-line driven dynamic loading of task modules."""

    def test_dynamic_loading(self):
        """A module named via --module is imported and its task parsed."""
        interface = luigi.interface.DynamicArgParseInterface()
        # Write a throwaway module defining FooTask into test/ so that
        # `--module` can import it by (generated) name.
        with tempfile.NamedTemporaryFile(dir='test/', prefix="_foo_module", suffix='.py') as temp_module_file:
            temp_module_file.file.write(b'''
import luigi
class FooTask(luigi.Task):
    x = luigi.IntParameter()
''')
            temp_module_file.file.flush()
            temp_module_path = temp_module_file.name
            # Recover the importable module name (incl. random suffix) from the path.
            temp_module_name = re.search(r'/(_foo_module.*).py', temp_module_path).group(1)
            tasks = interface.parse(['--module', temp_module_name, 'FooTask', '--ExtraArgs-blah', 'xyz', '--x', '123'])
            # The global parameter on the unrelated ExtraArgs task is honored...
            self.assertEqual(ExtraArgs().blah, 'xyz')
            # ...and exactly one FooTask is built with the parsed int value.
            self.assertEqual(len(tasks), 1)
            task, = tasks
            self.assertEqual(task.x, 123)
            # The task class must be the one from the dynamically loaded module.
            temp_module = __import__(temp_module_name)
            self.assertEqual(task.__class__, temp_module.FooTask)
            self.assertEqual(task, temp_module.FooTask(x=123))
|
ksmit799/Toontown-Source | toontown/safezone/DistributedFishingSpot.py | Python | mit | 42,911 | 0.002377 | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.directtools.DirectGeometry import LineNodePath
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.fishing import FishGlobals
from toontown.shtiker import FishPage
from toontown.toonbase import TTLocalizer
from toontown.quest import Quests
from direct.actor import Actor
from direct.showutil import Rope
import math
from direct.task.Task import Task
import random
import random
from toontown.fishing import FishingTargetGlobals
from toontown.fishing import FishBase
from toontown.fishing import FishPanel
from toontown.effects import Ripples
from toontown.toontowngui import TTDialog
from toontown.toonbase import ToontownTimer
from direct.fsm import ClassicFSM, State
from direct.fsm import State
fr | om toontown.hood import ZoneUtil
from toontown.toontowngui import TeaserPanel
class DistributedFishingSpot(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedFishingSpot')
vZ | eroMax = 25.0
angleMax = 30.0
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.lastAvId = 0
self.lastFrame = 0
self.avId = 0
self.av = None
self.placedAvatar = 0
self.localToonFishing = 0
self.nodePath = None
self.collSphere = None
self.collNode = None
self.collNodePath = None
self.castTrack = None
self.pond = None
self.guiTrack = None
self.madeGui = 0
self.castGui = None
self.itemGui = None
self.pole = None
self.line = None
self.poleNode = []
self.ptop = None
self.bob = None
self.bobBobTask = None
self.splashSounds = None
self.ripples = None
self.line = None
self.lineSphere = None
self.power = 0.0
self.startAngleNP = 0
self.firstCast = 1
self.fishPanel = None
self.fsm = ClassicFSM.ClassicFSM('DistributedFishingSpot', [State.State('off', self.enterOff, self.exitOff, ['waiting',
'distCasting',
'fishing',
'reward',
'leaving']),
State.State('waiting', self.enterWaiting, self.exitWaiting, ['localAdjusting',
'distCasting',
'leaving',
'sellFish']),
State.State('localAdjusting', self.enterLocalAdjusting, self.exitLocalAdjusting, ['localCasting', 'leaving']),
State.State('localCasting', self.enterLocalCasting, self.exitLocalCasting, ['localAdjusting', 'fishing', 'leaving']),
State.State('distCasting', self.enterDistCasting, self.exitDistCasting, ['fishing', 'leaving', 'reward']),
State.State('fishing', self.enterFishing, self.exitFishing, ['localAdjusting',
'distCasting',
'waitForAI',
'reward',
'leaving']),
State.State('sellFish', self.enterSellFish, self.exitSellFish, ['waiting', 'leaving']),
State.State('waitForAI', self.enterWaitForAI, self.exitWaitForAI, ['reward', 'leaving']),
State.State('reward', self.enterReward, self.exitReward, ['localAdjusting',
'distCasting',
'leaving',
'sellFish']),
State.State('leaving', self.enterLeaving, self.exitLeaving, [])], 'off', 'off')
self.fsm.enterInitialState()
return
def disable(self):
self.ignore(self.uniqueName('enterFishingSpotSphere'))
self.setOccupied(0)
self.avId = 0
if self.castTrack != None:
if self.castTrack.isPlaying():
self.castTrack.finish()
self.castTrack = None
if self.guiTrack != None:
if self.guiTrack.isPlaying():
self.guiTrack.finish()
self.guiTrack = None
self.__hideBob()
self.nodePath.detachNode()
self.__unmakeGui()
self.pond.stopCheckingTargets()
self.pond = None
DistributedObject.DistributedObject.disable(self)
return
def delete(self):
del self.pond
del self.fsm
if self.nodePath:
self.nodePath.removeNode()
del self.nodePath
DistributedObject.DistributedObject.delete(self)
if self.ripples:
self.ripples.destroy()
def generateInit(self):
DistributedObject.DistributedObject.generateInit(self)
self.nodePath = NodePath(self.uniqueName('FishingSpot'))
self.angleNP = self.nodePath.attachNewNode(self.uniqueName('FishingSpotAngleNP'))
self.collSphere = CollisionSphere(0, 0, 0, self.getSphereRadius())
self.collSphere.setTangible(0)
self.collNode = CollisionNode(self.uniqueName('FishingSpotSphere'))
self.collNode.setCollideMask(ToontownGlobals.WallBitmask)
self.collNode.addSolid(self.collSphere)
self.collNodePath = self.nodePath.attachNewNode(self.collNode)
self.bobStartPos = Point3(0.0, 3.0, 8.5)
def generate(self):
DistributedObject.DistributedObject.generate(self)
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
self.nodePath.reparentTo(self.getParentNodePath())
self.accept(self.uniqueName('enterFishingSpotSphere'), self.__handleEnterSphere)
def setPondDoId(self, pondDoId):
self.pond = base.cr.doId2do[pondDoId]
self.area = self.pond.getArea()
self.waterLevel = FishingTargetGlobals.getWaterLevel(self.area)
def allowedToEnter(self):
if hasattr(base, 'ttAccess') and base.ttAccess and base.ttAccess.canAccess():
return True
return False
def handleOkTeaser(self):
self.dialog.destroy()
del self.dialog
place = base.cr.playGame.getPlace()
if place:
place.fsm.request('walk')
def __handleEnterSphere(self, collEntry):
if self.allowedToEnter():
if base.localAvatar.doId == self.lastAvId and globalClock.getFrameCount() <= self.lastFrame + 1:
self.notify.debug('Ignoring duplicate entry for avatar.')
return
if base.localAvatar.hp > 0 and base.cr.playGame.getPlace().fsm.getCurrentState().getName() != 'fishing':
self.cr.playGame.getPlace().detectedFishingCollision()
self.d_requestEnter()
else:
place = base.cr.playGame.getPlace()
if place:
place.fsm.request('stopped')
self.dialog = TeaserPanel.TeaserPanel(pageName='fishing', doneFunc=self.handleOkTeaser)
def d_requestEnter(self):
self.sendUpdate('requestEnter', [])
def rejectEnter(self):
self.cr.playGame.getPlace().setState('walk')
def d_requestExit(self):
self.sendUpdate('requestExit', [])
def d_doCast(self, power, heading):
self.sendUpdate('doCast', [power, heading])
def getSphereRadius(self):
return 1.5
def getParentNodePath(self):
return render
def setPosHpr(self, x, y, z, h, p, r):
self.nodePath.setPosHpr(x, y, z, h, p, r)
self.angleNP.setH(render, self.nodePath.getH(render))
def setOccupied(self, avId):
if self.av != None:
if not self.av.isEmpty():
self.__dropPole()
self.av.loop('neutral')
self.av.setParent(ToontownGlobals.SPRender)
self.av.startSmooth()
self.ignore(self.av.uniqueName('disable'))
self.__hideBob()
self.fsm.requestFinalState()
self.__removePole()
self.av = None
self.placedAvatar = 0
self.angleNP.setH(render, self.nodePath.getH(render))
self.__hideLine()
wasLocalToon = self.localToonFishing
self.lastAvId = self.avId
self.lastFrame = globalClock.getFrameCount()
self.avId = avId
self.localToonFishing = 0
if self.avId |
klahnakoski/Bugzilla-ETL | vendor/pyLibrary/env/emailer.py | Python | mpl-2.0 | 4,306 | 0.002555 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import smtplib
import sys
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from mo_logs import Log
from mo_dots import listwrap
from mo_dots import coalesce
from mo_kwargs import override
class Emailer:
    """
    Small SMTP mailer, usable stand-alone or as a context manager.

    All constructor parameters are captured by the @override decorator
    (mo_kwargs) into `kwargs` and stored as `self.settings`; the
    individual parameters are intentionally unused in the body.
    """

    @override
    def __init__(
        self,
        from_address,
        to_address,
        host,
        username,
        password,
        subject="catchy title",
        port=465,
        use_ssl=1,
        kwargs=None
    ):
        # Full parameter set, packed by @override.
        self.settings = kwargs
        # Active smtplib connection; None while no session is open.
        self.server = None

    def __enter__(self):
        """Open the SMTP(_SSL) connection and log in if credentials exist."""
        if self.server is not None:
            # A session is already open; Log.error reports the misuse.
            Log.error("Got a problem")

        if self.settings.use_ssl:
            self.server = smtplib.SMTP_SSL(self.settings.host, self.settings.port)
        else:
            self.server = smtplib.SMTP(self.settings.host, self.settings.port)

        if self.settings.username and self.settings.password:
            self.server.login(self.settings.username, self.settings.password)

        return self

    def __exit__(self, type, value, traceback):
        """Quit the SMTP session; quit() failures are logged and ignored."""
        try:
            self.server.quit()
        except Exception as e:
            Log.warning("Problem with smtp server quit(), ignoring problem", e)

        self.server = None

    def send_email(self,
        from_address=None,
        to_address=None,
        subject=None,
        text_data=None,
        html_data=None
    ):
        """Sends an email.

        from_addr is an email address; to_addrs is a list of email adresses.
        Addresses can be plain (e.g. "jsmith@example.com") or with real names
        (e.g. "John Smith <jsmith@example.com>").

        text_data and html_data are both strings.  You can specify one or both.
        If you specify both, the email will be sent as a MIME multipart
        alternative, i.e., the recipient will see the HTML content if his
        viewer supports it; otherwise he'll see the text content.
        """
        settings = self.settings
        # Arguments fall back to the configured defaults.
        from_address = coalesce(from_address, settings["from"], settings.from_address)
        to_address = listwrap(coalesce(to_address, settings.to_address, settings.to_addrs))

        if not from_address or not to_address:
            raise Exception("Both from_addr and to_addrs must be specified")
        if not text_data and not html_data:
            raise Exception("Must specify either text_data or html_data")

        # Build plain, HTML, or multipart/alternative depending on inputs.
        if not html_data:
            msg = MIMEText(text_data)
        elif not text_data:
            msg = MIMEText(html_data, 'html')
        else:
            msg = MIMEMultipart('alternative')
            msg.attach(MIMEText(text_data, 'plain'))
            msg.attach(MIMEText(html_data, 'html'))

        msg['Subject'] = coalesce(subject, settings.subject)
        msg['From'] = from_address
        msg['To'] = ', '.join(to_address)

        if self.server:
            # CALL AS PART OF A SMTP SESSION
            self.server.sendmail(from_address, to_address, msg.as_string())
        else:
            # CALL AS STAND-ALONE
            with self:
                self.server.sendmail(from_address, to_address, msg.as_string())
# NOTE(review): Python 2-only code path (print >> syntax, smtplib.SSLFakeFile).
if sys.hexversion < 0x020603f0:
    # versions earlier than 2.6.3 have a bug in smtplib when sending over SSL:
    # http://bugs.python.org/issue4066
    # Unfortunately the stock version of Python in Snow Leopard is 2.6.1, so
    # we patch it here to avoid having to install an updated Python version.
    import socket
    import ssl

    def _get_socket_fixed(self, host, port, timeout):
        # Backport of the fixed SMTP_SSL._get_socket from Python >= 2.6.3.
        if self.debuglevel > 0:
            print>> sys.stderr, 'connect:', (host, port)
        new_socket = socket.create_connection((host, port), timeout)
        new_socket = ssl.wrap_socket(new_socket, self.keyfile, self.certfile)
        self.file = smtplib.SSLFakeFile(new_socket)
        return new_socket

    smtplib.SMTP_SSL._get_socket = _get_socket_fixed
|
supertask/ChainX | server/websocket/recorded_operations/combiner.py | Python | mit | 1,425 | 0.00432 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import os
import json
SPLIT_CHAR = '#'
MSG_SPLIT_CHAR = '@'
MIN_BETWEEN_BEFORE_AFTER = 1
if len(sys.argv) < 3:
print "引数が足りない(<infile1> <infile2>)"
sys.exit(1)
in_file_1 = sys.argv[1]
in_file_2 = sys.argv[2]
out_file = "combined_file.txt"
wf = open(out_file, 'w')
last_ts = 0
with open(in_file_1) as rf:
last_line = ""
for line in rf:
wf.write(line)
last_line = line
_,op_line = last_line.split(MSG_SPLIT_CHAR)
op_dict = json.loads(op_line)
last_ts = long(op_dict['ts'])
rf = open(in_file_2, 'r')
# MIN_BETWEEN_BEFO | RE_AFTER秒(x * 10^7 * 100ナノ秒)の間隔をあける
currentTs = 0
exCurrentTs = 0
newCurrentTs = last_ts + MIN_BETWEEN_BEFORE_AFTER * (10 ** 7)
firstOperation = True
for line in rf:
line = line.replace('\n', '')
if line == '': continue
#print line
op_name,param = line.split(MSG_SPLIT_CHAR)
if op_name == "OPERATION":
op_ | dict = json.loads(param)
ts_str = op_dict["ts"]
currentTs = long(op_dict["ts"])
diffTs = currentTs - exCurrentTs
if firstOperation:
diffTs = 0
firstOperation = False
newCurrentTs += diffTs
line = line.replace(ts_str, str(newCurrentTs))
wf.write(line + "\n")
exCurrentTs = currentTs
rf.close()
wf.close()
print 'Output file: "%s"' % out_file
|
eduNEXT/edx-platform | lms/djangoapps/course_api/blocks/transformers/tests/test_video_urls.py | Python | agpl-3.0 | 5,797 | 0.002933 | """
Tests for VideoBlockURLTransformer.
"""
from unittest import mock
from openedx.core.djangoapps.content.block_structure.factory import BlockStructureFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.factories import ToyCourseFactory # lint-amnesty, pylint: disable=wrong-import-order
from ..student_view import StudentViewTransformer
from ..video_urls import VideoBlockURLTransformer
class TestVideoBlockURLTransformer(ModuleStoreTestCase):
    """
    Test the URL re-write for video URLs using VideoBlockURLTransformer.
    """

    def setUp(self):
        super().setUp()
        self.course_key = ToyCourseFactory.create().id
        self.course_usage_key = self.store.make_course_usage_key(self.course_key)
        self.block_structure = BlockStructureFactory.create_from_modulestore(self.course_usage_key, self.store)

    def get_pre_transform_data(self, block_key):
        """
        Return the video block's student view data before any transformation.
        """
        return self.block_structure.get_xblock(block_key).student_view_data()

    def change_encoded_videos_presentation(self, encoded_videos):
        """
        Flatten the encoded-videos mapping to {format: url} for comparison.
        """
        return {fmt: info['url'] for fmt, info in encoded_videos.items()}

    def get_post_transform_data(self, block_key):
        """
        Return the block's student view data after the transformers ran.
        """
        return self.block_structure.get_transformer_block_field(
            block_key, StudentViewTransformer, StudentViewTransformer.STUDENT_VIEW_DATA
        )

    def collect_and_transform(self):
        """
        Run the collect and transform phases of both transformers.
        """
        StudentViewTransformer.collect(self.block_structure)
        self.block_structure._collect_requested_xblock_fields()  # pylint: disable=protected-access
        StudentViewTransformer(['video']).transform(
            usage_info=None,
            block_structure=self.block_structure,
        )
        VideoBlockURLTransformer().transform(
            usage_info=None,
            block_structure=self.block_structure,
        )

    @mock.patch('xmodule.video_module.VideoBlock.student_view_data')
    def test_rewrite_for_encoded_videos(self, mock_video_data):
        """
        URLs of videos with available encodings must be re-written.
        """
        mock_video_data.return_value = {
            'encoded_videos': {
                'hls': {
                    'url': 'https://xyz123.cloudfront.net/XYZ123ABC.mp4',
                    'file_size': 0
                },
                'mobile_low': {
                    'url': 'https://1234abcd.cloudfront.net/ABCD1234abcd.mp4',
                    'file_size': 0
                }
            },
            'only_on_web': False
        }
        block_key = self.course_key.make_usage_key('video', 'sample_video')
        urls_before = self.change_encoded_videos_presentation(
            self.get_pre_transform_data(block_key)['encoded_videos'])
        self.collect_and_transform()
        urls_after = self.change_encoded_videos_presentation(
            self.get_post_transform_data(block_key)['encoded_videos'])
        for video_format, video_url in urls_after.items():
            assert urls_before[video_format] != video_url

    @mock.patch('xmodule.video_module.VideoBlock.student_view_data')
    def test_no_rewrite_for_third_party_vendor(self, mock_video_data):
        """
        URLs served by third-party vendors or CDNs must stay untouched.
        """
        mock_video_data.return_value = {
            'encoded_videos': {
                'youtube': {
                    'url': 'https://www.youtube.com/watch?v=abcd1234',
                    'file_size': 0
                },
                'fallback': {
                    'url': 'https://1234abcd.third_part_cdn.com/ABCD1234abcd.mp4',
                    'file_size': 0
                }
            },
            'only_on_web': False
        }
        block_key = self.course_key.make_usage_key('video', 'sample_video')
        urls_before = self.change_encoded_videos_presentation(
            self.get_pre_transform_data(block_key)['encoded_videos'])
        self.collect_and_transform()
        urls_after = self.change_encoded_videos_presentation(
            self.get_post_transform_data(block_key)['encoded_videos'])
        for video_format, video_url in urls_after.items():
            assert urls_before[video_format] == video_url

    @mock.patch('xmodule.video_module.VideoBlock.student_view_data')
    def test_no_rewrite_for_web_only_videos(self, mock_video_data):
        """
        Videos available on web only must not be rewritten at all.
        """
        mock_video_data.return_value = {
            'only_on_web': True
        }
        block_key = self.course_key.make_usage_key('video', 'sample_video')
        data_before = self.get_pre_transform_data(block_key)
        self.collect_and_transform()
        self.assertDictEqual(data_before, self.get_post_transform_data(block_key))
|
51reboot/homework-arch-4 | 2/chenkun/proc_pachong.py | Python | apache-2.0 | 1,151 | 0.02172 | #!/usr/bin/python
#coding=utf-8
import re,time,urllib,urllib2,cookielib,multiprocessing
img_path="/opt/imgs/"
def down_imgs():
url_login = 'http://www.douban.com/accounts/login'
values = {'form_email':'chenkun0226@163.com','form_password':'abc12345678'}
data = urllib.urlencode(values)
headers = {"User-Agent":"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1"}
req = urllib2.Request(url_login, data,headers)
c | j = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
response = opener.open(req)
the_page = response.read().decode("utf-8").encode("gbk")
url_list = re.findall('"http://.*.jpg" alt',the_page)
for url_u in url_list:
img_url = url_u.split(' ')[0].strip('"')
img_name = img_url.split('/')[-1]
urllib.urlretrieve(img_url,img_path+img | _name)
print "%s saved!" %img_name
if __name__ == "__main__":
    # NOTE(review): a 120-worker pool is created but only one down_imgs
    # task is ever submitted -- confirm whether more tasks were intended.
    pool = multiprocessing.Pool(processes=120)
    pool.apply_async(down_imgs,())
    # presumably gives the worker time to start before close() -- verify
    time.sleep(0.2)
    pool.close()
    pool.join()
|
qtumproject/qtum | test/functional/tool_wallet.py | Python | mit | 9,551 | 0.003036 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-wallet."""
import hashlib
import os
import stat
import subprocess
import textwrap
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
BUFFER_SIZE = 16 * 1024
class ToolWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_wallet_tool()
def bitcoin_wallet_process(self, *args):
binary = self.config["environment"]["BUILDDIR"] + '/src/qtum-wallet' + self.config["environment"]["EXEEXT"]
args = ['-datadir={}'.format(self.nodes[0].datadir), '-chain=%s' % self.chain] + list(args)
return subprocess.Popen([binary] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
def assert_raises_tool_error(self, error, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(p.poll(), 1)
assert_equal(stdout, '')
assert_equal(stderr.strip(), error)
def assert_tool_output(self, output, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(stderr, '')
assert_equal(stdout, output)
assert_equal(p.poll(), 0)
def wallet_shasum(self):
h = hashlib.sha1()
mv = memoryview(bytearray(BUFFER_SIZE))
with open(self.wallet_path, 'rb', buffering=0) as f:
for n in iter(lambda : f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
def wallet_timestamp(self):
return os.path.getmtime(self.wallet_path)
def wallet_permissions(self):
return oct(os.lstat(self.wallet_path).st_mode)[-3:]
def log_wallet_timestamp_comparison(self, old, new):
result = 'unchanged' if new == old else 'increased!'
self.log.debug('Wallet file timestamp {}'.format(result))
def test_invalid_tool_commands_and_args(self):
self.log.info('Testing that various invalid commands raise with specific error messages')
self.assert_raises_tool_error('Invalid command: foo', 'foo')
# `bitcoin-wallet help` raises an error. Use `bitcoin-wallet -help`.
self.assert_raises_tool_error('Invalid command: help', 'help')
self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create')
self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo')
self.assert_raises_tool_error('Error loading wallet.dat. Is wallet being used by other | process?', '-wallet=wallet.dat', 'info')
self.assert_raises_tool_error('Error: no wallet file at nonexistent.dat', '-wallet=nonexistent.dat', 'info')
def test_tool_wallet_info(self):
# Stop the node to close the wallet to call the info command.
self.stop_node(0)
self.log.info('Calling wallet tool info, | testing output')
#
# TODO: Wallet tool info should work with wallet file permissions set to
# read-only without raising:
# "Error loading wallet.dat. Is wallet being used by another process?"
# The following lines should be uncommented and the tests still succeed:
#
# self.log.debug('Setting wallet file permissions to 400 (read-only)')
# os.chmod(self.wallet_path, stat.S_IRUSR)
# assert(self.wallet_permissions() in ['400', '666']) # Sanity check. 666 because Appveyor.
# shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
out = textwrap.dedent('''\
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 0
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
self.log.debug('Setting wallet file permissions back to 600 (read/write)')
os.chmod(self.wallet_path, stat.S_IRUSR | stat.S_IWUSR)
assert(self.wallet_permissions() in ['600', '666']) # Sanity check. 666 because Appveyor.
#
# TODO: Wallet tool info should not write to the wallet file.
# The following lines should be uncommented and the tests still succeed:
#
# assert_equal(timestamp_before, timestamp_after)
# shasum_after = self.wallet_shasum()
# assert_equal(shasum_before, shasum_after)
# self.log.debug('Wallet file shasum unchanged\n')
def test_tool_wallet_info_after_transaction(self):
"""
Mutate the wallet with a transaction to verify that the info command
output changes accordingly.
"""
self.start_node(0)
self.log.info('Generating transaction to mutate wallet')
self.nodes[0].generate(1)
self.stop_node(0)
self.log.info('Calling wallet tool info after generating a transaction, testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
out = textwrap.dedent('''\
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 1
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
#
# TODO: Wallet tool info should not write to the wallet file.
# This assertion should be uncommented and succeed:
# assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_before, shasum_after)
self.log.debug('Wallet file shasum unchanged\n')
def test_tool_wallet_create_on_existing_wallet(self):
self.log.info('Calling wallet tool create on an existing wallet, testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling create: {}'.format(timestamp_before))
out = textwrap.dedent('''\
Topping up keypool...
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2000
Transactions: 0
Address Book: 0
''')
self.assert_tool_output(out, '-wallet=foo', 'create')
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling create: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_before, shasum_after)
self.log.debug('Wallet file shasum unchanged\n')
def test_getwalletinfo_on_different_wallet(self):
self.log.info('Starting node with arg -wallet=foo')
self.start_node(0, ['-wallet=foo'])
self.log.info('Calling getwalletinfo on a different wallet ("foo"), testing output')
shasum_before = self.walle |
jandom/rdkit | rdkit/Chem/Pharm2D/UnitTestGobbi.py | Python | bsd-3-clause | 5,595 | 0.008758 | # $Id$
#
# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the signatures
"""
from __future__ import print_function
import unittest
import os
from rdkit.six import next
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem.Pharm2D import Gobbi_Pharm2D, Generate
class TestCase(unittest.TestCase):
def setUp(self):
self.factory = Gobbi_Pharm2D.factory
def test1Sigs(self):
probes = [
('OCCC=O', {
'HA': (1, ((0, ), (4, ))),
'HD': (1, ((0, ), )),
'LH': (0, None),
'AR': (0, None),
'RR': (0, None),
'X': (0, None),
'BG': (0, None),
'AG': (0, None),
}),
('OCCC(=O)O', {
'HA': (1, ((0, ), (4, ))),
'HD': (1, ((0, ), (5, ))),
'LH': (0, None),
'AR': (0, None),
'RR': (0, None),
'X': (0, None),
'BG': (0, None),
'AG': (1, ((3, ), )),
}),
('CCCN', {
'HA': (1, ((3, ), )),
'HD': (1, ((3, ), )),
'LH': (0, None),
'AR': (0, None),
'RR': (0, None),
'X': (0, None),
'BG': (1, ((3, ), )),
'AG': (0, None),
}),
('CCCCC', {
'HA': (0, None),
'HD': (0, None),
'LH': (1, ((1, ), (3, ))),
'AR': (0, None),
'RR': | (0, None),
'X': (0, None),
'BG': (0, None),
'AG': (0, None),
}),
('CC1CCC1', {
'HA': (0, None),
'HD': (0, None),
'LH': (1, ((1, ), (3, ))),
| 'AR': (0, None),
'RR': (1, ((1, ), )),
'X': (0, None),
'BG': (0, None),
'AG': (0, None),
}),
('[SiH3]C1CCC1', {
'HA': (0, None),
'HD': (0, None),
'LH': (1, ((1, ), )),
'AR': (0, None),
'RR': (1, ((1, ), )),
'X': (1, ((0, ), )),
'BG': (0, None),
'AG': (0, None),
}),
('[SiH3]c1ccccc1', {
'HA': (0, None),
'HD': (0, None),
'LH': (0, None),
'AR': (1, ((1, ), )),
'RR': (0, None),
'X': (1, ((0, ), )),
'BG': (0, None),
'AG': (0, None),
}),
]
for smi, d in probes:
mol = Chem.MolFromSmiles(smi)
feats = self.factory.featFactory.GetFeaturesForMol(mol)
for k in d.keys():
shouldMatch, mapList = d[k]
feats = self.factory.featFactory.GetFeaturesForMol(mol, includeOnly=k)
if shouldMatch:
self.assertTrue(feats)
self.assertEqual(len(feats), len(mapList))
aids = [(x.GetAtomIds()[0], ) for x in feats]
aids.sort()
self.assertEqual(tuple(aids), mapList)
def test2Sigs(self):
probes = [('O=CCC=O', (149, )),
('OCCC=O', (149, 156)),
('OCCC(=O)O', (22, 29, 149, 154, 156, 184, 28822, 30134)), ]
for smi, tgt in probes:
sig = Generate.Gen2DFingerprint(Chem.MolFromSmiles(smi), self.factory)
self.assertEqual(len(sig), 39972)
bs = tuple(sig.GetOnBits())
self.assertEqual(len(bs), len(tgt))
self.assertEqual(bs, tgt)
def testOrderBug(self):
sdFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'Pharm2D', 'test_data', 'orderBug.sdf')
suppl = Chem.SDMolSupplier(sdFile)
m1 = next(suppl)
m2 = next(suppl)
sig1 = Generate.Gen2DFingerprint(m1, self.factory)
sig2 = Generate.Gen2DFingerprint(m2, self.factory)
ob1 = set(sig1.GetOnBits())
ob2 = set(sig2.GetOnBits())
self.assertEqual(sig1, sig2)
def testOrderBug2(self):
from rdkit.Chem import Randomize
from rdkit import DataStructs
probes = ['Oc1nc(Oc2ncccc2)ccc1']
for smi in probes:
m1 = Chem.MolFromSmiles(smi)
#m1.Debug()
sig1 = Generate.Gen2DFingerprint(m1, self.factory)
csmi = Chem.MolToSmiles(m1)
m2 = Chem.MolFromSmiles(csmi)
#m2.Debug()
sig2 = Generate.Gen2DFingerprint(m2, self.factory)
self.assertTrue(list(sig1.GetOnBits()) == list(sig2.GetOnBits()), '%s %s' % (smi, csmi))
self.assertEqual(DataStructs.DiceSimilarity(sig1, sig2), 1.0)
self.assertEqual(sig1, sig2)
for i in range(10):
m2 = Randomize.RandomizeMol(m1)
sig2 = Generate.Gen2DFingerprint(m2, self.factory)
if sig2 != sig1:
Generate._verbose = True
print('----------------')
sig1 = Generate.Gen2DFingerprint(m1, self.factory)
print('----------------')
sig2 = Generate.Gen2DFingerprint(m2, self.factory)
print('----------------')
print(Chem.MolToMolBlock(m1))
print('----------------')
print(Chem.MolToMolBlock(m2))
print('----------------')
s1 = set(sig1.GetOnBits())
s2 = set(sig2.GetOnBits())
print(s1.difference(s2))
self.assertEqual(sig1, sig2)
def testBitInfo(self):
m = Chem.MolFromSmiles('OCC=CC(=O)O')
bi = {}
sig = Generate.Gen2DFingerprint(m, Gobbi_Pharm2D.factory, bitInfo=bi)
self.assertEqual(sig.GetNumOnBits(), len(bi))
self.assertEqual(list(sig.GetOnBits()), sorted(bi.keys()))
self.assertEqual(sorted(bi.keys()), [23, 30, 150, 154, 157, 185, 28878, 30184])
self.assertEqual(sorted(bi[28878]), [[(0, ), (5, ), (6, )]])
self.assertEqual(sorted(bi[157]), [[(0, ), (6, )], [(5, ), (0, )]])
if __name__ == '__main__':
unittest.main()
|
ZachOhara/Project-Euler | python/p001_p010/problem001.py | Python | gpl-3.0 | 196 | 0.061224 |
def main():
print(sumMultiplesOf(3, 5, 1000))
def sumMultiplesOf(a, b, cap):
s = 0
for i in range(cap):
if i % a == 0 or i % b == 0:
s += i
return s |
if _ | _name__ == "__main__":
main() |
lfalvarez/votai | proposal_subscriptions/views.py | Python | gpl-3.0 | 3,370 | 0.000891 | # coding=utf-8
from django.views.generic.edit import CreateView, DeleteView
from proposal_subscriptions.models import SearchSubscription
from django.contrib.auth.mixins import LoginRequiredMixin
from pytimeparse.timeparse import timeparse
from django.forms import ModelForm
from django.http import JsonResponse
from django import forms
import json
from django.urls import reverse_lazy
from popular_proposal.filters import ProposalGeneratedAtFilter
from django.views.generic.list import ListView
from django.utils.translation import ugettext_lazy as _
OFTENITY_CHOICES = ((timeparse("1 day"), u"1 Día"),
(timeparse("2 days"), u"2 Días"),
(timeparse("1 weeks"), u"1 Semana"))
class SubscriptionCreateForm(ModelForm):
oftenity = forms.ChoiceField(choices=OFTENITY_CHOICES, label=_(u"Cada cuanto quieres que te notifiquemos?"))
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
s | elf.filter_class = kwargs.pop('filter_class')
self.search_params = kwargs.pop('search_params')
super(SubscriptionCreateForm, self).__init__(*args, **kwargs)
def save(self):
subscription = super(SubscriptionCreateForm, self).save(commit=False)
subscription.user = self.user
subscription.filter_class_module = self.filter_class.__module__
subscription.filter_class_name = self.filter_class.__name__
subscription.search_params = self.search_params
| subscription.save()
return subscription
class Meta:
model = SearchSubscription
fields = ['oftenity', ]
class SearchSubscriptionCreateView(LoginRequiredMixin, CreateView):
form_class = SubscriptionCreateForm
template_name = 'proposal_subscriptions/subscribe_to_search.html'
filter_class = ProposalGeneratedAtFilter
def get_form_kwargs(self):
kwargs = super(SearchSubscriptionCreateView, self).get_form_kwargs()
kwargs['user'] = self.request.user
kwargs['filter_class'] = self.filter_class
fields = self.filter_class().form.fields
search_params = {}
for field_key in fields:
value = self.request.POST.get(field_key, None)
if value:
search_params[field_key] = value
kwargs['search_params'] = search_params
return kwargs
def get_context_data(self, **kwargs):
context = super(SearchSubscriptionCreateView, self).get_context_data(**kwargs)
context['search_params_keys'] = json.dumps(list(self.filter_class().form.fields.keys()))
return context
def form_valid(self, form):
subscription = form.save()
return JsonResponse({'subscription_id': subscription.id})
class SearchSubscriptionDeleteView(LoginRequiredMixin, DeleteView):
model = SearchSubscription
slug_field = 'token'
slug_url_kwarg = 'token'
template_name = 'proposal_subscriptions/confirm_unsubscribe.html'
success_url = reverse_lazy('popular_proposals:home')
class SearchSubscriptionListView(LoginRequiredMixin, ListView):
model = SearchSubscription
template_name = "proposal_subscriptions/list.html"
context_object_name = 'subscriptions'
def get_queryset(self):
qs = super(SearchSubscriptionListView, self).get_queryset()
qs = qs.filter(user=self.request.user)
return qs
|
Sistemas-Multimedia/Icecast-tracker | Django/Savins/Icecast_tracker/manage.py | Python | gpl-2.0 | 258 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Icecast_tra | cker.se | ttings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
syncloud/lib | syncloudlib/application/paths.py | Python | gpl-3.0 | 223 | 0 | from syncloudlib.application.connection import api_get
def get_app_dir(app):
return api_get('/app/install_path?name={0}'.format(app))
def get_data_dir(app):
return | api_get('/app/data_path?name={0}'.format(app)) | |
inspirehep/invenio | modules/bibindex/lib/bibindex_engine.py | Python | gpl-2.0 | 105,731 | 0.002913 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009,
## 2010, 2011, 2012, 2013, 2014, 2015, 2016 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibIndex indexing engine implementation.
See bibindex executable for entry point.
"""
__revision__ = "$Id$"
import re
import sys
import time
import fnmatch
import inspect
from datetime import datetime
from invenio.config import CFG_SOLR_URL
from invenio.bibindex_engine_config import CFG_MAX_MYSQL_THREADS, \
CFG_MYSQL_THREAD_TIMEOUT, \
CFG_CHECK_MYSQL_THREADS, \
CFG_BIBINDEX_INDEX_TABLE_TYPE, \
CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR, \
CFG_BIBINDEX_UPDATE_MESSAGE, \
CFG_BIBINDEX_UPDATE_MODE, \
CFG_BIBINDEX_TOKENIZER_TYPE, \
CFG_BIBINDEX_WASH_INDEX_TERMS, \
CFG_BIBINDEX_SPECIAL_TAGS
from invenio.bibauthority_config import \
CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC
from invenio.bibauthority_engine import \
get_control_nos_from_recID
from invenio.search_engine import perform_request_search, \
get_index_stemming_language, \
get_synonym_terms, \
search_pattern, \
search_unit_in_bibrec
from invenio.dbquery import run_sql, DatabaseError, serialize_via_marshal, \
deserialize_via_marshal, wash_table_column_name
from invenio.bibindex_engine_washer import wash_index_term
from invenio.bibtask import task_init, write_message, get_datetime, \
task_set_option, task_get_option, task_get_task_param, \
task_update_progress, task_sleep_now_if_required
from invenio.intbitset import intbitset
from invenio.errorlib import register_exception
from invenio.solrutils_bibindex_indexer import solr_commit
from invenio.bibindex_tokenizers.BibIndexJournalTokenizer import \
CFG_JOURNAL_TAG, \
CFG_JOURNAL_PUBINFO_STANDARD_FORM, \
CFG_JOURNAL_PUBINFO_STANDARD_FORM_REGEXP_CHECK
from invenio.bibindex_termcollectors import TermCollector
from invenio.bibindex_engine_utils import load_tokenizers, \
get_all_index_names_and_column_values, \
get_index_tags, \
get_field_tags, \
get_marc_tag_indexes, \
get_nonmarc_tag_indexes, \
get_all_indexes, \
get_index_virtual_indexes, \
get_virtual_index_building_blocks, \
get_index_id_from_index_name, \
run_sql_drop_silently, \
get_min_last_updated, \
remove_inexistent_indexes, \
get_all_synonym_knowledge_bases, \
get_index_remove_stopwords, \
get_index_remove_html_markup, \
get_index_remove_latex_markup, \
filter_for_virtual_indexes, \
get_records_range_for_index, \
make_prefix, \
list_union, \
recognize_marc_tag, \
is_index_using_ | unicode_520
from invenio.bibindex_termcollectors import \
TermCollector, \
NonmarcTermCollector
from invenio.memoiseutils | import Memoise
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
## precompile some often-used regexp for speed reasons:
re_subfields = re.compile('\$\$\w')
re_datetime_shift = re.compile("([-\+]{0,1})([\d]+)([dhms])")
re_prefix = re.compile('__[a-zA-Z1-9]*__')
nb_char_in_line = 50 # for verbose pretty printing
chunksize = 1000 # default size of chunks that the records will be treated by
base_process_size = 4500 # process base size
_last_word_table = None
_TOKENIZERS = load_tokenizers()
def list_unique(_list):
"""Returns a _list with duplicates removed."""
_dict = {}
for e in _list:
_dict[e] = 1
return _dict.keys()
## safety function for killing slow DB threads:
def kill_sleepy_mysql_threads(max_threads=CFG_MAX_MYSQL_THREADS,
thread_timeout=CFG_MYSQL_THREAD_TIMEOUT):
"""Check the number of DB threads and if there are more than
MAX_THREADS of them, lill all threads that are in a sleeping
state for more than THREAD_TIMEOUT seconds. (This is useful
for working around the the max_connection problem that appears
during indexation in some not-yet-understood cases.) If some
threads are to be killed, write info into the log file.
"""
res = run_sql("SHOW FULL PROCESSLIST")
if len(res) > max_threads:
for row in res:
r_id, dummy, dummy, dummy, r_command, r_time, dummy, dummy = row
if r_command == "Sleep" and int(r_time) > thread_timeout:
run_sql("KILL %s", (r_id, ))
write_message("WARNING: too many DB threads, " + \
"killing thread %s" % r_id, verbose=1)
return
def get_associated_subfield_value(recID, tag, value, associated_subfield_code):
"""Return list of ASSOCIATED_SUBFIELD_CODE, if exists, for record
RECID and TAG of value VALUE. Used by fulltext indexer only.
Note: TAG must be 6 characters long (tag+ind1+ind2+sfcode),
otherwise en empty string is returned.
FIXME: what if many tag values have the same value but different
associated_subfield_code? Better use bibrecord library for this.
"""
out = ""
if len(tag) != 6:
return out
bibXXx = "bib" + tag[0] + tag[1] + "x"
bibrec_bibXXx = "bibrec_" + bibXXx
query = """SELECT bb.field_number, b.tag, b.value FROM %s AS b, %s AS bb
WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id AND tag LIKE
%%s%%""" % (bibXXx, bibrec_bibXXx)
res = run_sql(query, (recID, tag[:-1]))
field_number = -1
for row in res:
if row[1] == tag and row[2] == value:
field_number = row[0]
if field_number > 0:
for row in res:
if row[0] == field_number and row[1] == tag[:-1] + associated_subfield_code:
out = row[2]
break
return out
def get_author_canonical_ids_for_recid_range(recID1, recID2):
"""
Return list of tuples (recid, author canonical IDs (e.g. `J.Ellis.1')) for
the given range of records. Done by consulting BibAuthorID module.
"""
return run_sql("""SELECT bibrec, data FROM aidPERSONIDDATA
JOIN aidPERSONIDPAPERS USING (personid) WHERE bibrec BETWEEN %s AND %s
AND tag='canonical_name' AND flag>-2""", (recID1, recID2))
def get_author_canonical_ids_for_recid(recID):
"""
Return list of author canonical IDs (e.g. `J.Ellis.1')) for
the given range of records. Done by consulting BibAuthorID module.
"""
return [elem[0] for elem in run_sql("""SELECT data FROM aidPERSONIDDATA
JOIN aidPERSONIDPAPERS USING (personid) WHERE bibrec=%s
AND tag='canonical_name' AND flag>-2""", (recID, ))]
def swap_temporary_reindex_tables(index_id, reindex_prefix="tmp_"):
"""Atomically swap reindexed temporary table with the original one.
Delete the now-old one."""
write_message("Putting new tmp index tables " + \
"for id %s into production" % index_id)
run_sql(
"RENAME TABLE " +
"idxWORD%02dR TO old_idxWORD%02dR," % (index_id, index_id) +
"%sidxWORD%02dR TO idxWORD%02dR," % (reindex_prefix, index_id, index_id) +
"idxWORD%02dF TO old_idxWORD%02dF," % (index_id, index_id) +
"%sidxWORD%02dF TO idxWORD%02dF," % (reindex_prefix, index_id, index_id) +
"idxPAIR%02dR TO old_idxPAIR%02dR," % (index_id, index_id) +
"%sidxPAIR%02dR TO idxPAIR%02dR," % (reindex_prefix, index_id, index_id) +
"idxPAIR%02dF TO old_idxPAIR%02dF," % (index_id, index_id) +
"%sidxPAIR%02dF TO idxPAIR%02dF," % (reindex_prefix, index_id, |
rye761/fDict | fdict.py | Python | mit | 7,906 | 0.007336 | from flask import Flask, request, redirect, url_for, render_templ | ate, flash, abort, Response, jsonify
from flask.ext.pymongo import PyMongo
from flask.ext.login import LoginManager, UserMixin, login_user, logout_user, login_required, current_user
from flask.ext.bcrypt import Bcrypt
from bson import ObjectId
from flask_jsglue import JSGlue
#Config
DEBUG = True
SECRET_KEY = '5eb5159208129d335c34fbbf838c83d9428 | 6bfaf5b064844' #This should be changed if this is ever run in production, though it is unlikely it will ever need to be, seeing as this is a test project.
app = Flask(__name__)
app.config.from_object(__name__)
mongo = PyMongo(app)
login_manager = LoginManager()
login_manager.init_app(app)
bcrypt = Bcrypt(app)
jsglue = JSGlue(app)
class User(UserMixin):
def __init__(self, username, userID):
self.username = username
self.userID = userID
def get_id(self):
return str(self.userID)
@login_manager.user_loader
def load_user(userID):
objectID = ObjectId(userID)
userObject = mongo.db.fdict_users.find_one({'_id': objectID})
if userObject:
user = User(userObject['username'], userObject['_id'])
return user
else:
return None
@app.route('/')
def index():
recent_entries = list(mongo.db.fdict_words.find().sort('_id', -1).limit(10))
for entry in recent_entries:
entry['view_url'] = url_for('view_definition', definitionid=str(entry['_id']))
entry['user_username'] = mongo.db.fdict_users.find_one({'_id': entry['user']})['username']
return render_template('index.html', recent_entries=recent_entries)
@app.route('/search', methods=['GET'])
def search_word():
query = request.args.get('q')
if query:
query = request.args.get('q')
results = list(mongo.db.fdict_words.find({'$text': {'$search': query}}))
for entry in results:
entry['view_url'] = url_for('view_definition', definitionid=str(entry['_id']))
entry['user_username'] = mongo.db.fdict_users.find_one({'_id': entry['user']})['username']
return render_template('search.html', results=results)
else:
#This will happen if the user sends an empty search. Just take them back to home and let them know what they did wrong.
flash('You need to submit a query to search', 'danger')
return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register_user():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
password_confirm = request.form['password_confirm']
#check that everthing was filled out, and that passwords match
if username and password and password_confirm and password == password_confirm:
password_hash = bcrypt.generate_password_hash(password)
userObjectID =mongo.db.fdict_users.insert({'username': username, 'password_hash': password_hash})
user = User(username, userObjectID)
login_user(user)
return redirect(url_for('index'))
else:
flash('Missing entry or unmatched passwords', 'danger')
return redirect(url_for('register_user', username=username))
else:
username = request.args.get('username')
return render_template('register.html', username=username)
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
userObject = mongo.db.fdict_users.find_one({'username': username})
if userObject and bcrypt.check_password_hash(userObject['password_hash'], password):
user = User(userObject['username'], userObject['_id'])
login_user(user)
return redirect(url_for('index'))
else:
flash('Invalid credentials', 'danger')
return redirect(url_for('login', username=username))
else:
username = request.args.get('username')
return render_template('login.html', username=username)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/create', methods=['GET', 'POST'])
@login_required
def create():
if request.method == 'POST':
word = request.form['word']
definition = request.form['definition']
if word and definition:
mongo.db.fdict_words.insert({'user': current_user.userID, 'word': word, 'definition': definition, 'votes': 0, 'voters': []})
flash('Definition submitted', 'success')
return redirect(url_for('index'))
else:
flash('You need to provide both a word and definition', 'danger')
return redirect(url_for('create'))
return redirect(url_for('index'))
else:
return render_template('create.html')
@app.route('/view/<definitionid>')
def view_definition(definitionid):
definitionObjectId = ObjectId(definitionid)
definitionObject = mongo.db.fdict_words.find_one({'_id': definitionObjectId})
definitionUserObject = mongo.db.fdict_users.find_one({'_id': definitionObject['user']})
if current_user.is_authenticated():
if current_user.userID in definitionObject['voters']:
hasVoted = True
else:
hasVoted = False
else:
hasVoted = False
if definitionObject:
return render_template('view_definition.html', word=definitionObject['word'], definition=definitionObject['definition'], votes=len(definitionObject['voters']), user=definitionUserObject, defid=definitionid, hasVoted=hasVoted)
else:
abort(404)
@app.route('/addvote', methods=['POST'])
def add_vote():
#This function adds a vote for a definition. It is meant to be called using AJAX
if current_user.is_authenticated():
definitionObjectId = ObjectId(request.form['definition_id'])
definitionObject = mongo.db.fdict_words.find_one({'_id': definitionObjectId})
if current_user.userID in definitionObject['voters']:
abort(501)
definitionObject['voters'].append(current_user.userID)
mongo.db.fdict_words.save(definitionObject)
return jsonify(votes = len(definitionObject['voters']))
else:
return abort(501)
@app.route('/revokevote', methods=['POST'])
def revoke_vote():
#This function removes a vote for a definition. It is meant to be called using AJAX
if current_user.is_authenticated():
definitionObjectId = ObjectId(request.form['definition_id'])
definitionObject = mongo.db.fdict_words.find_one({'_id': definitionObjectId})
if not current_user.userID in definitionObject['voters']:
abort(501)
definitionObject['voters'].remove(current_user.userID)
mongo.db.fdict_words.save(definitionObject)
return jsonify(votes = len(definitionObject['voters']))
else:
return abort(501)
@app.route('/deletedef', methods=['POST'])
def delete_def():
#This function removes an entry if the calling user created it. It is meant to be called using AJAX
if current_user.is_authenticated():
definitionObjectId = ObjectId(request.form['definition_id'])
definitionObject = mongo.db.fdict_words.find_one({'_id': definitionObjectId})
definitionUserObject = mongo.db.fdict_users.find_one({'_id': definitionObject['user']})
if not current_user.userID == definitionUserObject['_id']:
abort(501)
print('about to delete')
resp = Response(None, status=200)
mongo.db.fdict_words.remove(definitionObjectId);
return resp
else:
return abort(501)
if __name__ == '__main__':
app.run()
mongo.db.fdict_words.ensure_index([
('word', 'text'),
],
name='search_index',
weights={
'word':100
})
|
SamoFMF/stiri_v_vrsto | alphabeta.py | Python | mit | 17,907 | 0.007014 | from logika import IGRALEC_R, IGRALEC_Y, PRAZNO, NEODLOCENO, NI_KONEC, MAKSIMALNO_STEVILO_POTEZ, nasprotnik
from five_logika import Five_logika
from powerup_logika import Powerup_logika, POWER_STOLPEC, POWER_ZETON, POWER_2X_NW, POWER_2X_W
from pop10_logika import Pop10_logika
from pop_logika import Pop_logika
import random
#######################
## ALGORITEM MINIMAX ##
#######################
class AlphaBeta:
# Algoritem alphabeta
def __init__(self, globina):
self.globina = globina # Kako globoko iščemo?
self.prekinitev = False # Želimo algoritem prekiniti?
self.igra = None # Objekt, ki predstavlja igro
self.jaz = None # Katerega igralca igramo?
self.poteza = None # Sem vpišemo potezo, ko jo najdemo
def prekini(self):
'''Metoda, ki jo pokliče GUI, če je treba nehati razmišljati, ker
je uporabnik zapr okno ali izbral novo igro.'''
self.prekinitev = True
def izracunaj_potezo(self, igra):
'''Izračunaj potezo za trenutno stanje dane igre.'''
# To metodo pokličemo iz vzporednega vlakna
self.igra = igra
self.jaz = self.igra.na_potezi
self.prekinitev = False # Glavno vlakno bo to nastavilo na True, če bomo morali prekiniti
self.poteza = None # Sem napišemo potezo, ko jo najdemo
# Poženemo alphabeta
(poteza, vrednost) = self.alphabeta(self.globina, -AlphaBeta.NESKONCNO, AlphaBeta.NESKONCNO, True)
self.jaz = None
self.igra = None
if not self.prekinitev:
# Nismo bili prekinjeni, torej potezo izvedemo
self.poteza = poteza
def uredi_poteze(self, poteze):
'''Vrne urejen seznam potez, ki ga nato uporabimo v alphabeta.'''
urejene_poteze = [] # Urejen seznam potez
if isinstance(self.igra, Five_logika):
# Imamo 5 v vrsto
zeljen_vrstni_red = [1,4,7] # Željen vrstni red, če so na voljo vse poteze
zeljen_vrstni_red = random.sample(zeljen_vrst | ni_red, 3)
for i in range(1,3):
dodajamo = [4-i,4+i] # Poteze, ki jih želimo dodati
dodajamo = random.sample(dodajamo, 2)
| for j in dodajamo:
zeljen_vrstni_red.append(j)
elif isinstance(self.igra, Powerup_logika):
# Imamo Power Up igro
# Dodajmo dvojne poteze brez možnosti zmage
# Najprej dodamo te, ker če bi takšne z možnostjo zmage,
# bi jih (lahek) algoritem že na začetku porabil
zeljen_vrstni_red = [74]
for i in range(1,4):
zeljen_vrstni_red += random.sample([74+i, 74-i], 2)
# Dodajmo dvojne poteze z možno zmago
zeljen_vrstni_red.append(84)
for i in range(1,4):
zeljen_vrstni_red += random.sample([84+i, 84-i], 2)
# Dodajmo 'navadne' poteze
zeljen_vrstni_red.append(4)
for i in range(1,4):
zeljen_vrstni_red += random.sample([4+i, 4-i], 2)
# Dodajmo poteze, ki poteptajo stolpec pod sabo
zeljen_vrstni_red.append(14)
for i in range(1,4):
zeljen_vrstni_red += random.sample([14+i, 14-i], 2)
# Dodajmo poteze, ki odstranijo nasprotnikov žeton
zeljen_vrstni_red += random.sample([24+7*i for i in range(6)], 6)
for i in range(1,4):
dodajamo = [24+i+7*j for j in range(6)] + [24-i+7*j for j in range(6)]
zeljen_vrstni_red += random.sample(dodajamo, 12)
elif isinstance(self.igra, Pop10_logika):
# Imamo Pop 10 igro
if self.igra.faza == 1:
# Smo v fazi odstranjevanja žetonov
zeljen_vrstni_red = random.sample([18, 68, 25, 75], 4) # Središčni dve polji
dodajamo = [10, 11, 12, 17, 19, 24, 26, 31, 32, 33]
dodajamo += [50+i for i in dodajamo]
zeljen_vrstni_red += random.sample(dodajamo, len(dodajamo))
dodajamo = [i for i in range(2, 7)] + [i for i in range(37, 42)] + [9+7*i for i in range(4)] + [13+7*i for i in range(4)]
dodajamo += [50+i for i in dodajamo]
zeljen_vrstni_red += random.sample(dodajamo, len(dodajamo))
dodajamo = [1+7*i for i in range(6)] + [7+7*i for i in range(6)]
dodajamo += [50+i for i in dodajamo]
zeljen_vrstni_red += random.sample(dodajamo, len(dodajamo))
else:
# Smo v fazi dodajanja žetonov (lahko faza 0 ali 2)
zeljen_vrstni_red = [4]
for i in range(1,4):
zeljen_vrstni_red += random.sample([4+i, 4-i], 2)
else:
# Imamo 4 v vrsto ali Pop Out
zeljen_vrstni_red = [4,-4] # Željen vrstni red, če so na voljo vse poteze
for i in range(1,4):
dodajamo = [4-i,-4+i,4+i,-4-i] # Poteze, ki jih želimo dodati
dodajamo = random.sample(dodajamo, 4)
for j in dodajamo:
zeljen_vrstni_red.append(j)
for i in zeljen_vrstni_red:
if i in poteze:
# Poteza je na voljo, treba jo je dodati
urejene_poteze.append(i)
else:
# Poteza ni na voljo
continue
return urejene_poteze
# Vrednosti igre
ZMAGA = 10**5
NESKONCNO = ZMAGA + 1 # Več kot zmaga
def vrednost_pozicije(self):
'''Vrne oceno vrednosti polozaja.'''
vrednost = 0
if self.igra is None:
# Če bi se slučajno zgodilo, da ne bi bila izbrana nobena igra
return vrednost
elif self.igra.na_potezi is None:
# Igre je konec
# Sem ne bi smeli nikoli priti zaradi if stavkov v alphabeta
return vrednost
else:
delez = 0.8 # Faktor za katerega mu je izguba manj vredna kot dobiček
tocke = [0, 0] # Sem bomo shranili število točk igralcev [R,Y]
# Najprej preverimo kateri tip igre imamo
if isinstance(self.igra, Five_logika):
# Imamo 5 v vrsto, torej imamo zmagovalne štirke (robne)
# ter petke, pokličimo jih spodaj
stirke_R = self.igra.stirke_R
stirke_Y = self.igra.stirke_Y
petke = self.igra.petke
# Pojdimo skozi vse štirke & petke ter jih primerno ovrednotimo
# Štirke / petke, ki vsebujejo žetone obeh igralcev so vredne 0 točk
# Prazne petke so vredne 0.1 točke
# Štirke so vredne 0.2 + a/5 točke, kjer je a število žetonov v štirki,
# če je igralec pravilne barve za to štirko.
# Petke so vredne a/5 točke, kjer je a število žetonov v petki.
for s in stirke_R: # Štirke na voljo rdečemu
((i1,j1),(i2,j2),(i3,j3),(i4,j4)) = s
stirka = [self.igra.polozaj[i1][j1], self.igra.polozaj[i2][j2],
self.igra.polozaj[i3][j3], self.igra.polozaj[i4][j4]]
if IGRALEC_Y in stirka:
continue
else:
tocke[0] += 0.2 + stirka.count(IGRALEC_R) / 5
for s in stirke_Y: # Štirke na voljo rumenemu
((i1,j1),(i2,j2),(i3,j3),(i4,j4)) = s
stirka = [self.igra.polozaj[i1][j1], self.igra.polozaj[i2][j2],
self.igra.polozaj[i3][j3], self.igra.polozaj[i4][j4]]
if IGRALEC_R in stirka:
continue
else:
tocke[1] += 0.2 + stirka.count(IGRALEC_Y) / 5
for p in petke:
((i1,j1),(i2,j2),(i3,j3),(i4,j4),(i5,j5)) = p
petka = [self.igra.polozaj[i1][j1], self.igra.polozaj[i2][j2],
self.igra.polozaj[i3][j3], self.igra.polozaj[i4][j4],
self.igra.polozaj[i5][j5]]
barve = list(set(st |
twisted/quotient | xquotient/stressmtp.py | Python | mit | 3,190 | 0.006897 | import os
import sys
import time
from StringIO import StringIO
from os.path import join as opj
from twisted.python import log
from twisted.internet import defer
from twisted.mail import smtp
from twisted.internet import reactor
def sendmail(smtphost, port, from_addr, to_addrs, msg):
    """Deliver one message over SMTP via Twisted.

    Opens a TCP connection to smtphost:port and hands the message body to
    an SMTPSenderFactory.  Returns a Deferred that fires with the delivery
    result (or failure).
    """
    body = StringIO(str(msg))
    done = defer.Deferred()
    sender = smtp.SMTPSenderFactory(from_addr, to_addrs, body, done)
    sender.noisy = False
    reactor.connectTCP(smtphost, port, sender)
    return done
class MessageSendingController:
    """Fans a shared list of message files out over several SMTP connections.

    Each MessageSender pulls messages via next() until the list is empty;
    byte counts are accumulated in sentBytes via the callback chain.
    """
    sentBytes = 0
    def __init__(self, host, port, recip, messages):
        self.host = host
        self.port = port
        self.recip = recip
        self.messages = messages[:]  # private copy: senders pop from it
        self.nMsgs = len(self.messages)
    def next(self):
        # Pop the next message path and return its contents.  Exhaustion is
        # signalled with StopIteration.  Only an empty list (IndexError) or
        # an unreadable file (IOError) end the iteration -- the previous
        # bare "except:" silently swallowed every error, hiding real bugs.
        try:
            path = self.messages.pop(0)
            return file(path, 'rU').read()
        except (IndexError, IOError):
            raise StopIteration
    def send(self, nConns=1, bps=None):
        """Start nConns concurrent senders; returns a Deferred firing with
        the total number of bytes sent once every sender has finished."""
        d = []
        pb = self.messages.append  # failed messages are put back on the list
        for c in range(nConns):
            d.append(MessageSender(self.host, self.port, self.recip, self.next, bps, pb
                ).sendMessages(
                ).addCallback(self._cbSenderFinished
                ))
        return defer.DeferredList(d
            ).addCallback(self._cbSentAll
            )
    def _cbSenderFinished(self, bytes):
        # Accumulate the byte count reported by one finished sender.
        self.sentBytes += bytes
    def _cbSentAll(self, result):
        # All senders done: report the grand total.
        return self.sentBytes
class MessageSender:
    """Sends messages one after another over a single logical connection.

    msgs is a zero-argument callable returning the next message body and
    raising StopIteration when none remain; putBack re-queues a message
    whose delivery failed.
    """
    def __init__(self, host, port, recip, msgs, bps, putBack):
        self.host = host
        self.port = port
        self.recip = recip
        self.msgs = msgs
        self.bps = bps  # NOTE(review): stored but never used here -- confirm intent
        self.putBack = putBack
    def msgFrom(self):
        # Fixed envelope sender used for every message.
        return "foo@bar"
    def msgTo(self):
        return self.recip
    def sendMessages(self, _bytes=0):
        """Send messages until exhausted; Deferred fires with bytes sent."""
        try:
            m = self.msgs()
        except StopIteration:
            # No messages left: report the running byte total.
            return defer.succeed(_bytes)
        else:
            # Send one message, then recurse via the callback chain.
            return self.sendOneMessage(m
                ).addErrback(self._ebSendMessages, m
                ).addCallback(self._cbSendMessages, _bytes + len(m)
                )
    def sendOneMessage(self, msg):
        return sendmail(self.host, self.port, self.msgFrom(), [self.msgTo()], msg
            )
    def _ebSendMessages(self, failure, msg):
        # Delivery failed: put the message back for another sender and log.
        self.putBack(msg)
        log.err(failure)
    def _cbSendMessages(self, result, bytes):
        # Continue with the next message, carrying the byte total forward.
        return self.sendMessages(bytes)
def sendDirectory(path, host, port, recip):
    """Return a controller loaded with every file found in *path*."""
    files = [opj(path, name) for name in os.listdir(path)]
    return MessageSendingController(host, port, recip, files)
def finished(bytes, nMsgs, startTime):
    """Log bytes-per-second and messages-per-second since *startTime*."""
    elapsed = time.time() - startTime
    log.msg('%4.2f bps' % (bytes / elapsed))
    log.msg('%4.2f mps' % (nMsgs / elapsed))
def main(path, host, port, recip, conns=4):
    """Entry point: mail every file in *path*, then report throughput."""
    log.startLogging(sys.stdout)
    c = sendDirectory(path, host, int(port), recip)
    # len(c.messages) is evaluated now, before the reactor starts draining
    # the list, so the final messages-per-second figure uses the full count.
    c.send(int(conns)
        ).addCallback(finished, len(c.messages), time.time(),
        ).addBoth(lambda _: reactor.stop()
        )
    reactor.run()
def usage():
    """Return the one-line command-line usage string for this script."""
    return ("Usage: {0} <directory of messages> <host> <port> "
            "<recipient address> [<concurrent connections>] ".format(sys.argv[0]))
|
gpldecha/gym-square | setup.py | Python | mit | 407 | 0.004914 | from setupto | ols import setup
from setuptools import find_packages

# Package metadata for the gym_square environment.  The stray separator
# character after the license argument (extraction artifact) broke the
# setup() call and has been removed.
setup(name='gym_square',
      version='0.0.1',
      author='Guillaume de Chambrier',
      author_email='chambrierg@gmail.com',
      description='A simple square world environment for openai/gym',
      packages=find_packages(),
      url='https://github.com/gpldecha/gym-square',
      license='MIT',
      install_requires=['gym']
      )
|
Bysmyyr/chromium-crosswalk | tools/telemetry/telemetry/web_perf/metrics/timeline_based_metric.py | Python | bsd-3-clause | 3,168 | 0.005051 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class TimelineBasedMetricException(Exception):
  """Raised by TimelineBasedMetric implementations when a problem arises
  while computing the metric (e.g. unsupported overlapping records).
  """
def _TimeRangesHasOverlap(iterable_time_ranges):
""" Returns True if there is are overlapped ranges in time ranges.
iterable_time_ranges: an iterable of time ranges. Each time range is a
tuple (start time, end time).
"""
# Sort the ranges by the start time
sorted_time_ranges = sorted(iterable_time_ranges)
last_range = sorted_time_ranges[0]
for current_range in sorted_time_ranges[1:]:
start_current_range = current_range[0]
end_last_range = last_range[1]
if start_current_range < end_last_range:
return True
last_range = current_range
return False
def IsEventInInteractions(event, interaction_records):
  """ Return True if event is in any of the interaction records' time range.

  Args:
    event: an instance of telemetry.timeline.event.TimelineEvent.
    interaction_records: a list of interaction records, each an instance of
      telemetry.web_perf.timeline_interaction_record.TimelineInteractionRecord.

  Returns:
    True if some record's [start, end] span contains both the event's start
    and end times.
  """
  for record in interaction_records:
    if record.start <= event.start and record.end >= event.end:
      return True
  return False
class TimelineBasedMetric(object):
  """Base class for metrics computed from a timeline model and time ranges."""

  def __init__(self):
    """Computes metrics from a telemetry.timeline Model and a range."""
    super(TimelineBasedMetric, self).__init__()

  def AddResults(self, model, renderer_thread, interaction_records, results):
    """Computes and adds metrics for the interaction_records' time ranges.

    Overrides must compute results on the data **only** within the
    interaction_records' start and end time ranges.

    Args:
      model: An instance of telemetry.timeline.model.TimelineModel.
      interaction_records: A list of instances of TimelineInteractionRecord.
        Overrides that do not support overlapped ranges should call
        VerifyNonOverlappedRecords to check that no records overlap.
      results: An instance of page.PageTestResults.
    """
    raise NotImplementedError()

  def AddWholeTraceResults(self, model, results):
    """Computes and adds metrics corresponding to the entire trace.

    Override to compute results that correspond to the whole trace.

    Args:
      model: An instance of telemetry.timeline.model.TimelineModel.
      results: An instance of page.PageTestResults.
    """
    pass

  def VerifyNonOverlappedRecords(self, interaction_records):
    """Raises TimelineBasedMetricException if any record ranges overlap."""
    ranges = [(record.start, record.end) for record in interaction_records]
    if _TimeRangesHasOverlap(ranges):
      raise TimelineBasedMetricException(
          'This metric does not support interaction records with overlapped '
          'time range.')
|
agoose77/hivesystem | manual/movingpanda/panda-15c.py | Python | bsd-2-clause | 3,125 | 0.00192 | from pandalib import myhive, myscene, pandalogichive, pandalogicframe, load_hive, camerabind
import bee, dragonfly
from bee import connect
import Spyder
camerahive = "camera.web"
pandadict = {} # global registry: class name -> (mode, model data, icon box, icon image, hivemap)
# First panda class: animated actor, small scale, walk cycle.
a = Spyder.AxisSystem()
a *= 0.005
data = "models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a
box = Spyder.Box2D(50, 470, 96, 96)
image = "pandaicon.png", True
hivemap = "pandawalk.web"
pandadict["pandaclass"] = ("actor", data, box, image, hivemap)
# Second panda class: same actor model at a smaller scale, different hivemap.
a = Spyder.AxisSystem()
a *= 0.002
data = "models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a
box = Spyder.Box2D(200, 500, 48, 48)
image = "pandaicon.png", True
hivemap = "pandawalk2.web"
pandadict["pandaclass2"] = ("actor", data, box, image, hivemap)
# Third panda class: static model (no animations), "model" mode.
a = Spyder.AxisSystem()
a *= 0.3
data = "models/panda", "egg", [], a
box = Spyder.Box2D(280, 480, 144, 112)
image = "pandaicon2.png", True
hivemap = "pandajump.web"
pandadict["pandaclass3"] = ("model", data, box, image, hivemap)
def generate_pandasceneframe(name, panda):
    """Build a spyderframe subclass registering *panda* under *name*.

    panda is a (mode, data, box, image, hivemap) tuple as stored in
    pandadict.  The class body runs at definition time: it creates the
    Spyder object and icon, then del-cleans the temporaries so they do not
    become class attributes.  (Garbled separator characters from extraction
    were removed from the base-class name and the EntityClass3D line.)
    """
    class pandasceneframe(dragonfly.pandahive.spyderframe):
        mode, data, box, image, hivemap = panda
        model, modelformat, animations, a = data
        if mode == "actor":
            obj = Spyder.ActorClass3D(model, modelformat, animations, a, actorclassname=name)
        elif mode == "model":
            model = Spyder.Model3D(model, modelformat, a)
            obj = Spyder.EntityClass3D(name, [model])
        else:
            raise ValueError(mode)
        im, transp = image
        icon = Spyder.Icon(im, name, box, transparency=transp)
        del model, modelformat, animations, a
        del mode, data, box, image, hivemap
        del im, transp
    return pandasceneframe
class myscene2(myscene):
    # Class-body loop: generate one sceneframe attribute per registered
    # panda class; locals() injection makes them named class attributes.
    for name in pandadict:
        panda = pandadict[name]
        pandasceneframe = generate_pandasceneframe(name, panda)(scene="scene", canvas="canvas", mousearea="mousearea")
        locals()["pandasceneframe_%s" % name] = pandasceneframe
    del name, panda, pandasceneframe  # keep loop temporaries off the class
class pandalogichive2(pandalogichive):
    c_hivereg = bee.configure("hivereg")
    # Register each panda's hivemap and wire a logic frame per panda class.
    for name in pandadict:
        mode, data, box, image, hivemap = pandadict[name]
        hive = load_hive(hivemap)
        c_hivereg.register_hive(name, hive)
        p = pandalogicframe(name)
        connect(p.set_panda, "do_set_panda")
        connect(p.trig_spawn, "do_trig_spawn")
        locals()["pandalogicframe_%s" % name] = p
    del mode, data, box, image, hivemap, hive, p, name  # drop loop temporaries
class camerabind2(camerabind):
    # Bind the camera hive loaded from the module-level camerahive path.
    hive = load_hive(camerahive)
class mainhive(myhive):
    # Top-level hive: wires panda logic, camera, and the scene together.
    pandalogic = pandalogichive2(hivereg="hivereg")
    connect(pandalogic.set_panda, "v_panda")
    connect(pandalogic.set_panda, "v_hivename")
    connect(pandalogic.trig_spawn, "trig_spawn")
    camerabind = camerabind2().worker()
    myscene = myscene2(
        scene="scene",
        canvas="canvas",
        mousearea="mousearea",
    )
# Instantiate, build, and run the main hive (standard bee lifecycle).
main = mainhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
main.run()
|
Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2021_03_01/operations/_certificates_operations.py | Python | mit | 26,217 | 0.004272 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer

from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Loose alias: JSON payloads are passed through untyped.
JSONType = Any
# Optional response-hook callable signature used by the operations below.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Shared serializer for URL/query/header parameters; client-side validation
# is disabled because the service validates the values.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing all certificates in a subscription."""
    api_version = "2021-03-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Web/certificates')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    if filter is not None:
        query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_by_resource_group_request(
    resource_group_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing certificates in one resource group."""
    api_version = "2021-03-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_request(
    resource_group_name: str,
    name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request fetching a single certificate by name."""
    api_version = "2021-03-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        "name": _SERIALIZER.url("name", name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_create_or_update_request(
    resource_group_name: str,
    name: str,
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PUT request creating or updating a certificate."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    api_version = "2021-03-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        "name": _SERIALIZER.url("name", name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request(
    resource_group_name: str,
    name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request removing a certificate by name."""
    api_version = "2021-03-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        "name": _SERIALIZER.url("name", name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_update_request(
resource_group_name: str,
name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_v |
the-zebulan/CodeWars | katas/beta/lets_flat_them_out.py | Python | mit | 396 | 0 | from collections import MutableMapping
def flatten(d, parent_key='', sep='/'):
    """Flatten a nested mapping into a single-level dict.

    Keys are joined with *sep* (e.g. {'a': {'b': 1}} -> {'a/b': 1}).  An
    empty nested mapping flattens to its path mapped to '' (the original
    `dict(result) or {parent_key: ''}` behaviour is preserved).  Works on
    both Python 2 and 3: iteritems() and the pre-3.3 MutableMapping import
    location have been replaced with portable equivalents.
    """
    try:
        from collections.abc import MutableMapping  # Python 3.3+
    except ImportError:  # Python 2 fallback
        from collections import MutableMapping
    result = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            result.extend(flatten(v, new_key, sep=sep).items())
        else:
            result.append((new_key, v))
    return dict(result) or {parent_key: ''}
|
elifesciences/elife-tools | tests/test_parse_jats.py | Python | mit | 132,828 | 0.001197 | # coding=utf-8
import json
import os
import unittest
from collections import OrderedDict
from bs4 import BeautifulSoup
from ddt import ddt, data, unpack
from elifetools import parseJATS as parser
from elifetools import rawJATS as raw_parser
from elifetools.utils import date_struct
from tests.file_utils import (
sample_xml,
json_expected_file,
read_fixture,
read_sample_xml,
)
from tests import soup_body
@ddt
class TestParseJats(unittest.TestCase):
    def setUp(self):
        # No shared fixtures: each test parses its own sample via soup().
        pass
    def soup(self, filename):
        """Parse the named sample XML file and return its soup."""
        return parser.parse_document(sample_xml(filename))
def json_expected(self, filename, function_name):
json_expected = None
json_file = json_expected_file(filename, function_name)
try:
with open(json_file, "rb") as json_file_fp:
json_expected = json.loads(json_file_fp.read().decode("utf-8"))
except IOError:
# file may not exist or the value is None for this article
pass
return json_expected
@data("elife-kitchen-sink.xml")
def test_parse_document(self, filename):
soup = parser.parse_document(sample_xml(filename))
self.assertTrue(isinstance(soup, BeautifulSoup))
"""
Quick test cases during development checking syntax errors and coverage
"""
    @unpack
    @data(
        (
            "elife04493.xml",
            "Neuron hemilineages provide the functional ground plan for the <i>Drosophila</i> ventral nervous system",
        )
    )
    def test_full_title_json(self, filename, expected):
        """full_title_json keeps inline markup (e.g. <i>) in the title."""
        full_title_json = parser.full_title_json(self.soup(filename))
        self.assertEqual(expected, full_title_json)
    @unpack
    @data(
        (
            "elife04490.xml",
            "Both the frequency of sesquiterpene-emitting individuals and the defense capacity of individual plants determine the consequences of sesquiterpene volatile emission for individuals and their neighbors in populations of the wild tobacco <i>Nicotiana attenuata</i>.",
        ),
        ("elife_poa_e06828.xml", ""),
    )
    def test_impact_statement_json(self, filename, expected):
        """impact_statement_json returns '' when the article has none (PoA)."""
        impact_statement_json = parser.impact_statement_json(self.soup(filename))
        self.assertEqual(expected, impact_statement_json)
    @unpack
    @data(("elife-kitchen-sink.xml", 6), ("elife-02833-v2.xml", 0))
    def test_ethics_json_by_file(self, filename, expected_length):
        """Check only the number of ethics entries parsed from each sample."""
        soup = parser.parse_document(sample_xml(filename))
        self.assertEqual(len(parser.ethics_json(soup)), expected_length)
    @unpack
    @data(
        (
            read_fixture("test_ethics_json", "content_01.xml"),
            read_fixture("test_ethics_json", "content_01_expected.py"),
        ),
    )
    def test_ethics_json(self, xml_content, expected):
        """Compare ethics_json output against a fixture's expected value."""
        soup = parser.parse_xml(xml_content)
        tag_content = parser.ethics_json(soup_body(soup))
        self.assertEqual(expected, tag_content)
    @unpack
    @data(("elife-kitchen-sink.xml", list), ("elife_poa_e06828.xml", None))
    def test_acknowledgements_json_by_file(self, filename, expected):
        """Expected is either None (no ack section) or the result's type."""
        acknowledgements_json = parser.acknowledgements_json(self.soup(filename))
        if expected is None:
            self.assertEqual(expected, acknowledgements_json)
        else:
            self.assertEqual(expected, type(acknowledgements_json))
    @unpack
    @data(("elife04490.xml", 3))
    def test_appendices_json_by_file(self, filename, expected_len):
        """Check only the number of appendices parsed from the sample file."""
        soup = parser.parse_document(sample_xml(filename))
        tag_content = parser.appendices_json(soup)
        self.assertEqual(len(tag_content), expected_len)
    @unpack
    @data(
        # example based on 14093 v1 with many sections and content
        (
            read_fixture("test_appendices_json", "content_01.xml"),
            read_fixture("test_appendices_json", "content_01_expected.py"),
        ),
        # example based on 14022 v3 having a section with no title in it, with some additional scenarios
        (
            read_fixture("test_appendices_json", "content_02.xml"),
            read_fixture("test_appendices_json", "content_02_expected.py"),
        ),
        # appendix with no sections, based on 00666 kitchen sink
        (
            read_fixture("test_appendices_json", "content_03.xml"),
            read_fixture("test_appendices_json", "content_03_expected.py"),
        ),
        # appendix with a section and a box, also based on 00666 kitchen sink
        (
            read_fixture("test_appendices_json", "content_04.xml"),
            read_fixture("test_appendices_json", "content_04_expected.py"),
        ),
        # appendix with a boxed-text in a subsequent section based on article
        (
            read_fixture("test_appendices_json", "content_05.xml"),
            read_fixture("test_appendices_json", "content_05_expected.py"),
        ),
    )
    def test_appendices_json(self, xml_content, expected):
        """Compare appendices_json output against fixture expectations."""
        soup = parser.parse_xml(xml_content)
        tag_content = parser.appendices_json(soup_body(soup))
        self.assertEqual(expected, tag_content)
    @unpack
    @data(
        # appendix with inline-graphic, based on 17092 v1
        (
            read_fixture("test_appendices_json_base_url", "content_01.xml"),
            None,
            read_fixture("test_appendices_json_base_url", "content_01_expected.py"),
        ),
        # appendix with inline-graphic, based on 17092 v1
        (
            read_fixture("test_appendices_json_base_url", "content_02.xml"),
            "https://example.org/",
            read_fixture("test_appendices_json_base_url", "content_02_expected.py"),
        ),
    )
    def test_appendices_json_with_base_url(self, xml_content, base_url, expected):
        """Same as above, but with an optional base_url for graphic links."""
        soup = parser.parse_xml(xml_content)
        tag_content = parser.appendices_json(soup_body(soup), base_url)
        self.assertEqual(expected, tag_content)
@unpack
@data(
(
"elife04490.xml",
[
"<i>Nicotiana attenuata</i>",
"<i>Manduca sexta</i>",
u"Geocoris spp.",
"<i>Trichobaris mucorea</i>",
u"direct and indirect defense",
u"diversity",
],
),
| ("elife07586.xml", []),
)
def test_keywords_json(self, filename, expected):
keywords_json = parser.keywords_json(self.soup(filename))
self.assertEqual(expected, keywords_json)
@unpack
@data(
('<root xmlns | :xlink="http://www.w3.org/1999/xlink"/>', []),
(
'<root xmlns:xlink="http://www.w3.org/1999/xlink"><kwd-group kwd-group-type="research-organism"><title>Research organism</title><kwd><italic>A. thaliana</italic></kwd><kwd>Other</kwd></kwd-group></root>',
["<i>A. thaliana</i>"],
),
(
'<root xmlns:xlink="http://www.w3.org/1999/xlink"><kwd-group kwd-group-type="research-organism"><title>Research organism</title><kwd>None</kwd></kwd-group></root>',
[],
),
)
def test_research_organism_json(self, xml_content, expected):
soup = parser.parse_xml(xml_content)
tag_content = parser.research_organism_json(soup_body(soup))
self.assertEqual(expected, tag_content)
    @unpack
    @data(
        ("<root></root>", None),
        ("<root><ack></ack></root>", None),
        (
            "<root><ack><title>Acknowledgements</title><p>Paragraph</p></ack></root>",
            [OrderedDict([("type", "paragraph"), ("text", u"Paragraph")])],
        ),
        (
            "<root><ack><title>Acknowledgements</title><p>Paragraph</p><p><italic>italic</italic></p></ack></root>",
            [
                OrderedDict([("type", "paragraph"), ("text", u"Paragraph")]),
                OrderedDict([("type", "paragraph"), ("text", u"<i>italic</i>")]),
            ],
        ),
    )
    def test_acknowledgements_json(self, xml_content, expected):
        """acknowledgements_json: None without an ack body, else paragraphs."""
        soup = parser.parse_xml(xml_content)
        tag_content = parser.acknowledgements_json(soup_body(soup))
        self.assertEqual(expected, tag_content)
Melon-PieldeSapo/IoTFramework | src/tests/test_ground.py | Python | gpl-3.0 | 1,704 | 0.019953 | test = "#{0}:{1};"
import re
regex = re.compile(test,re.IGNORECASE)
pattern = r'[{]\d[}]'
regex = re.compile(pattern,re.IGNORECASE)
print regex
parsed = []
for idx,match in enumerate(regex.finditer(test)):
parsed.append({'start':match.start(),'end':match.end()})
print "%s: %s-%s: %s" % (str(idx),match.start(),match.end(),match.group(0))
print parsed
total = len(parsed)
for idx,group in enumerate(parsed):
if(idx == 0):#special case for first one
prec = test[0:group['start']]
post = test[group['end']:parsed[idx+1]['start']]
elif(idx == (total-1)):#special case for the last one
prec = test[parsed[idx-1]['end']:group['start']]
post = test[group['end']:len(test)]
else:#generic case
prec = test[parsed[idx-1]['end']:group['start']]
post = test[group['end']:parsed[idx+1]['start']]
parsed[idx]['prec'] = prec
parsed[idx]['post'] = post
print parsed
#De esto sacar u | n array para valor donde se guarda el preceding string y el postceding string. (en caso de no | haber al principio o al final se guarda un -1
#Para procesar el payload se va extrallendo el substring de forma que lo que va quedando al principio siempre debe estar el preceding string.
payload = "#2:643.0605;"
payload_group = []
for group in parsed:
start= payload.find(group['prec'])+len(group['prec'])
end= payload.find(group['post'],start)
print("positions {}:{}".format(start,end))
result = payload[start:end]
payload_group.append(result)
payload = payload[end:]
print("Extracted: {} - Remaining: {}".format(result,payload))
pattern = r'[{]'+str(1)+'[}]'
print( re.sub(pattern, "XX", ".00..0..{1}1."))
|
blueyed/pytest_django | tests/compat.py | Python | bsd-3-clause | 270 | 0 | try:
from django.utils.encoding import force_text # noqa
exc | ept ImportError:
from django.utils.encoding import force_unicode as force_text # noqa
try:
from urllib2 import urlopen # noqa
except ImportError:
from urlli | b.request import urlopen # noqa
|
michaelforney/libblit | amdgpu/makeregheader.py | Python | isc | 16,007 | 0.003061 | from __future__ import absolute_import, division, print_function, unicode_literals
COPYRIGHT = '''
/*
* Copyright 2015-2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
'''
"""
Create the (combined) register header from register JSON. Use --help for usage.
"""
import argparse
from collections import defaultdict
import itertools
import json
import re
import sys
from regdb import Object, RegisterDatabase, deduplicate_enums, deduplicate_register_types
######### BEGIN HARDCODED CONFIGURATION

# Chips are sorted chronologically.  The order matters: get_chip_index()
# returns positions in this list and get_disambiguation_suffix() picks the
# suffix of the oldest supporting chip.
CHIPS = [
    Object(name='gfx6', disambiguation='GFX6'),
    Object(name='gfx7', disambiguation='GFX7'),
    Object(name='gfx8', disambiguation='GFX8'),
    Object(name='fiji', disambiguation='GFX8'),
    Object(name='stoney', disambiguation='GFX8'),
    Object(name='gfx9', disambiguation='GFX9'),
    Object(name='gfx10', disambiguation='GFX10'),
]

######### END HARDCODED CONFIGURATION
def get_chip_index(chip):
    """
    Return the position of the chip named *chip* in the global CHIPS list.
    Raises StopIteration for an unknown name.
    """
    matches = (position for position, entry in enumerate(CHIPS) if entry.name == chip)
    return next(matches)
def get_disambiguation_suffix(chips):
    """
    Disambiguation suffix for an enum entry or field name supported on the
    given set of chips: the suffix of the oldest (lowest-index) chip.
    """
    oldest = min(get_chip_index(name) for name in chips)
    return CHIPS[oldest].disambiguation
def get_chips_comment(chips, parent=None):
    """
    Generate a user-friendly comment describing the given set of chips.

    The return value may be None, if such a comment is deemed unnecessary.

    parent is an optional set of chips supporting a parent structure, e.g.
    where chips may be the set of chips supporting a specific enum value,
    parent would be the set of chips supporting the field containing the enum,
    the idea being that no comment is necessary if all chips that support the
    parent also support the child.
    """
    # One boolean per CHIPS entry, in chronological order.
    chipflags = [chip.name in chips for chip in CHIPS]
    if all(chipflags):
        return None

    if parent is not None:
        parentflags = [chip.name in parent for chip in CHIPS]
        # No comment if every parent-supporting chip also supports the child.
        if all(childflag or not parentflag for childflag, parentflag in zip(chipflags, parentflags)):
            return None

    # prefix: length of the leading run of supported chips.
    prefix = 0
    for idx, chip, flag in zip(itertools.count(), CHIPS, chipflags):
        if not flag:
            break
        prefix = idx + 1
    # suffix: index where the trailing run of supported chips begins.
    suffix = len(CHIPS)
    for idx, chip, flag in zip(itertools.count(), reversed(CHIPS), reversed(chipflags)):
        if not flag:
            break
        suffix = len(CHIPS) - idx - 1

    # Collapse the leading run to "<= oldest", the trailing run to
    # ">= newest", and name any supported chips in between individually.
    comment = []
    if prefix > 0:
        comment.append('<= {0}'.format(CHIPS[prefix - 1].name))
    for chip, flag in zip(CHIPS[prefix:suffix], chipflags[prefix:suffix]):
        if flag:
            comment.append(chip.name)
    if suffix < len(CHIPS):
        comment.append('>= {0}'.format(CHIPS[suffix].name))
    return ', '.join(comment)
class HeaderWriter(object):
def __init__(self, regdb, guard=None):
self.guard = guard
# The following contain: Object(address, chips, name, regmap/field/enumentry)
self.register_lines = []
self.field_lines = []
self.value_lines = []
regtype_emit = defaultdict(set)
enum_emit = defaultdict(set)
for regmap in regdb.register_mappings():
type_ref = getattr(regmap, 'type_ref', None)
self.register_lines.append(Object(
address=regmap.map.at,
chips=set(regmap.chips),
name=regmap.name,
regmap=regmap,
type_refs=set([type_ref]) if type_ref else set(),
))
basename = re.sub(r'[0-9]+', '', regmap.name)
key = '{type_ref}::{basename}'.format(**locals())
if type_ref is not None and regtype_emit[key].isdisjoint(regmap.chips):
regtype_emit[key].update(regmap.chips)
regtype = regdb.register_type(type_ref)
for field in regtype.fields:
if field.name == 'RESERVED':
continue
enum_ref = getattr(field, 'enum_ref', None)
self.field_lines.append(Object(
address=regmap.map.at,
chips=set(regmap.chips),
name=field.name,
field=field,
bits=field.bits[:],
type_refs=set([type_ref]) if type_ref else set(),
enum_refs=set([enum_ref]) if enum_ref else set(),
))
key = '{type_ref}::{basename}::{enum_ref}'.format(**locals())
if enum_ref is not None and enum_emit[key].isdisjoint(regmap.chips):
enum_emit[key].update(regmap.chips)
enum = regdb.enum(enum_ref)
for entry in enum.entries:
self.value_lines.append(Object(
address=regmap.map.at,
chips=set(regmap.chips),
name=entry.name,
enumentry=entry,
enum_refs=set([enum_ref]) if enum_ref else set(),
))
# Merge register lines
lines = self.register_lines
lines.sort(key=lambda line: (line.address, line.name))
self.register_lines = []
for line in lines:
prev = self.register_lines[-1] if | self.register_lines else None
if prev and prev.address == line.address and prev.name == line.name:
| prev.chips.update(line.chips)
prev.type_refs.update(line.type_refs)
continue
self.register_lines.append(line)
# Merge field lines
lines = self.field_lines
lines.sort(key=lambda line: (line.address, line.name))
self.field_lines = []
for line in lines:
merged = False
for prev in reversed(self.field_lines):
if prev.address != line.address or prev.name != line.name:
break
# Can merge fields if they have the same starting bit and the
# range of the field as intended by the current line does not
# conflict with any of the regtypes covered by prev.
if prev.bits[0] != line.bits[0]:
continue
if prev.bits[1] < line.bits[1]:
# Current line's field extends beyond the range of prev.
# Need to check for conflicts
conflict = False
for type_ref in prev.type_refs:
for field in regdb.register_type(type_ref).fields:
# The only possible conflict is for a prev field
# that starts at a higher bit.
if (field.bits[0] |
webcomics/dosage | dosagelib/plugins/comicfury.py | Python | mit | 56,565 | 0.000177 | # SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2021 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
import os
from ..scraper import _ParserScraper
from ..helpers import bounceStarter
XPATH_LINK = '//a[d:class("%s") and contains(text(), "%s")]'
XPATH_IMG = '//div[d:class("comicnav")]//a[img[contains(@alt, "%s")]]'
class ComicFury(_ParserScraper):
imageSearch = ('//img[@id="comicimage"]',
'//div[@id="comicimagewrap"]//embed',
'//div[@id="comicimagewrap"]//img')
prevSearch = (
'//link[@rel="prev"]',
# 137 (needs to be before the generic a@rel, because layout is wrong)
'//a[contains(@title, "previous")]',
'//a[@rel="prev"]',
XPATH_LINK % ('comicnavlink', 'Previous'),
XPATH_IMG % ('Previous'),
# Art, ConsolersDLC, etc.
u'//nav//a[contains(text(), "\u2039")]',
# LatchkeyKingdom
'//a[d:class("navi") and img[contains(@src, "Previous")]]',
# KATRAN
'//a[contains(text(), "Previous")]',
# MansionofE
'//a[img[contains(@alt, "PREVIOUS")]]',
# RedSpot
'//a[contains(text(), "Back")]',
)
nextSearch = (
'//link[@rel="next"]',
# 137 (see above)
'//a[contains(@title, "next")]',
'//a[@rel="next"]',
XPATH_LINK % ('comicnavlink', 'Next'),
XPATH_IMG % ('Next'),
# Art, ConsolersDLC, etc.
u'//nav//a[contains(text(), "\u203A")]',
# LatchkeyKingdom
'//a[d:class("navi") and img[contains(@src, "Next")]]',
# RedSpot, KATRAN
'//a[contains(text(), "Next")]',
# MansionofE
'//a[img[contains(@alt, "NEXT")]]',
)
help = 'Index format: n'
starter = bounceStarter
def __init__(self, name, sub, lang=None, adult=False, endOfLife=False):
super(ComicFury, self).__init__('ComicFury/' + name)
self.prefix = name
self.url = 'http://%s.webcomic.ws/comics/' % sub
self.stripUrl = self.url + '%s'
self.firstStripUrl = self.stripUrl % '1'
if lang:
self.lang = lang
if adult:
self.adult = adult
if endOfLife:
self.endOfLife = endOfLife
def namer(self, image_url, page_url):
parts = page_url.split('/')
path, ext = os.path.splitext(image_url)
num = parts[-1]
return "%s_%s%s" % (self.prefix, num, ext)
@classmethod
def getmodules(cls): # noqa: Allowed to be long
return (
# These were once in the list below, but fell out from the index...
cls('BadassologyByMichaelBay', 'strudelology'),
cls('DandyAndCompany', 'dandyandcompany'),
cls('DeadAtNight', 'deadnight'),
cls('Shatterrealm', 'shatterrealm'),
# do not edit anything below since these entries are generated from
# scripts/comicfury.py
# START AUTOUPDATE
cls('0Eight', '0eight'),
cls('1000', '1000'),
cls('12YearsLater', '12yearslater'),
cls('137', '137'),
cls('20', 'two-over-zero'),
cls('20QuidAmusements', 'twentyquidamusements'),
cls('30', '30years'),
cls('30DaysOfCharacters', '30days'),
cls('3DGlasses', '3dglasses'),
cls('60SecondComics', '6tsc'),
cls('6ColorStories', '6colorstories'),
cls('6Tales', 'sixtales'),
cls('933Dollars', '933dollars'),
cls('_Thetest_', 'thetest'),
cls('AbbyComics', 'abbycomics'),
cls('ABrickishSpaceComic', 'abrickishspacecomic'),
cls('AbsentMindedTheatre', 'amtheatre'),
cls('Absurd', 'absurd'),
cls('ACannonadeOfHogwash', 'cannonadeofhogwash'),
cls('AccidentallyOnPurpose', 'accidentally-on-purpose'),
cls('ACelestialStory', 'acelestialstory'),
cls('AComicExistense', 'acomicexistense'),
cls('Acroalis', 'acroalis'),
cls('ActingOut', 'actingout'),
cls('ActionLand', 'actionland'),
cls('Advent', 'advent'),
cls('AdventuresInJetpacks', 'adventuresinjetpacks'),
cls('AdventuresInTanoshii', 'adventuresintanoshii'),
cls('AdventuresInTrueLove', 'advtl'),
cls('AdventuresOftheGreatCaptainMaggieandCrew', 'adventuresofmaggie'),
cls('Aerosol', 'aerosol'),
cls('AetherEarthAndSun', 'aether'),
cls('AForeverQuest', 'aforeverquest'),
cls('Afterdead', 'afterdead'),
cls('AGame', 'kirahitogame'),
cls('Agency', 'agency-comic'),
cls('AgentBishop', 'agentbishop'),
cls('AHappierKindOfSad', 'ahappierkindofsad'),
cls('AlbinoBrothers', 'albinobros'),
cls('AlexanderAndLucasRebooted', 'alexanderandlucas'),
cls('AliaTerra', 'alia-terra'),
cls('AlienIrony', 'alien-irony'),
cls('AlienSpike', 'alienspike'),
cls('Alignment', 'alignment'),
cls('AllTheBbqSauce', 'allthebbqsauce'),
cls('Alone', 'alone'),
cls('ALoonaticsTale', 'aloonaticstale'),
cls('ALoveStorydraft', 'alovestory'),
cls('AlyaTheLastChildOfLight', 'alya'),
cls('Amara', | 'amara'),
cls('Ampre', 'ampere'),
cls('AmyOok', 'amyook'),
cls('AndroidFiles', 'androidfiles'),
cls('AngelGuardianEnEspanol', 'angelguardianespano | l', 'es'),
cls('AngelsOfIblis', 'angelsofiblis'),
cls('AngryFaerie', 'angryfaerie'),
cls('AnimalInstinct', 'fur-realanimalinstinct'),
cls('Animangitis', 'animangitis'),
cls('AnK', 'ank'),
cls('Anne', 'anne'),
cls('AntarcticBroadcasting', 'antarcticbroadcasting'),
cls('AntaresComplex', 'antarescomplex'),
cls('Antcomics', 'antcomics'),
cls('Anthology', 'strudelology'),
cls('AnthologyOfAnfer', 'anfer'),
cls('AnthrosAndDungeons', 'anthrosanddungeons'),
cls('AntiqueTimeMachine', 'atm'),
cls('APiratesLife', 'pirateslife'),
cls('ApocalypsoAdventure', 'thewriter13'),
cls('ApplepineMonkeyAndFriends', 'applepine'),
cls('AquazoneBreakfastNews', 'aqbn'),
cls('ArachnidGoddess', 'arachnidgoddess'),
cls('Arcane', 'rbsarcane'),
cls('Archibald', 'archibald'),
cls('ArchiNinja', 'archininja'),
cls('AreYouDoneYet', 'areyoudoneyet'),
cls('ArmlessAmy', 'armlessamy'),
cls('ArmlessAmyExtraEdition', 'armlessamyextraedition'),
cls('ArmyBrat', 'armybrat'),
cls('Art', 'art'),
cls('ArtificialStorm', 'artificialstorm'),
cls('ArtisticAdventuresInBoredom', 'aab'),
cls('ARVEYToonz', 'arveytoonz'),
cls('Ashes', 'ashescomic'),
cls('Asperchu', 'asperchu'),
cls('AsperitasAstraalia', 'asperitasastraalia'),
cls('AssholeAndDouchebag', 'aaanddb'),
cls('AstralAves', 'astralaves'),
cls('ASTRAYCATS', 'astraycats'),
cls('Astronautical', 'astronautical'),
cls('AtomicMonkeyComics', 'atomicmonkey'),
cls('ATownCalledAlandale', 'atowncalledalandale'),
cls('AttackOfTheRobofemoids', 'attack-of-the-robofemoids'),
cls('AugustosClassic', 'augustos-classic'),
cls('AuntieClara', 'auntieclara'),
cls('Auriga', 'auriga'),
cls('Auster', 'auster'),
cls('AutumnBay', 'autumnbay'),
cls('AutumnBayExtraEdition', 'autumnbayextra'),
cls('Avatars', 'avatars'),
cls('AvengersRollInitiative', 'avengersrollinitiative'),
cls('AwkwardPaws', 'awkwardpaws'),
cls('AwkwardShelby', 'awkwardshelby'),
cls('BabesOfDongaria', 'dongaria'),
cls('Baby001 |
qedsoftware/commcare-hq | corehq/ex-submodules/pillowtop/tests/test_bulk.py | Python | bsd-3-clause | 1,267 | 0.001591 | # coding=utf-8
import json
from django.test import SimpleTestCase
from corehq.util.test_utils import generate_cases
from pillowtop.utils import prepare_bulk_payloads
class BulkTest(SimpleTestCase):
    """Bulk payload preparation must round-trip non-ASCII document data."""

    def test_prepare_bulk_payloads_unicode(self):
        domain = u'हिंदी'
        changes = [{'id': 'doc1'}, {'id': 'doc2', 'domain': domain}]
        # A tiny max_size forces one payload per change.
        payloads = prepare_bulk_payloads(changes, max_size=10, chunk_size=1)
        self.assertEqual(len(payloads), 2)
        # The unicode domain must survive serialization unmangled.
        self.assertEqual(json.loads(payloads[1])['domain'], domain)
@generate_cases([
    (100, 1, 3),
    (100, 10, 1),
    (1, 1, 10),
    (1, 2, 5),
], BulkTest)
def test_prepare_bulk_payloads2(self, max_size, chunk_size, expected_payloads):
    """Payload count is driven by max_size/chunk_size; content is preserved."""
    changes = [{'id': 'doc%s' % i} for i in range(10)]
    payloads = prepare_bulk_payloads(changes, max_size=max_size, chunk_size=chunk_size)
    self.assertEqual(len(payloads), expected_payloads)
    # No payload may come out empty.
    self.assertTrue(all(payloads))
    # Joining all payloads and splitting on newlines must reproduce the
    # original change documents, in order.
    docs = ''.join(payloads).strip().split('\n')
    self.assertEqual([json.loads(doc) for doc in docs], changes)
|
RCOSDP/waterbutler | tests/providers/azureblobstorage/test_provider.py | Python | apache-2.0 | 18,530 | 0.001943 | import pytest
import math
import io
import time
import base64
import hashlib
from http import client
from unittest import mock
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import metadata
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.azureblobstorage import AzureBlobStorageProvider
from waterbutler.providers.azureblobstorage.metadata import AzureBlobStorageFileMetadata
from waterbutler.providers.azureblobstorage.metadata import AzureBlobStorageFolderMetadata
from waterbutler.providers.azureblobstorage.provider import (
MAX_UPLOAD_BLOCK_SIZE,
)
@pytest.fixture
def auth():
    # Minimal auth blob handed to the provider; values are arbitrary stubs.
    return {
        'name': 'cat',
        'email': 'cat@cat.com',
    }
@pytest.fixture
def credentials():
    # account_key is base64-encoded, matching the form of a real Azure key.
    return {
        'account_name': 'dontdead',
        'account_key': base64.b64encode(b'open inside'),
    }
@pytest.fixture
def settings():
    # Target container name used by the provider under test.
    return {
        'container': 'thatkerning'
    }
@pytest.fixture
def mock_time(monkeypatch):
    # Freeze time.time() so any time-derived values (e.g. signed URLs)
    # are reproducible across test runs.
    mock_time = mock.Mock(return_value=1454684930.0)
    monkeypatch.setattr(time, 'time', mock_time)
@pytest.fixture
def provider(auth, credentials, settings):
    """Provider under test, wired up with the stub auth/credentials/settings."""
    return AzureBlobStorageProvider(auth, credentials, settings)
@pytest.fixture
def file_content():
    # Small payload used for simple upload/download round-trips.
    return b'sleepy'
@pytest.fixture
def file_like(file_content):
    # In-memory file object wrapping the stub payload.
    return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
    # WaterButler stream wrapper, as provider.upload() expects.
    return streams.FileStreamReader(file_like)
@pytest.fixture
def large_file_content():
    # 71MB (4MB * 17 + 3MB) — deliberately not a multiple of the 4MB
    # upload block size, so chunked-upload paths see a partial final block.
    return b'a' * (71 * (2 ** 20))
@pytest.fixture
def large_file_like(large_file_content):
    # In-memory file object over the 71MB payload.
    return io.BytesIO(large_file_content)
@pytest.fixture
def large_file_stream(large_file_like):
    # Stream wrapper for the large payload (exercises multi-block upload).
    return streams.FileStreamReader(large_file_like)
@pytest.fixture
def folder_metadata():
    # Canned container-listing response (List Blobs) with two blobs under
    # 'Photos/' and one at the container root — used to probe folder paths.
    return b'''<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://vrosf.blob.core.windows.net/" ContainerName="sample-container1">
    <Blobs>
        <Blob>
            <Name>Photos/test-text.txt</Name>
            <Properties>
                <Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
                <Etag>0x8D40959613D32F6</Etag>
                <Content-Length>0</Content-Length>
                <Content-Type>text/plain</Content-Type>
                <Content-Encoding />
                <Content-Language />
                <Content-MD5 />
                <Cache-Control />
                <Content-Disposition />
                <BlobType>BlockBlob</BlobType>
                <LeaseStatus>unlocked</LeaseStatus>
                <LeaseState>available</LeaseState>
            </Properties>
        </Blob>
        <Blob>
            <Name>Photos/a/test.txt</Name>
            <Properties>
                <Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
                <Etag>0x8D40959613D32F6</Etag>
                <Content-Length>0</Content-Length>
                <Content-Type>text/plain</Content-Type>
                <Content-Encoding />
                <Content-Language />
                <Content-MD5 />
                <Cache-Control />
                <Content-Disposition />
                <BlobType>BlockBlob</BlobType>
                <LeaseStatus>unlocked</LeaseStatus>
                <LeaseState>available</LeaseState>
            </Properties>
        </Blob>
        <Blob>
            <Name>top.txt</Name>
            <Properties>
                <Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
                <Etag>0x8D40959613D32F6</Etag>
                <Content-Length>0</Content-Length>
                <Content-Type>text/plain</Content-Type>
                <Content-Encoding />
                <Content-Language />
                <Content-MD5 />
                <Cache-Control />
                <Content-Disposition />
                <BlobType>BlockBlob</BlobType>
                <LeaseStatus>unlocked</LeaseStatus>
                <LeaseState>available</LeaseState>
            </Properties>
        </Blob>
    </Blobs>
    <NextMarker />
</EnumerationResults>'''
@pytest.fixture
def file_metadata():
    # Canned response headers for a HEAD request on a single (empty) blob.
    return {
        'CONTENT-LENGTH': '0',
        'CONTENT-TYPE': 'text/plain',
        'LAST-MODIFIED': 'Thu, 10 Nov 2016 11:04:45 GMT',
        'ACCEPT-RANGES': 'bytes',
        'ETAG': '"0x8D40959613D32F6"',
        'SERVER': 'Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0',
        'X-MS-REQUEST-ID': '5b4a3cb6-0001-00ea-4575-895e2c000000',
        'X-MS-VERSION': '2015-07-08',
        'X-MS-LEASE-STATUS': 'unlocked',
        'X-MS-LEASE-STATE': 'available',
        'X-MS-BLOB-TYPE': 'BlockBlob',
        'DATE': 'Fri, 17 Feb 2017 23:28:33 GMT'
    }
@pytest.fixture
def large_file_metadata(large_file_content):
    # Same HEAD-response headers as file_metadata, but with CONTENT-LENGTH
    # matching the 71MB fixture payload.
    return {
        'CONTENT-LENGTH': str(len(large_file_content)),
        'CONTENT-TYPE': 'text/plain',
        'LAST-MODIFIED': 'Thu, 10 Nov 2016 11:04:45 GMT',
        'ACCEPT-RANGES': 'bytes',
        'ETAG': '"0x8D40959613D32F6"',
        'SERVER': 'Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0',
        'X-MS-REQUEST-ID': '5b4a3cb6-0001-00ea-4575-895e2c000000',
        'X-MS-VERSION': '2015-07-08',
        'X-MS-LEASE-STATUS': 'unlocked',
        'X-MS-LEASE-STATE': 'available',
        'X-MS-BLOB-TYPE': 'BlockBlob',
        'DATE': 'Fri, 17 Feb 2017 23:28:33 GMT'
    }
class TestValidatePath:
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_path_file(self, provider, file_metadata,
                                         mock_time):
        file_path = 'foobah'
        # HEAD on the blob itself succeeds -> the path resolves as a file...
        for good_metadata_url in provider.generate_urls(file_path, secondary=True):
            aiohttpretty.register_uri('HEAD', good_metadata_url, headers=file_metadata)
        # ...while the container listing used to probe folders returns 404.
        for bad_metadata_url in provider.generate_urls(secondary=True):
            aiohttpretty.register_uri('GET', bad_metadata_url,
                                      params={'restype': 'container', 'comp': 'list'}, status=404)
        try:
            wb_path_v1 = await provider.validate_v1_path('/' + file_path)
        except Exception as exc:
            pytest.fail(str(exc))
        # v1 semantics: asking for the trailing-slash (folder) form of a
        # file path must raise 404.
        with pytest.raises(exceptions.NotFoundError) as exc:
            await provider.validate_v1_path('/' + file_path + '/')
        assert exc.value.code == client.NOT_FOUND
        # Legacy v0 validation must agree with v1.
        wb_path_v0 = await provider.validate_path('/' + file_path)
        assert wb_path_v1 == wb_path_v0
    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_path_folder(self, provider, folder_metadata, mock_time):
        folder_path = 'Photos'
        # Container listing succeeds (folder exists in the listing)...
        for good_metadata_url in provider.generate_urls(secondary=True):
            aiohttpretty.register_uri(
                'GET', good_metadata_url, params={'restype': 'container', 'comp': 'list'},
                body=folder_metadata, headers={'Content-Type': 'application/xml'}
            )
        # ...while HEAD on the bare name 404s (it is not a blob).
        for bad_metadata_url in provider.generate_urls(folder_path, secondary=True):
            aiohttpretty.register_uri('HEAD', bad_metadata_url, status=404)
        try:
            wb_path_v1 = await provider.validate_v1_path('/' + folder_path + '/')
        except Exception as exc:
            pytest.fail(str(exc))
        # v1 semantics: the non-slash (file) form of a folder path is a 404.
        with pytest.raises(exceptions.NotFoundError) as exc:
            await provider.validate_v1_path('/' + folder_path)
        assert exc.value.code == client.NOT_FOUND
        # Legacy v0 validation must agree with v1.
        wb_path_v0 = await provider.validate_path('/' + folder_path + '/')
        assert wb_path_v1 == wb_path_v0
@pytest.mark.asyncio
async def test_normal_name(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/path.txt')
assert path.name == 'path.txt'
assert path.parent.name == 'a'
assert path.is_file
assert not path.is_dir
assert not path.is_root
@pytest.mark.asyncio
async def test_folder(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/folder/')
assert path.name == 'folder'
assert path.parent.name == 'a'
assert not path.is_file
assert path.is_dir
assert not path.is_root
@pytest.mark.asyncio
async def test_root(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/folder/')
assert path.name == 'folder'
assert path.parent.name == 'a'
assert not path.is_file
assert path.is_dir
assert not path.is_root
cl |
makinacorpus/mapnik2 | scons/scons-local-1.2.0/SCons/cpp.py | Python | lgpl-2.1 | 19,982 | 0.003853 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/cpp.py 3842 2008/12/20 22:59:52 scons"
__doc__ = """
SCons C Pre-Processor module
"""
# TODO(1.5): remove this import
# This module doesn't use anything from SCons by name, but we import SCons
# here to pull in zip() from the SCons.compat layer for early Pythons.
import SCons
import os
import re
import string
#
# First "subsystem" of regular expressions that we set up:
#
# Stuff to turn the C preprocessor directives in a file's contents into
# a list of tuples that we can process easily.
#
# A table of regular expressions that fetch the arguments from the rest of
# a C preprocessor line. Different directives have different arguments
# that we want to fetch, using the regular expressions to which the | lists
# of preprocessor directives map.
cpp_lines_dict = {
# Fetch the rest of a #if/#elif/#ifdef/#ifndef as one argument,
# separated from the keyword by white space.
('if', 'elif', 'ifdef', 'ifndef',)
: '\s+(.+)',
# Fetch the rest of a #import/#include/#include_next line as one
# argument, with white space optional.
('import', 'include', 'include_next',)
: ' | \s*(.+)',
# We don't care what comes after a #else or #endif line.
('else', 'endif',) : '',
# Fetch three arguments from a #define line:
# 1) The #defined keyword.
# 2) The optional parentheses and arguments (if it's a function-like
# macro, '' if it's not).
# 3) The expansion value.
('define',) : '\s+([_A-Za-z][_A-Za-z0-9_]+)(\([^)]*\))?\s*(.*)',
# Fetch the #undefed keyword from a #undef line.
('undef',) : '\s+([_A-Za-z][A-Za-z0-9_]+)',
}
# Create a table that maps each individual C preprocessor directive to
# the corresponding compiled regular expression that fetches the arguments
# we care about.
Table = {}
for op_list, expr in cpp_lines_dict.items():
e = re.compile(expr)
for op in op_list:
Table[op] = e
del e
del op
del op_list
# Create a list of the expressions we'll use to match all of the
# preprocessor directives. These are the same as the directives
# themselves *except* that we must use a negative lookahead assertion
# when matching "if" so it doesn't match the "if" in "ifdef."
override = {
'if' : 'if(?!def)',
}
l = map(lambda x, o=override: o.get(x, x), Table.keys())
# Turn the list of expressions into one big honkin' regular expression
# that will match all the preprocessor lines at once. This will return
# a list of tuples, one for each preprocessor line. The preprocessor
# directive will be the first element in each tuple, and the rest of
# the line will be the second element.
e = '^\s*#\s*(' + string.join(l, '|') + ')(.*)$'
# And last but not least, compile the expression.
CPP_Expression = re.compile(e, re.M)
#
# Second "subsystem" of regular expressions that we set up:
#
# Stuff to translate a C preprocessor expression (as found on a #if or
# #elif line) into an equivalent Python expression that we can eval().
#
# A dictionary that maps the C representation of Boolean operators
# to their Python equivalents.
CPP_to_Python_Ops_Dict = {
'!' : ' not ',
'!=' : ' != ',
'&&' : ' and ',
'||' : ' or ',
'?' : ' and ',
':' : ' or ',
'\r' : '',
}
CPP_to_Python_Ops_Sub = lambda m, d=CPP_to_Python_Ops_Dict: d[m.group(0)]
# We have to sort the keys by length so that longer expressions
# come *before* shorter expressions--in particular, "!=" must
# come before "!" in the alternation. Without this, the Python
# re module, as late as version 2.2.2, empirically matches the
# "!" in "!=" first, instead of finding the longest match.
# What's up with that?
l = CPP_to_Python_Ops_Dict.keys()
l.sort(lambda a, b: cmp(len(b), len(a)))
# Turn the list of keys into one regular expression that will allow us
# to substitute all of the operators at once.
expr = string.join(map(re.escape, l), '|')
# ...and compile the expression.
CPP_to_Python_Ops_Expression = re.compile(expr)
# A separate list of expressions to be evaluated and substituted
# sequentially, not all at once.
CPP_to_Python_Eval_List = [
['defined\s+(\w+)', '__dict__.has_key("\\1")'],
['defined\s*\((\w+)\)', '__dict__.has_key("\\1")'],
['/\*.*\*/', ''],
['/\*.*', ''],
['//.*', ''],
['(0x[0-9A-Fa-f]*)[UL]+', '\\1L'],
]
# Replace the string representations of the regular expressions in the
# list with compiled versions.
for l in CPP_to_Python_Eval_List:
l[0] = re.compile(l[0])
# Wrap up all of the above into a handy function.
def CPP_to_Python(s):
    """
    Converts a C pre-processor expression into an equivalent
    Python expression that can be evaluated.
    """
    # First rewrite all Boolean/ternary operators in a single pass, then
    # apply the sequential fix-ups (defined(), comment stripping, integer
    # suffixes) in list order — ordering matters for overlapping patterns.
    s = CPP_to_Python_Ops_Expression.sub(CPP_to_Python_Ops_Sub, s)
    for expr, repl in CPP_to_Python_Eval_List:
        s = expr.sub(repl, s)
    return s
del expr
del l
del override
class FunctionEvaluator:
    """
    Handles delayed evaluation of a #define function call.
    """
    def __init__(self, name, args, expansion):
        """
        Squirrels away the arguments and expansion value of a #define
        macro function for later evaluation when we must actually expand
        a value that uses it.
        """
        self.name = name
        self.args = function_arg_separator.split(args)
        try:
            # Split on the '##' token-pasting operator; each piece is
            # expanded separately and re-joined in __call__.
            expansion = string.split(expansion, '##')
        except (AttributeError, TypeError):
            # Python 1.5 throws TypeError if "expansion" isn't a string,
            # later versions throw AttributeError.
            pass
        self.expansion = expansion
    def __call__(self, *values):
        """
        Evaluates the expansion of a #define macro function called
        with the specified values.
        """
        if len(self.args) != len(values):
            raise ValueError, "Incorrect number of arguments to `%s'" % self.name
        # Create a dictionary that maps the macro arguments to the
        # corresponding values in this "call."  We'll use this when we
        # eval() the expansion so that arguments will get expanded to
        # the right values.
        locals = {}
        for k, v in zip(self.args, values):
            locals[k] = v
        parts = []
        for s in self.expansion:
            # Pieces that are not formal parameters are emitted as string
            # literals; parameters are looked up in `locals` by eval().
            if not s in self.args:
                s = repr(s)
            parts.append(s)
        statement = string.join(parts, ' + ')
        return eval(statement, globals(), locals)
# Find line continuations.
line_continuations = re.compile('\\\\\r?\n')
# Search for a "function call" macro on an expansion. Returns the
# two-tuple of the "function" name itself, and a string containing the
# arguments within the call parentheses.
function_name = re.compile('(\S+)\(([^)]*)\)')
# Split a string containing comma-separated function call arguments into
# the separate arguments.
function_ |
Orange-OpenSource/cf-php-build-pack | extensions/appdynamics/extension.py | Python | apache-2.0 | 11,666 | 0.004543 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AppDynamics Extension
Downloads, installs and configures the AppDynamics agent for PHP
"""
import os
import os.path
import logging
from extension_helpers import PHPExtensionHelper
from subprocess import call
import re
_log = logging.getLogger('appdynamics')
class AppDynamicsInstaller(PHPExtensionHelper):
_detected = None # Boolean to check if AppDynamics service is detected
_FILTER = "app[-]?dynamics"
_appdynamics_credentials = None # JSON which contains all appdynamics credentials
_account_access_key = None # AppDynamics Controller Account Access Key
_account_name = None # AppDynamics Controller Account Name
_host_name = None # AppDynamics Controller Host Address
_port = None # AppDynamics Controller Port
_ssl_enabled = None # AppDynamics Controller SSL Enabled
# Specify the Application details
_app_name = None # AppDynamics App name
_tier_name = None # AppDynamics Tier name
_node_name = None # AppDynamics Node name
    def __init__(self, ctx):
        # Delegate to the generic PHP extension helper; all AppDynamics
        # specific state lives in class attributes populated later.
        PHPExtensionHelper.__init__(self, ctx)
    def _defaults(self):
        """Returns a set of default environment variables.

        Return a dictionary of default environment variables.  These
        are merged with the build pack context when this the extension
        object is created.  The ``{PLACEHOLDER}`` tokens are expanded
        against that context by the build pack.
        """
        return {
            'APPDYNAMICS_HOST': 'packages.appdynamics.com',
            'APPDYNAMICS_VERSION': '4.3.5.9',
            'APPDYNAMICS_PACKAGE': 'appdynamics-php-agent-x64-linux-{APPDYNAMICS_VERSION}.tar.bz2',
            'APPDYNAMICS_DOWNLOAD_URL': 'https://{APPDYNAMICS_HOST}/php/{APPDYNAMICS_VERSION}/{APPDYNAMICS_PACKAGE}'
        }
def _should_compile(self):
"""
Determines if the extension should install it's payload.
This check is called during the `compile` method of the extension.
It should return true if the payload of the extension should
be installed (i.e. the `install` method is called).
"""
if AppDynamicsInstaller._detected is None:
VCAP_SERVICES_STRING = str(self._services)
if bool(re.search(AppDynamicsInstaller._FILTER, VCAP_SERVICES_STRING)):
print("AppDynamics service detected, beginning compilation")
_log.info("AppDynamics service detected")
AppDynamicsInstaller._detected = True
else:
AppDynamicsInstaller._detected = False
return AppDynamicsInstaller._detected
    def _configure(self):
        """
        Configures the extension.

        Called when `should_configure` returns true.
        """
        print("Running AppDynamics extension method _configure")
        # Pull controller credentials and app naming out of VCAP_SERVICES.
        self._load_service_info()
    def _load_service_info(self):
        """
        Get Controller binding credentials and application details for AppDynamics service
        """
        print("Setting AppDynamics credentials info...")
        services = self._ctx.get('VCAP_SERVICES', {})
        service_defs = services.get("appdynamics")
        if service_defs is None:
            # Search in user-provided service
            print("No Marketplace AppDynamics services found")
            print("Searching for AppDynamics service in user-provided services")
            user_services = services.get("user-provided")
            # NOTE(review): there is no `break` here, so if several
            # user-provided services match the filter, the *last* match
            # wins despite the log message — confirm intent.
            for user_service in user_services:
                if bool(re.search(AppDynamicsInstaller._FILTER, user_service.get("name"))):
                    print("Using the first AppDynamics service present in user-provided services")
                    AppDynamicsInstaller._appdynamics_credentials = user_service.get("credentials")
                    self._load_service_credentials()
                    try:
                        # load the app details from user-provided service
                        print("Setting AppDynamics App, Tier and Node names from user-provided service")
                        AppDynamicsInstaller._app_name = AppDynamicsInstaller._appdynamics_credentials.get("application-name")
                        print("User-provided service application-name = " + AppDynamicsInstaller._app_name)
                        AppDynamicsInstaller._tier_name = AppDynamicsInstaller._appdynamics_credentials.get("tier-name")
                        print("User-provided service tier-name = " + AppDynamicsInstaller._tier_name)
                        AppDynamicsInstaller._node_name = AppDynamicsInstaller._appdynamics_credentials.get("node-name")
                        print("User-provided service node-name = " + AppDynamicsInstaller._node_name)
                    except Exception:
                        # Any missing key falls back to default naming.
                        print("Exception occurred while setting AppDynamics App, Tier and Node names from user-provided service, using default naming")
                        self._load_app_details()
        elif len(service_defs) > 1:
            print("Multiple AppDynamics services found in VCAP_SERVICES, using credentials from first one.")
            AppDynamicsInstaller._appdynamics_credentials = service_defs[0].get("credentials")
            self._load_service_credentials()
            self._load_app_details()
        elif len(service_defs) == 1:
            print("AppDynamics service found in VCAP_SERVICES")
            AppDynamicsInstaller._appdynamics_credentials = service_defs[0].get("credentials")
            self._load_service_credentials()
            self._load_app_details()
def _load_service_credentials(self):
"""
Configure AppDynamics Controller Binding credentials
Called when Appdynamics Service is detected
"""
if (AppDynamicsInstaller._appdynamics_credentials is not None):
print("Setting AppDynamics Controller Binding Credentials")
try:
AppDynamicsInstaller._host_name = AppDynamicsInstaller._appdynamics_credentials.get("host-name")
AppDynamicsInstaller._port = AppDynamicsInstaller._appdynamics_credentials.get("port")
AppDynamicsInstaller._account_name = AppDynamicsInstaller._appdynamics_credentials.get("account-name")
AppDynamicsInstaller._account_access_key = AppDynamicsInstaller._appdynamics_credentials.get("account-access-key")
AppDynamicsInstaller._ssl_en | abled = AppDynamicsInstaller._appdynamics_credentials.get("ssl-enabled")
except Exception:
print("Error populating AppDynamics controller binding credentials")
else:
print("AppDynamics credentials empty")
def _load_app_details(self):
"""
Configure AppDynamics application details
Called when AppDynamics Service is detected
"""
print("Setting default AppDynamics App, Tier and Node names")
try:
| AppDynamicsInstaller._app_name = self._application.get("space_name") + ":" + self._application.get("application_name")
print("AppDymamics default application-name = " + AppDynamicsInstaller._app_name)
AppDynamicsInstaller._tier_name = self._application.get("application_name")
print("AppDynamics default tier-name = " + AppDynamicsInstaller._tier_name)
AppDynamicsInstaller._node_name = AppDynamicsInstaller. |
cmccabe/redfish | deploy/st_chunk_io.py | Python | apache-2.0 | 1,287 | 0.00777 | #!/usr/bin/python
from optparse import OptionParser
import filecmp
import json
import of_daemon
import of_node
import of_util
import os
import subprocess
import sys
import tempfile
import time
of_util.check_python_version()
parser = OptionParser()
(opts, args, node_list) = of_util.pa | rse_deploy_opts(parser)
if opts.bld_dir == None:
sys.stder | r.write("you must give a Redfish build directory\n")
sys.exit(1)
# get a chunk ID that we think will be unique
cid = int(time.clock())
cid = cid + (os.getpid() << 32)
# create input file
input_file = opts.bld_dir + "/hello.in"
f = open(input_file, "w")
try:
print >>f, "hello, world!"
finally:
f.close()
output_file = opts.bld_dir + "/hello.out"
for d in of_node.OfNodeIter(node_list, ["osd"]):
print "writing chunk to " + d.get_short_name()
tool_cmd = [ opts.bld_dir + "/tool/fishtool", "chunk_write",
"-i", input_file, "-k", str(d.id), hex(cid) ]
of_util.subprocess_check_output(tool_cmd)
for d in of_node.OfNodeIter(node_list, ["osd"]):
print "reading chunk from " + d.get_short_name()
tool_cmd = [ opts.bld_dir + "/tool/fishtool", "chunk_read",
"-o", output_file, "-k", str(d.id), hex(cid) ]
of_util.subprocess_check_output(tool_cmd)
filecmp.cmp(input_file, output_file)
|
jsbueno/towertower | towertower/__init__.py | Python | gpl-3.0 | 14,154 | 0.00106 | # coding: utf-8
from __future__ import division
import random
from random import randint
import pygame
from pygame.locals import *
SIZE = 800, 600
FLAGS = 0
# actually, delay in ms:
FRAMERATE = 30
WAVE_ENEMIES = [1, 5, 1]
Group = pygame.sprite.OrderedUpdates
class GameOver(Exception):
    """Signal exception used to end the game."""
    pass
class NoEnemyInRange(Exception):
    """Raised by Shot.__init__ when no enemy is within firing range;
    caught by Tower.shoot to skip the shot."""
    pass
class Vector(object):
    """Minimal 2-D vector with the arithmetic the game needs.

    Construction accepts either two scalars (``Vector(x, y)``) or a single
    two-element sequence (``Vector((x, y))``).  Any object supporting
    ``__len__`` and indexing works as the right-hand operand of the
    arithmetic operators.
    """

    def __init__(self, x=0, y=0):
        if hasattr(x, "__len__"):
            # Sequence form: take the first two components.
            self.x = x[0]
            self.y = x[1]
        else:
            self.x = x
            self.y = y

    def __getitem__(self, index):
        # Index 0 is x; every other index maps to y.
        if index == 0:
            return self.x
        return self.y

    def __len__(self):
        return 2

    def __add__(self, other):
        return Vector(self[0] + other[0], self[1] + other[1])

    def __sub__(self, other):
        return Vector(self[0] - other[0], self[1] - other[1])

    def __mul__(self, other):
        # Scalar multiplication only.
        return Vector(self[0] * other, self[1] * other)

    def __div__(self, other):
        # Scalar division (Python 2 name; aliased below for Python 3).
        return Vector(self[0] / other, self[1] / other)

    __truediv__ = __div__

    def size(self):
        """Return the Euclidean length of the vector."""
        return (self[0] ** 2 + self[1] ** 2) ** 0.5

    def distance(self, other):
        """Return the Euclidean distance to *other*."""
        return (self - other).size()

    def __repr__(self):
        return "Vector({}, {})".format(self.x, self.y)

    # Two vectors compare equal when they are closer than this distance;
    # used e.g. to detect that an enemy has reached the objective.
    epsilon = 1

    def __eq__(self, other):
        return self.distance(other) < self.epsilon

    def normalize(self):
        """Return a unit vector in this direction.

        Bug fix: the zero vector now normalizes to ``Vector(0, 0)``
        instead of the bare int 0, which broke subsequent vector
        arithmetic such as ``position += direction * speed``.
        """
        size = self.size()
        if size == 0:
            return Vector(0, 0)
        return self / size
class Event(object):
    """A typed, callable wrapper around a callback.

    ``type`` is the category string used by EventQueue to select events;
    calling the event forwards the optional instance argument straight to
    the stored callback.
    """

    def __init__(self, event_type, callback):
        self.type, self.callback = event_type, callback

    def __call__(self, instance=None):
        # Delegate directly to the wrapped callable.
        self.callback(instance)
class EventQueue(object):
    """FIFO queue of Event-like objects with type-filtered removal."""

    def __init__(self):
        self._list = []

    def post(self, event):
        """Append *event* to the end of the queue."""
        self._list.append(event)

    def pick(self, type_=None):
        """Remove and return the oldest event whose ``type`` matches.

        With ``type_=None`` the oldest event of any type is taken.
        Returns ``None`` when nothing matches.
        """
        candidates = (
            position
            for position, event in enumerate(self._list)
            if type_ is None or event.type == type_
        )
        position = next(candidates, None)
        if position is None:
            return None
        return self._list.pop(position)
def draw_bg(surface, rect=None):
    """Blank out *rect* of *surface*, or the whole surface when rect is None.

    Bug fix: previously the function fell through after filling and still
    called ``pygame.draw.rect(surface, ..., None)``, which pygame rejects.
    """
    if rect is None:
        surface.fill((0, 0, 0))
    else:
        pygame.draw.rect(surface, (0, 0, 0), rect)
class BaseTowerObject(pygame.sprite.Sprite):
    """Common base for all board sprites (towers, enemies, shots).

    Subclasses tune the class attributes ``size`` (square side in pixels)
    and ``color`` (fill color).  Instances carry a position, a pygame rect
    centred on it, and a per-instance event queue.
    """
    size = 20
    color = (255, 255, 255)

    def __init__(self, map_, position=Vector((0, 0))):
        # NOTE(review): the default Vector instance is shared between
        # calls; safe as long as positions are only rebound (+= creates a
        # new Vector) and never mutated in place — confirm for new code.
        super(BaseTowerObject, self).__init__()
        self.map_ = map_
        self._reset_image()
        self.position = position
        self.rect = pygame.Rect((0, 0, self.size, self.size))
        self.rect.center = position
        self.events = EventQueue()

    def _reset_image(self):
        # (Re)create the sprite surface as a solid square of self.color.
        self.image = pygame.surface.Surface((self.size, self.size))
        self.image.fill(self.color)

    def update(self):
        # Per-frame hook: keep the energy bar overlay current.
        self.draw_energy_bar()

    def draw_energy_bar(self):
        """Overlay a (total, remaining) energy bar if the sprite has one.

        Only sprites exposing an ``energy`` property (see Enemy.energy)
        get a bar; redrawing is skipped while the value is unchanged.
        """
        if hasattr(self, "energy"):
            total, remaining = energy = self.energy
            # Skip the redraw when energy has not changed since last frame.
            if getattr(self, "_last_energy", None) == energy:
                return
            self._reset_image()
            self._last_energy = energy
            # Bar geometry relative to the sprite square.
            width = int(self.size * 0.7)
            y = int(self.size * 0.8)
            x = int(self.size * 0.15)
            if remaining < total:
                # Black background line, then a colored fill proportional
                # to remaining energy (green / yellow / red thresholds).
                pygame.draw.line(self.image, (0, 0, 0), (x, y), (x + width, y), 3)
                color = (0, 255, 0) if remaining > total / 2 else (255, 255, 0) if remaining > total / 4 else (255, 0, 0)
                pygame.draw.line(self.image, color, (x, y), (x + int(width * remaining / total), y), 3)
class Targetting(BaseTowerObject):
    """Sprite that moves toward an objective each frame.

    ``movement_type`` names the movement strategy method: "tracking"
    re-aims at the objective on every update, "straight" locks the
    direction on the first update and keeps it.
    """
    movement_type = "tracking"
    # possible_values: ("tracking", "straight")

    def update(self):
        super(Targetting, self).update()
        # Dispatch to self.tracking() or self.straight() by name.
        movement_function = getattr(self, self.movement_type)
        movement_function()
        self.rect.center = self.position

    def tracking(self):
        # The objective may be a single sprite or a sprite group; groups
        # are unwrapped to their first sprite.
        # NOTE(review): sprites()[0] raises IndexError on an empty group
        # before the falsy check below runs — confirm groups are never
        # empty when this is called.
        objective = self.objective if isinstance(self.objective, pygame.sprite.Sprite) else self.objective.sprites()[0]
        if not objective:
            return
        self.direction = (objective.position - self.position).normalize()
        return self._update(self.direction)

    def straight(self):
        # Compute the direction only once; afterwards keep flying straight.
        if not hasattr(self, "direction"):
            objective = self.objective if isinstance(self.objective, pygame.sprite.Sprite) else self.objective.sprites()[0]
            target_position = Vector(objective.position)
            self.direction = (target_position - self.position).normalize()
        return self._update(self.direction)

    def _update(self, direction):
        # Advance one frame along *direction* at this sprite's speed.
        self.position += direction * self.speed
class Enemy(Targetting):
    """A unit that walks toward the map objective and despawns on arrival.

    The class attributes are per-type tuning knobs overridden by the
    StrongEnemy / FastEnemy subclasses.
    """
    speed = 1
    size = 15
    color = (255, 0, 0)
    # NOTE(review): 'speed' is assigned twice with the same value; the
    # second assignment wins — harmless but redundant.
    speed = 1
    endurance = 5

    def __init__(self, map_, position):
        super(Enemy, self).__init__(map_, position)
        self.speed = self.__class__.speed
        # NOTE(review): 'stamina' is never read in this file; damage is
        # applied to 'endurance' (see Shot.update) — looks like dead code.
        self.stamina = self.__class__.endurance

    def update(self):
        # Always chase the first objective sprite (Python 2 '.next()').
        self.objective = iter(self.map_.objective).next()
        super(Enemy, self).update()
        if self.position == self.objective.position:
            # Reached the goal (within Vector.epsilon): register with the
            # objective and remove this sprite from all groups.
            self.objective.enemies_reached.add(self)
            self.kill()

    @property
    def energy(self):
        # (total, remaining) pair consumed by draw_energy_bar().
        return self.__class__.endurance, self.endurance
class StrongEnemy(Enemy):
    # Harder to kill: 5x the base endurance, slightly smaller sprite.
    color = (255, 128, 0)
    size = 12
    endurance = 25
class FastEnemy(Enemy):
    # Fragile but quick: low endurance, 4x the base speed.
    color = (128, 255, 0)
    size = 18
    endurance = 3
    speed = 4
class Tower(BaseTowerObject):
    """Stationary tower that periodically fires shots at enemies."""
    size = 15
    color = (0, 0, 255)
    # Projectile class, given by name and resolved via globals() at init.
    shot_type = "Shot"
    # Frames between consecutive shots.
    repeat_rate = 15

    def __init__(self, *args):
        super(Tower, self).__init__(*args)
        if isinstance(self.shot_type, basestring):
            # TODO: create a game class registry from where to retrieve this
            self.shot_type = globals()[self.shot_type]
        self.last_shot = self.repeat_rate

    def update(self):
        super(Tower, self).update()
        # Count down to the next shot; reset the timer when firing.
        self.last_shot -= 1
        if self.last_shot <= 0:
            self.last_shot = self.repeat_rate
            if self.shoot():
                # Fire at most one queued "after_shot" event per shot.
                event = self.events.pick("after_shot")
                if event:
                    event(self)

    def shoot(self):
        """Try to spawn a projectile.

        Returns True on success, False when Shot.__init__ reports that
        no enemy is within range.
        """
        try:
            self.map_.shots.add(self.shot_type(
                self.map_, Vector(self.position), piercing=getattr(self, "piercing", None)))
            return True
        except NoEnemyInRange:
            pass
        return False
class TeleTower(Tower):
    # Tower variant that fires TeleShot projectiles (name resolved via
    # globals() in Tower.__init__).
    size = 15
    color = (0, 255, 128)
    shot_type = "TeleShot"
class Shot(Targetting):
size = 3
color = (0, 255, 0)
speed = 5
range_ = 800
piercing = 2
movement_type = "straight"
def __init__(self, *args, **kw):
super(Shot, self).__init__(*args)
if kw.get("piercing", False):
self.piercing = kw.pop("piercing")
self.start_pos = self.position
objective = self.get_closer_enemy()
if objective and self.position.distance(objective.position) <= self.range_:
self.objective = pygame.sprite.GroupSingle()
self.objective.sprite = objective
else:
raise NoEnemyInRange
def get_closer_enemy(self):
distance = max(SIZE) * 2
closest = None
for enemy in self.map_.enemies:
if self.position.distance(enemy.position) < distance:
closest = enemy
distance = self.position.distance(enemy.position)
return closest
def update(self):
if not self.objective:
self.kill()
return
super(Shot, self).update()
shot_enemy = None
for shot_enemy in pygame.sprite.spritecollide(self, self.map_.enemies, False):
# No surprises in Python as long as we are using imutable objects
# to keep data: the class "endurance" atribute
# is properly assigned to ths instance class at the first "-="
shot_enemy.endurance -= self.piercing
if shot_enemy.endurance <= 0:
shot_enemy.kill()
if shot_enemy:
self.kill()
if self.position.distance(self.start_pos) > self.range_:
self. |
Statoil/libecl | python/tests/util_tests/test_thread_pool.py | Python | gpl-3.0 | 2,274 | 0.003078 | import time
from ecl.util.util import ThreadPool
from ecl.util.util.thread_pool import Task
from tests import EclTest
class ThreadPoolTest(EclTest):
    """Tests for ecl.util.util.ThreadPool and the Task helper class."""

    def sleepTask(self, *args, **kwargs):
        # Worker used to occupy a pool slot: sleeps args[0] seconds.
        time.sleep(args[0])

    def numberer(self, index, result):
        # Worker used to record completion: marks its slot in *result*.
        result[index] = True

    def test_pool_creation(self):
        # A fresh pool reports its size and counts queued tasks, but runs
        # nothing until started.
        pool = ThreadPool(4)
        self.assertEqual(4, pool.poolSize())

        def noop(*args, **kwargs):
            pass

        pool.addTask(noop)
        self.assertEqual(1, pool.taskCount())
        pool.addTask(noop, 1, 2, 3)
        self.assertEqual(2, pool.taskCount())
        pool.addTask(noop, 1, 2, 3, name="name", group="group", purpose="porpoise")
        self.assertEqual(3, pool.taskCount())
        self.assertEqual(pool.runningCount(), 0)
        self.assertEqual(pool.doneCount(), 0)

    def test_pool_execution(self):
        # All queued tasks run to completion after a non-blocking start.
        pool = ThreadPool(4)
        result = {}
        for index in range(10):
            pool.addTask(self.numberer, index, result=result)
        pool.nonBlockingStart()
        pool.join()
        for index in range(10):
            self.assertTrue(index in result)
            self.assertTrue(result[index])
        self.assertFalse(pool.hasFailedTasks())

    def test_pool_unbound_fail(self):
        # Passing the unbound method (so 0 binds to 'self') makes the
        # task raise; the pool must report the failure.
        pool = ThreadPool(4)
        self.assertEqual(4, pool.poolSize())
        pool.addTask(ThreadPoolTest.numberer, 0, {})
        pool.nonBlockingStart()
        pool.join()
        self.assertTrue(pool.hasFailedTasks())

    def test_fill_pool(self):
        # With 10 two-second tasks and 4 workers, exactly 4 run at once
        # shortly after start and none has finished yet.
        pool = ThreadPool(4)
        for index in range(10):
            pool.addTask(self.sleepTask, 2)
        pool.nonBlockingStart()
        time.sleep(0.5)
        self.assertEqual(pool.doneCount(), 0)
        self.assertEqual(pool.runningCount(), 4)
        pool.join()

    def test_task(self):
        # A Task moves through not-started -> running -> done.
        def sleeping():
            time.sleep(1)

        task = Task(sleeping)
        self.assertFalse(task.hasStarted())
        self.assertFalse(task.isRunning())
        self.assertFalse(task.isDone())
        task.start()
        self.assertTrue(task.hasStarted())
        self.assertTrue(task.isRunning())
        task.join()
        self.assertFalse(task.isRunning())
        self.assertTrue(task.isDone())
|
jrbl/invenio | modules/websearch/lib/search_engine.py | Python | gpl-2.0 | 270,395 | 0.006746 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Softwar | e Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have | received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
## import general modules:
import cgi
import cStringIO
import copy
import string
import os
import re
import time
import urllib
import urlparse
import zlib
import sys
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
## import Invenio stuff:
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_BIBFORMAT_HIDDEN_TAGS, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS
from invenio.search_engine_config import InvenioWebSearchUnknownCollectionError, InvenioWebSearchWildcardLimitError
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibrecord import create_record
from invenio.bibrank_record_sorter import get_bibrank_methods, is_method_valid, rank_records as rank_records_bibrank
from invenio.bibrank_downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.bibindex_engine_stemmer import stem
from invenio.bibindex_engine_tokenizer import wash_author_name, author_name_requires_phrase_search
from invenio.bibformat import format_record, format_records, get_output_format_content_type, create_excel
from invenio.bibformat_config import CFG_BIBFORMAT_USE_OLD_BIBFORMAT
from invenio.bibrank_downloads_grapher import create_download_history_graph_and_box
from invenio.bibknowledge import get_kbr_values
from invenio.data_cacher import DataCacher
from invenio.websearch_external_collections import print_external_results_overview, perform_external_collection_search
from invenio.access_control_admin import acc_get_action_id
from invenio.access_control_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.websearchadminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from invenio.intbitset import intbitset
from invenio.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.access_control_engine import acc_authorize_action
from invenio.errorlib import register_exception
from invenio.textutils import encode_for_xml, wash_for_utf8, strip_accents
from invenio.htmlutils import get_mathjax_header
from invenio.htmlutils import nmtoken_from_string
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
webcomment_templates = invenio.template.load('webcomment')
from invenio.bibrank_citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, get_self_cited_by, \
get_refersto_hitset, get_citedby_hitset
from invenio.bibrank_citation_grapher import create_citation_history_graph_and_box
from invenio.dbquery import run_sql, run_sql_with_limit, \
get_table_update_time, Error
from invenio.webuser import getUid, collect_user_info, session_param_set
from invenio.webpage import pageheaderonly, pagefooteronly, create_error_box
from invenio.messages import gettext_set_language
from invenio.search_engine_query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio import webinterface_handler_config as apache
from invenio.solrutils import solr_get_bitset
try:
import invenio.template
websearch_templates = invenio.template.load('websearch')
except:
pass
from invenio.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
## global vars:
cfg_nb_browse_seen_records = 100 # limit of the number of records to check when browsing certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose collection list nicely ordered or alphabetical?
## precompile some often-used regexp for speed reasons:
re_word = re.compile('[\s]')
re_quotes = re.compile('[\'\"]')
re_doublequote = re.compile('\"')
re_equal = re.compile('\=')
re_logical_and = re.compile('\sand\s', re.I)
re_logical_or = re.compile('\sor\s', re.I)
re_logical_not = re.compile('\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile("'(.*?)'")
re_pattern_double_quotes = re.compile("\"(.*?)\"")
re_pattern_regexp_quotes = re.compile("\/(.*?)\/")
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile("\$TODAY\$")
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
class RestrictedCollectionDataCacher(DataCacher):
    """DataCacher holding the names of all restricted collections.

    The cache is filled from the access-control tables and is considered
    stale whenever either underlying table's update time advances.
    """
    def __init__(self):
        def cache_filler():
            # Collect every collection name bound to the VIEWRESTRCOLL
            # action, i.e. every collection that requires authorization.
            ret = []
            try:
                res = run_sql("""SELECT DISTINCT ar.value
                                  FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id
                                  WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,))
            except Exception:
                # database problems, return empty cache
                return []
            for coll in res:
                ret.append(coll[0])
            return ret

        def timestamp_verifier():
            # The newer of the two ACL tables' modification times decides
            # whether the cache must be rebuilt.
            return max(get_table_update_time('accROLE_accACTION_accARGUMENT'), get_table_update_time('accARGUMENT'))

        DataCacher.__init__(self, cache_filler, timestamp_verifier)
def collection_restricted_p(collection, recreate_cache_if_needed=True):
    """Return True when *collection* is restricted (needs authorization).

    Pass recreate_cache_if_needed=False to skip the freshness check when
    the caller has already refreshed the restricted-collection cache.
    """
    if recreate_cache_if_needed:
        restricted_collection_cache.recreate_cache_if_needed()
    return collection in restricted_collection_cache.cache
try:
restricted_collection_cache.is_ok_p
except Exception:
restricted_collection_cache = RestrictedCollectionDataCacher()
def ziplist(*lists):
"""Just like zip(), but returns lists of lists instead of lists of tuples
Example:
zip([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[(f1, p1, op1), (f2, p2, op2), (f3, p3, '')]
ziplist([f1, f2, f3], [p1, p2, p3], [op1, op |
alejob/mdanalysis | package/MDAnalysis/migration/fixes/fix_agmethods2.py | Python | gpl-2.0 | 1,114 | 0.004488 | '''
run with: python ten2eleven.py -f agmethods2 test_dummy_old_MDA_code.py
Author: Tyler Reddy
'''
from lib2to3.fixer_base import BaseFix
from lib2to3.fixer_util import Name, Call, LParen, RParen, ArgList, Dot
from lib2to3 import pytree
class FixAgmethods2(BaseFix):
    """lib2to3 fixer: rewrite old AtomGroup geometry method calls.

    Transforms ``x.bond()`` / ``x.angle()`` / ``x.torsion()`` /
    ``x.improper()`` into ``x.<name>.value()``, renaming ``torsion``
    to ``dihedral`` for the newer MDAnalysis API.
    """

    # Match: <head>.<method>() with an empty argument list.
    PATTERN = """
    power< head =any+
         trailer< dot = '.' method=('bond'|'angle'|'torsion'|
                                    'improper')>
               parens=trailer< '(' ')' >
    tail=any*>
    """

    def transform(self, node, results):
        head = results['head']
        method = results['method'][0]
        tail = results['tail']
        syms = self.syms
        method_name = method.value
        # API rename: torsion -> dihedral.
        if method_name == 'torsion':
            method_name = 'dihedral'
        # Clone the matched nodes: lib2to3 subtrees must not be reused
        # in-place when building a replacement node.
        head = [n.clone() for n in head]
        tail = [n.clone() for n in tail]
        # NOTE(review): 'tail' is cloned but never appended to 'args', so
        # any trailers after the call would be dropped — confirm the
        # pattern can only match with an empty tail.
        args = head + [pytree.Node(syms.trailer, [Dot(), Name(method_name, prefix = method.prefix), Dot(), Name('value'), LParen(), RParen()])]
        new = pytree.Node(syms.power, args)
        return new
|
beetbox/beets | test/test_dbcore.py | Python | mit | 23,723 | 0.000042 | # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the DBCore database abstraction.
"""
import os
import shutil
import sqlite3
import unittest
from test import _common
from beets import dbcore
from tempfile import mkstemp
# Fixture: concrete database and model classes. For migration tests, we
# have multiple models with different numbers of fields.
class SortFixture(dbcore.query.FieldSort):
pass
class QueryFixture(dbcore.query.Query):
    """Minimal concrete Query used to register a named query type."""

    def __init__(self, pattern):
        self.pattern = pattern

    def clause(self):
        # No SQL clause: forces the slow-path (Python-side) matching.
        return None, ()

    def match(self):
        # NOTE(review): the dbcore Query.match hook normally receives the
        # item under test; this fixture takes no argument — confirm it is
        # never actually invoked with one.
        return True
class ModelFixture1(dbcore.Model):
_table = 'test'
_flex_table = 'testflex'
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
'field_two': dbcore.types.STRING,
}
_types = {
'some_float_field': dbcore.types.FLOAT,
}
_sorts = {
'some_sort': SortFixture,
}
_queries = {
'some_query': QueryFixture,
}
@classmethod
def _getters(cls):
return {}
def _template_funcs(self):
return {}
class DatabaseFixture1(dbcore.Database):
_models = (ModelFixture1,)
pass
class ModelFixture2(ModelFixture1):
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
'field_two': dbcore.types.INTEGER,
}
class DatabaseFixture2(dbcore.Database):
_models = (ModelFixture2,)
pass
class ModelFixture3(ModelFixture1):
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
'field_two': dbcore.types.INTEGER,
'field_three': dbcore.types.INTEGER,
}
class DatabaseFixture3(dbcore.Database):
_models = (ModelFixture3,)
pass
class ModelFixture4(ModelFixture1):
_fields = {
'id': dbcore.types.PRIMARY_ID,
'field_one': dbcore.types.INTEGER,
'field_two': dbcore.types.INTEGER,
'field_three': dbcore.types.INTEGER,
'field_four': dbcore.types.INTEGER,
}
class DatabaseFixture4(dbcore.Database):
_models = (ModelFixture4,)
pass
class AnotherModelFixture(ModelFixture1):
_table = 'another'
_flex_table = 'anotherflex'
_fields = {
'id': dbcore.types.PRIMARY_ID,
'foo': dbcore.types.INTEGER,
}
class ModelFixture5(ModelFixture1):
_fields = {
'some_string_field': dbcore.types.STRING,
'some_float_field': dbcore.types.FLOAT,
'some_boolean_field': dbcore.types.BOOLEAN,
}
class DatabaseFixture5(dbcore.Database):
_models = (ModelFixture5,)
pass
class DatabaseFixtureTwoModels(dbcore.Database):
_models = (ModelFixture2, AnotherModelFixture)
pass
class ModelFixtureWithGetters(dbcore.Model):
@classmethod
def _getters(cls):
return {'aComputedField': (lambda s: 'thing')}
def _template_funcs(self):
return {}
@_common.slow_test()
class MigrationTest(unittest.TestCase):
    """Tests the ability to change the database schema between
    versions.

    A template library with the two-field schema is built once per test
    class; each test copies it and reopens the copy with a model fixture
    that declares a different field set, then inspects the resulting
    table columns.
    """
    @classmethod
    def setUpClass(cls):
        handle, cls.orig_libfile = mkstemp('orig_db')
        os.close(handle)
        # Set up a database with the two-field schema.
        old_lib = DatabaseFixture2(cls.orig_libfile)
        # Add an item to the old library.
        old_lib._connection().execute(
            'insert into test (field_one, field_two) values (4, 2)'
        )
        old_lib._connection().commit()
        del old_lib

    @classmethod
    def tearDownClass(cls):
        os.remove(cls.orig_libfile)

    def setUp(self):
        # Each test works on a throwaway copy of the template database so
        # it cannot corrupt the shared fixture.
        handle, self.libfile = mkstemp('db')
        os.close(handle)
        shutil.copyfile(self.orig_libfile, self.libfile)

    def tearDown(self):
        os.remove(self.libfile)

    def test_open_with_same_fields_leaves_untouched(self):
        new_lib = DatabaseFixture2(self.libfile)
        c = new_lib._connection().cursor()
        c.execute("select * from test")
        row = c.fetchone()
        self.assertEqual(len(row.keys()), len(ModelFixture2._fields))

    def test_open_with_new_field_adds_column(self):
        new_lib = DatabaseFixture3(self.libfile)
        c = new_lib._connection().cursor()
        c.execute("select * from test")
        row = c.fetchone()
        self.assertEqual(len(row.keys()), len(ModelFixture3._fields))

    def test_open_with_fewer_fields_leaves_untouched(self):
        # Opening with a smaller schema must not drop existing columns.
        new_lib = DatabaseFixture1(self.libfile)
        c = new_lib._connection().cursor()
        c.execute("select * from test")
        row = c.fetchone()
        self.assertEqual(len(row.keys()), len(ModelFixture2._fields))

    def test_open_with_multiple_new_fields(self):
        new_lib = DatabaseFixture4(self.libfile)
        c = new_lib._connection().cursor()
        c.execute("select * from test")
        row = c.fetchone()
        self.assertEqual(len(row.keys()), len(ModelFixture4._fields))

    def test_extra_model_adds_table(self):
        # A database declaring an additional model creates its table.
        new_lib = DatabaseFixtureTwoModels(self.libfile)
        try:
            new_lib._connection().execute("select * from another")
        except sqlite3.OperationalError:
            self.fail("select failed")
class TransactionTest(unittest.TestCase):
    """Checks that transactions advance the database revision counter
    only when they actually mutate data."""

    def setUp(self):
        self.db = DatabaseFixture1(':memory:')

    def tearDown(self):
        self.db._connection().close()

    def test_mutate_increase_revision(self):
        # A mutating statement must bump the revision.
        old_rev = self.db.revision
        with self.db.transaction() as tx:
            tx.mutate(
                'INSERT INTO {} '
                '(field_one) '
                'VALUES (?);'.format(ModelFixture1._table),
                (111,),
            )
        self.assertGreater(self.db.revision, old_rev)

    def test_query_no_increase_revision(self):
        # A read-only query must leave the revision unchanged.
        old_rev = self.db.revision
        with self.db.transaction() as tx:
            tx.query('PRAGMA table_info(%s)' % ModelFixture1._table)
        self.assertEqual(self.db.revision, old_rev)
class ModelTest(unittest.TestCase):
def setUp(self):
self.db = DatabaseFixture1(':memory:')
def tearDown(self):
self.db._connection().close()
def test_add_model(self):
model = ModelFixture1()
model.add(self.db)
rows = self.db._connection().execute('select * from test').fetchall()
self.assertEqual(len(rows), 1)
def test_store_fixed_field(self):
model = ModelFixture1()
model.add(self.db)
model.field_one = 123
model.store()
row = self.db._connection().execute('select * from test').fetchone()
self.assertEqual(row['field_one'], 123)
def test_revision(self):
old_rev = self.db.revision
model = ModelFixture1()
model.add(self.db)
model.store()
self.assertEqual(model._revision, self.db.revision)
self.assertGreater(self.db.revision, old_rev)
mid_rev = self.db.revision
model2 = ModelFixture1()
model2.add(self.db)
model2.store()
self.assertGreater(model2._revision, mid_rev)
self.assertGreater(self.db.revision, model._re | vision)
# revision changed, so the model should be re-loaded
model.load()
self.assertEqual(model._revision, self.db.revision)
# revision did not change, so no reload
mod2_old_rev = model2._revision
model2.load()
self.assertEqual(model2._revision, mod2_old_rev)
def test_retrieve_by_ | id(self):
model = ModelFixture1()
model.add(self.db)
|
apache/incubator-superset | superset/dashboards/commands/importers/v1/__init__.py | Python | apache-2.0 | 6,297 | 0.000794 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, List, Set, Tuple
from marshmallow import Schema
from sqlalchemy.orm import Session
from sqlalchemy.sql import select
from superset.charts.commands.importers.v1.utils import import_chart
from superset.charts.schemas import ImportV1ChartSchema
from superset.commands.importers.v1 import ImportMode | lsCommand
from superset.dashboards.commands.exceptions import DashboardImportError
from superset.dashboards.commands.importers.v1.utils import (
find_chart_uuids,
find_native_filter_datasets,
import_dashboard,
update_id_refs,
)
from superset.dashboar | ds.dao import DashboardDAO
from superset.dashboards.schemas import ImportV1DashboardSchema
from superset.databases.commands.importers.v1.utils import import_database
from superset.databases.schemas import ImportV1DatabaseSchema
from superset.datasets.commands.importers.v1.utils import import_dataset
from superset.datasets.schemas import ImportV1DatasetSchema
from superset.models.dashboard import dashboard_slices
class ImportDashboardsCommand(ImportModelsCommand):

    """Import dashboards"""

    dao = DashboardDAO
    model_name = "dashboard"
    prefix = "dashboards/"
    schemas: Dict[str, Schema] = {
        "charts/": ImportV1ChartSchema(),
        "dashboards/": ImportV1DashboardSchema(),
        "datasets/": ImportV1DatasetSchema(),
        "databases/": ImportV1DatabaseSchema(),
    }
    import_error = DashboardImportError

    # TODO (betodealmeida): refactor to use code from other commands
    # pylint: disable=too-many-branches, too-many-locals
    @staticmethod
    def _import(
        session: Session, configs: Dict[str, Any], overwrite: bool = False
    ) -> None:
        """Import dashboards together with every object they depend on.

        Walks the config bundle in dependency order — databases, then
        datasets, then charts, then the dashboards themselves — importing
        only objects actually referenced by a dashboard.  Only dashboards
        honour *overwrite*; related objects are imported without
        overwriting existing ones.
        """
        # discover charts and datasets associated with dashboards
        chart_uuids: Set[str] = set()
        dataset_uuids: Set[str] = set()
        for file_name, config in configs.items():
            if file_name.startswith("dashboards/"):
                chart_uuids.update(find_chart_uuids(config["position"]))
                # Native-filter configurations may reference datasets that
                # no chart uses; those must be imported as well.
                dataset_uuids.update(
                    find_native_filter_datasets(config.get("metadata", {}))
                )

        # discover datasets associated with charts
        for file_name, config in configs.items():
            if file_name.startswith("charts/") and config["uuid"] in chart_uuids:
                dataset_uuids.add(config["dataset_uuid"])

        # discover databases associated with datasets
        database_uuids: Set[str] = set()
        for file_name, config in configs.items():
            if file_name.startswith("datasets/") and config["uuid"] in dataset_uuids:
                database_uuids.add(config["database_uuid"])

        # import related databases
        database_ids: Dict[str, int] = {}
        for file_name, config in configs.items():
            if file_name.startswith("databases/") and config["uuid"] in database_uuids:
                database = import_database(session, config, overwrite=False)
                database_ids[str(database.uuid)] = database.id

        # import datasets with the correct parent ref
        dataset_info: Dict[str, Dict[str, Any]] = {}
        for file_name, config in configs.items():
            if (
                file_name.startswith("datasets/")
                and config["database_uuid"] in database_ids
            ):
                config["database_id"] = database_ids[config["database_uuid"]]
                dataset = import_dataset(session, config, overwrite=False)
                dataset_info[str(dataset.uuid)] = {
                    "datasource_id": dataset.id,
                    "datasource_type": dataset.datasource_type,
                    "datasource_name": dataset.table_name,
                }

        # import charts with the correct parent ref
        chart_ids: Dict[str, int] = {}
        for file_name, config in configs.items():
            if (
                file_name.startswith("charts/")
                and config["dataset_uuid"] in dataset_info
            ):
                # update datasource id, type, and name
                config.update(dataset_info[config["dataset_uuid"]])
                chart = import_chart(session, config, overwrite=False)
                chart_ids[str(chart.uuid)] = chart.id

        # store the existing relationship between dashboards and charts
        existing_relationships = session.execute(
            select([dashboard_slices.c.dashboard_id, dashboard_slices.c.slice_id])
        ).fetchall()

        # import dashboards
        dashboard_chart_ids: List[Tuple[int, int]] = []
        for file_name, config in configs.items():
            if file_name.startswith("dashboards/"):
                # Rewrite chart/dataset references in the dashboard layout
                # from export UUIDs to the freshly imported database ids.
                config = update_id_refs(config, chart_ids, dataset_info)
                dashboard = import_dashboard(session, config, overwrite=overwrite)
                for uuid in find_chart_uuids(config["position"]):
                    if uuid not in chart_ids:
                        break
                    chart_id = chart_ids[uuid]
                    if (dashboard.id, chart_id) not in existing_relationships:
                        dashboard_chart_ids.append((dashboard.id, chart_id))

        # set ref in the dashboard_slices table
        values = [
            {"dashboard_id": dashboard_id, "slice_id": chart_id}
            for (dashboard_id, chart_id) in dashboard_chart_ids
        ]
        # pylint: disable=no-value-for-parameter # sqlalchemy/issues/4656
        session.execute(dashboard_slices.insert(), values)
|
hzlf/discogs-proxy | website/apps/dgsproxy/stats.py | Python | mit | 2,133 | 0.004219 | # -*- coding: utf-8 -*-
from decimal import Decimal
import logging
from django.core.cache import cache
from django.db.models import Sum
log = logging.getLogger(__name__)
CACHE_PREFIX = 'dgsproxy_stats_'
class ProxyStats(object):
    """Collects discogs-proxy statistics from the Django cache and the DB.

    Hit counters and rate-limit values are maintained in the shared
    Django cache by set_hit()/set_rate_limit() below; resource counts and
    the total cache size come from the CachedResource table.
    """

    def __init__(self):
        self.hits_proxied = 0
        self.hits_cached = 0
        self.num_resources = 0
        self.cache_size = 0
        self.rate_limit = 0
        self.rate_limit_remain = 0

    def build_stats(self):
        """Refresh all counters from the cache backend and the database."""
        # Get values from cache.
        # NOTE: the '_rate_limt' spelling is a historical typo shared with
        # set_rate_limit(); both sides must keep using the same key.
        self.hits_proxied = cache.get('%s_hits_backend' % CACHE_PREFIX, 0)
        self.hits_cached = cache.get('%s_hits_cache' % CACHE_PREFIX, 0)
        self.rate_limit = cache.get('%s_rate_limt' % CACHE_PREFIX, 0)
        self.rate_limit_remain = cache.get('%s_rate_limt_remain' % CACHE_PREFIX, 0)

        # Get values from db (imported here, as at module import time the
        # app registry may not be ready).
        from dgsproxy.models import CachedResource
        self.num_resources = CachedResource.objects.count()
        # aggregate() yields None for 'filesize__sum' on an empty table;
        # coalesce to 0 instead of the old bare try/except, which also
        # silently swallowed real database errors.
        total_size = CachedResource.objects.aggregate(Sum('filesize'))['filesize__sum'] or 0
        self.cache_size = int(total_size)

    def get_stats(self):
        """Return a dict of the current statistics (cache size in MiB)."""
        self.build_stats()
        stats = {
            'hits_proxied': self.hits_proxied,
            'hits_cached': self.hits_cached,
            'num_resources': self.num_resources,
            'cache_size': '%.2f' % (float(self.cache_size) / 1024 / 1024),
            'rate_limit': self.rate_limit,
            'rate_limit_remain': self.rate_limit_remain
        }
        return stats

    def set_rate_limit(self, limit, remain):
        # Dead stub kept for backward compatibility; the module-level
        # set_rate_limit() function does the actual work.
        pass
def set_rate_limit(limit, remain):
    """Persist the upstream API rate limit and its remaining quota in the cache.

    Reconstructed: the original line was split by a stray '|' marker, and a
    dead trailing `pass` has been removed.
    """
    log.debug('Update rate-limit: %s / %s' % (remain, limit))
    # Write values to cache (keys intentionally keep the '_rate_limt'
    # spelling that ProxyStats.build_stats() reads back).
    cache.set('%s_rate_limt' % CACHE_PREFIX, int(limit))
    cache.set('%s_rate_limt_remain' % CACHE_PREFIX, int(remain))
def set_hit(target):
    """Increment the hit counter for *target* ('cache' or 'backend')."""
    if target not in ('cache', 'backend'):
        return
    log.debug('Add %s hit' % target)
    counter_key = '%s_hits_%s' % (CACHE_PREFIX, target)
    if cache.get(counter_key):
        cache.incr(counter_key)
    else:
        # First hit for this target: create the counter.
        cache.set(counter_key, 1)
|
windelbouwman/ppci-mirror | ppci/utils/binary_txt.py | Python | bsd-2-clause | 892 | 0 | """ Small helper to convert binary data into text and vice-versa.
"""
import binascii
from .chunk import chunks
def bin2asc(data: bytes):
    """ Encode binary data as ascii hex.

    Data longer than 30 bytes is returned as a list of hex strings (one per
    chunk from chunks()); shorter data as a single hex string.
    Reconstructed: the original return line was split by a stray '|' marker.
    """
    if len(data) > 30:
        return [binascii.hexlify(part).decode("ascii") for part in chunks(data)]
    return binascii.hexlify(data).decode("ascii")
def asc2bin(data) -> bytes:
    """ Decode ascii hex (a str, or a list of hex-string chunks) into binary.

    Reconstructed: the original isinstance line was split by a stray '|' marker.
    """
    if isinstance(data, str):
        return bytes(binascii.unhexlify(data.encode("ascii")))
    elif isinstance(data, list):
        # Large payloads are stored as a list of hex chunks; join them back.
        return b"".join(binascii.unhexlify(part.encode("ascii")) for part in data)
    else:  # pragma: no cover
        raise NotImplementedError(str(type(data)))
|
skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/twisted/trial/test/test_pyunitcompat.py | Python | gpl-2.0 | 8,688 | 0.001957 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import
import sys
import traceback
from zope.interface import implementer
from twisted.python.compat import _PY3
from twisted.python.failure import Failure
from twisted.trial.unittest import SynchronousTestCase, PyUnitResultAdapter
from twisted.trial.itrial import IReporter, ITestCase
import unittest as pyunit
class TestPyUnitTestCase(SynchronousTestCase):
    """
    A plain pyunit TestCase must be adaptable to Trial's L{ITestCase}.
    """
    class PyUnitTest(pyunit.TestCase):
        # Minimal stdlib-unittest case used as the adaptation subject.
        def test_pass(self):
            pass
    def setUp(self):
        # Adapt the pyunit test to Trial's ITestCase interface.
        self.original = self.PyUnitTest('test_pass')
        self.test = ITestCase(self.original)
    def test_callable(self):
        """
        Tests must be callable in order to be used with Python's unittest.py.
        """
        self.assertTrue(callable(self.test),
                        "%r is not callable." % (self.test,))
# Remove this when we port twisted.trial._synctest to Python 3:
if _PY3:
del TestPyUnitTestCase
class TestPyUnitResult(SynchronousTestCase):
"""
Tests to show that PyUnitResultAdapter wraps TestResult objects from the
standard library 'unittest' module in such a way as to make them usable and
useful from Trial.
"""
# Once erroneous is ported to Python 3 this can be replaced with
# erroneous.ErrorTest:
    class ErrorTest(SynchronousTestCase):
        """
        A test case which has a L{test_foo} which will raise an error.
        @ivar ran: boolean indicating whether L{test_foo} has been run.
        """
        ran = False
        def test_foo(self):
            """
            Set C{self.ran} to True and raise a C{ZeroDivisionError}
            """
            self.ran = True
            # Deliberately trigger a ZeroDivisionError.
            1/0
    def test_dontUseAdapterWhenReporterProvidesIReporter(self):
        """
        The L{PyUnitResultAdapter} is only used when the result passed to
        C{run} does *not* provide L{IReporter}.
        """
        @implementer(IReporter)
        class StubReporter(object):
            """
            A reporter which records data about calls made to it.
            @ivar errors: Errors passed to L{addError}.
            @ivar failures: Failures passed to L{addFailure}.
            """
            def __init__(self):
                self.errors = []
                self.failures = []
            def startTest(self, test):
                """
                Do nothing.
                """
            def stopTest(self, test):
                """
                Do nothing.
                """
            def addError(self, test, error):
                """
                Record the error.
                """
                self.errors.append(error)
        test = self.ErrorTest("test_foo")
        result = StubReporter()
        test.run(result)
        # Receiving a Failure object shows the reporter was used directly,
        # without the pyunit adapter wrapping it.
        self.assertIsInstance(result.errors[0], Failure)
    def test_success(self):
        """
        A passing trial test is recorded as a success on a plain
        L{pyunit.TestResult}.
        """
        class SuccessTest(SynchronousTestCase):
            ran = False
            def test_foo(s):
                s.ran = True
        test = SuccessTest('test_foo')
        result = pyunit.TestResult()
        test.run(result)
        self.failUnless(test.ran)
        self.assertEqual(1, result.testsRun)
        self.failUnless(result.wasSuccessful())
    def test_failure(self):
        """
        A failing trial test is recorded as a failure on a plain
        L{pyunit.TestResult}.
        """
        class FailureTest(SynchronousTestCase):
            ran = False
            def test_foo(s):
                s.ran = True
                s.fail('boom!')
        test = FailureTest('test_foo')
        result = pyunit.TestResult()
        test.run(result)
        self.failUnless(test.ran)
        self.assertEqual(1, result.testsRun)
        self.assertEqual(1, len(result.failures))
        self.failIf(result.wasSuccessful())
    def test_error(self):
        """
        A trial test raising an unexpected exception is recorded as an error.
        """
        test = self.ErrorTest('test_foo')
        result = pyunit.TestResult()
        test.run(result)
        self.failUnless(test.ran)
        self.assertEqual(1, result.testsRun)
        self.assertEqual(1, len(result.errors))
        self.failIf(result.wasSuccessful())
    def test_setUpError(self):
        """
        An exception raised in C{setUp} is reported as an error and the test
        method itself never runs.
        """
        class ErrorTest(SynchronousTestCase):
            ran = False
            def setUp(self):
                1/0
            def test_foo(s):
                s.ran = True
        test = ErrorTest('test_foo')
        result = pyunit.TestResult()
        test.run(result)
        self.failIf(test.ran)
        self.assertEqual(1, result.testsRun)
        self.assertEqual(1, len(result.errors))
        self.failIf(result.wasSuccessful())
    def test_tracebackFromFailure(self):
        """
        Errors added through the L{PyUnitResultAdapter} have the same traceback
        information as if there were no adapter at all.
        """
        try:
            1/0
        except ZeroDivisionError:
            exc_info = sys.exc_info()
            f = Failure()
        pyresult = pyunit.TestResult()
        result = PyUnitResultAdapter(pyresult)
        result.addError(self, f)
        # The adapter must render the Failure exactly as traceback would.
        self.assertEqual(pyresult.errors[0][1],
                         ''.join(traceback.format_exception(*exc_info)))
    def test_traceback(self):
        """
        As test_tracebackFromFailure, but covering more code.
        """
        class ErrorTest(SynchronousTestCase):
            exc_info = None
            def test_foo(self):
                try:
                    1/0
                except ZeroDivisionError:
                    # Capture the exc_info so the outer test can compare
                    # against what the adapter reported.
                    self.exc_info = sys.exc_info()
                    raise
        test = ErrorTest('test_foo')
        result = pyunit.TestResult()
        test.run(result)
        # We can't test that the tracebacks are equal, because Trial's
        # machinery inserts a few extra frames on the top and we don't really
        # want to trim them off without an extremely good reason.
        #
        # So, we just test that the result's stack ends with the
        # exception's stack.
        expected_stack = ''.join(traceback.format_tb(test.exc_info[2]))
        observed_stack = '\n'.join(result.errors[0][1].splitlines()[:-1])
        self.assertEqual(expected_stack.strip(),
                         observed_stack[-len(expected_stack):].strip())
    def test_tracebackFromCleanFailure(self):
        """
        Errors added through the L{PyUnitResultAdapter} have the same
        traceback information as if there were no adapter at all, even
        if the Failure that held the information has been cleaned.
        """
        try:
            1/0
        except ZeroDivisionError:
            exc_info = sys.exc_info()
            f = Failure()
        # cleanFailure() drops the frame references; the formatted traceback
        # must nevertheless survive.
        f.cleanFailure()
        pyresult = pyunit.TestResult()
        result = PyUnitResultAdapter(pyresult)
        result.addError(self, f)
        self.assertEqual(pyresult.errors[0][1],
                         ''.join(traceback.format_exception(*exc_info)))
    def test_trialSkip(self):
        """
        Skips using trial's skipping functionality are reported as skips in
        the L{pyunit.TestResult}.
        """
        class SkipTest(SynchronousTestCase):
            def test_skip(self):
                1/0
            # Trial-style skip: a 'skip' attribute on the test method.
            test_skip.skip = "Let's skip!"
        test = SkipTest('test_skip')
        result = pyunit.TestResult()
        test.run(result)
        self.assertEqual(result.skipped, [(test, "Let's skip!")])
def test_pyunitSkip(self):
"""
Skips using pyunit's skipping functionality are reported as skips in
the L{pyunit.TestResult}.
"""
class SkipTest(SynchronousTestCase):
@pyunit.skip("skippy")
def test_skip(self):
1/0
test = SkipTest('test_skip')
result = pyunit.TestResult()
test.run(result)
self.assertEqual(result.skipped, [(test, "skippy")] | )
def test_skip26(self):
"""
On Python 2.6, pyunit doesn't support skipping, so it gets added as a
failure to the L{pyunit.TestResult}.
"""
class SkipTest(SynchronousTestCase):
def | test_skip(self):
1/0
test_skip.skip = "Let's skip!"
test = SkipTest('test_skip')
result = pyunit.TestR |
gencer/sentry | src/sentry/quotas/base.py | Python | bsd-3-clause | 4,175 | 0.001198 | """
sentry.quotas.base
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django.conf import settings
from sentry import options
from sentry.utils.services import Service
class RateLimit(object):
    """Describes whether (and why) an event should be throttled."""
    __slots__ = ['is_limited', 'retry_after', 'reason', 'reason_code']
    def __init__(self, is_limited, retry_after=None, reason=None, reason_code=None):
        # True when the caller should reject the event
        self.is_limited = is_limited
        # delta of seconds in the future to retry
        self.retry_after = retry_after
        # human readable description
        self.reason = reason
        # machine readable description
        self.reason_code = reason_code
class NotRateLimited(RateLimit):
    """Convenience RateLimit with is_limited fixed to False."""
    def __init__(self, **kwargs):
        super(NotRateLimited, self).__init__(False, **kwargs)
class RateLimited(RateLimit):
    """Convenience RateLimit with is_limited fixed to True."""
    def __init__(self, **kwargs):
        super(RateLimited, self).__init__(True, **kwargs)
class Quota(Service):
    """
    Quotas handle tracking a project's event usage (at a per minute tick) and
    respond whether or not a project has been configured to throttle incoming
    events if they go beyond the specified quota.
    """
    __all__ = (
        'get_maximum_quota', 'get_organization_quota', 'get_project_quota', 'is_rate_limited',
        'translate_quota', 'validate', 'refund',
    )
    def __init__(self, **options):
        pass
    def is_rate_limited(self, project, key=None):
        # Base implementation never throttles; backends override this.
        return NotRateLimited()
    def refund(self, project, key=None, timestamp=None):
        """Give back quota consumed by an event (backend specific)."""
        raise NotImplementedError
    def get_time_remaining(self):
        # Seconds until the current quota window resets; 0 when unknown.
        return 0
    def translate_quota(self, quota, parent_quota):
        """
        Resolve a quota setting against its parent: a value like '80%' is a
        share of parent_quota, a falsy value inherits the parent, anything
        else is used as an absolute number.
        """
        if six.text_type(quota).endswith('%'):
            pct = int(quota[:-1])
            # NOTE(review): under Python 2 this `/` is integer division.
            quota = int(parent_quota) * pct / 100
        if not quota:
            return int(parent_quota or 0)
        return int(quota or 0)
    def get_key_quota(self, key):
        """Return the (quota, window) configured on a project key, if enabled."""
        from sentry import features
        if features.has('projects:rate-limits', key.project):
            return key.rate_limit
        return (0, 0)
    def get_project_quota(self, project):
        """Return (quota, window) for a project, derived from its org quota."""
        from sentry.models import Organization, OrganizationOption
        org = getattr(project, '_organization_cache', None)
        if not org:
            org = Organization.objects.get_from_cache(id=project.organization_id)
            # Cache the org on the project to avoid repeated lookups.
            project._organization_cache = org
        max_quota_share = int(
            OrganizationOption.objects.get_value(org, 'sentry:project-rate-limit', 100)
        )
        org_quota, window = self.get_organization_quota(org)
        if max_quota_share != 100 and org_quota:
            quota = self.translate_quota(
                '{}%'.format(max_quota_share),
                org_quota,
            )
        else:
            # A 100% share (or no org quota) means "no per-project cap".
            quota = 0
        return (quota, window)
    def get_organization_quota(self, organization):
        """Return (quota, window-in-seconds) for an organization."""
        from sentry.models import OrganizationOption
        account_limit = int(
            OrganizationOption.objects.get_value(
                organization=organization,
                key='sentry:account-rate-limit',
                default=0,
            )
        )
        system_limit = options.get('system.rate-limit')
        # If there is only a single org, this one org should
        # be allowed to consume the entire quota.
        if settings.SENTRY_SINGLE_ORGANIZATION:
            if system_limit < account_limit:
                return (system_limit, 60)
            return (account_limit, 3600)
        # an account limit is enforced, which is set as a fixed value and cannot
        # utilize percentage based limits
        elif account_limit:
            return (account_limit, 3600)
        return (
            self.translate_quota(
                settings.SENTRY_DEFAULT_MAX_EVENTS_PER_MINUTE,
                system_limit,
            ), 60
        )
    def get_maximum_quota(self, organization):
        """
        Return the maximum capable rate for an organization.
        """
        return (options.get('system.rate-limit'), 60)
|
serge-cohen/Lima | common/python/Debug.py | Python | gpl-3.0 | 2,593 | 0.004242 | ############################################################################
# This file is part of LImA, a Library for Image Acquisition
#
# Copyright (C) : 2009-2011
# European Synchrotron Radiation Facility
# BP 220, Grenoble 38043
# FRANCE
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
############################################################################
from limacore import DebParams, DebObj
import os, sys, types
import functools
def DEB_GLOBAL_FUNCT(fn):
    # Decorator for a module-level function; DEB_FUNCT looks up 'deb_params'
    # in the caller's globals (frame=2 skips this wrapper's own frame).
    return DEB_FUNCT(fn, True, 2)
def DEB_MEMBER_FUNCT(fn):
    # Decorator for a method; DEB_FUNCT looks up 'deb_params' in the class
    # body's locals (frame=2 skips this wrapper's own frame).
    return DEB_FUNCT(fn, False, 2)
def DEB_FUNCT(fn, in_global=True, frame=1, deb_container=None):
    """Wrap *fn* so each call runs with a fresh DebObj bound as 'deb'.

    'deb_params' is taken from the caller's globals (in_global) or locals.
    Reconstructed: two lines were split by stray '|' markers
    (code.co_filename and types.FunctionType). Python 2 only
    (func_globals / func_code / sys.exc_clear).
    """
    frame = sys._getframe(frame)
    if in_global:
        n_dict = frame.f_globals
    else:
        n_dict = frame.f_locals
    deb_params = n_dict['deb_params']
    code = frame.f_code
    filename = os.path.basename(code.co_filename)
    lineno = frame.f_lineno
    @functools.wraps(fn)
    def real_fn(*arg, **kw):
        sys.exc_clear()
        # Run fn's code object against a copy of its globals so that 'deb'
        # can be injected without leaking into the real module namespace.
        fn_globals = dict(fn.func_globals)
        deb_obj = DebObj(deb_params, fn.func_name, '', filename, lineno)
        fn_globals['deb'] = deb_obj
        if deb_container is not None:
            deb_container.add(deb_obj)
        new_fn = types.FunctionType(fn.func_code, fn_globals, fn.func_name,
                                    fn.func_defaults)
        return new_fn(*arg, **kw)
    return real_fn
def DEB_GLOBAL(deb_mod):
    # Declare module-level debug parameters in the caller's globals.
    DEB_PARAMS(deb_mod, '', True, 2)
def DEB_CLASS(deb_mod, class_name):
    # Declare class-level debug parameters in the class body's locals.
    DEB_PARAMS(deb_mod, class_name, False, 2)
def DEB_PARAMS(deb_mod, class_name, in_global=True, frame=1):
    """Create a DebParams instance as 'deb_params' in the caller's scope."""
    frame = sys._getframe(frame)
    g_dict, l_dict = frame.f_globals, frame.f_locals
    mod_name = g_dict['__name__']
    if mod_name == '__main__':
        file_name = frame.f_code.co_filename
        # Bug fix: str.strip('.py') removed any leading/trailing '.', 'p' or
        # 'y' characters (e.g. 'pyproxy.py' -> 'rox'); splitext drops exactly
        # the extension.
        mod_name = os.path.splitext(os.path.basename(file_name))[0]
    if in_global:
        d_dict = g_dict
    else:
        d_dict = l_dict
    d_dict['deb_params'] = DebParams(deb_mod, class_name, mod_name)
|
LibreSoftTeam/R-SNA | GraphDataCreator.py | Python | gpl-2.0 | 32,520 | 0.001907 | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
"""
GraphDataCreator Shell(bash)-to-Pyhton translation from Christian Ramiro code
Miguel Angel Fernandez Sanchez
"""
#TODO Implement -s option (program lauched from a super-script, no questions)
import os
import sys
from time import strftime, gmtime
import subprocess
import shutil
import commands
# Announce startup and remember the directory the tool was launched from;
# go_home_dir() later walks back up to this path.
print " - Graph Data Creator Started - "
print " - MAKE SURE CTAGS AND GIT ARE INSTALLED IN YOUR COMPUTER\r\n"
INIT_PATH = os.path.abspath(os.curdir)
def help():
    """
    Build and return the usage & description text (the caller prints it).
    """
    # The text deliberately mixes \r\n (blank-line spacing) and \n.
    line = "\r\nNAME\r\n\r\nGraphDataCreator.py\r\n\r\n"
    line += "USAGE\r\n\r\n./GrahDataCreator.py [SHORT-OPTION]\r\n\r\n"
    line += "EXAMPLE\r\n\r\n"
    line += "./GraphDataCreator.py -f 2010-1-1 -t 2011-1-1 -r "
    line += "git://git.openstack.org/openstack/swift -v\r\n\r\n"
    line += "DESCRIPTION\r\n\r\n"
    line += "GraphDataCreator.py reads information of a Git repository and"
    line += " outputs two\nCSV files ready to be read to represent"
    line += " a software community. The files\ncontain pairs "
    line += "developer-developer meaning that both developers have\nworked "
    line += "together. One file uses file-scope to create a relationship "
    line += "while\nthe other narrows relationship down using"
    line += " a method-scope.\r\n"
    line += "\r\nOPTIONS\r\n\r\n-h\tprints help page.\r\n\r\n"
    line += "-f\tStarting date of study. When empty, study start "
    line += "the beginning of times.\n\tFormat: 2012-12-31\r\n\r\n"
    line += "-t\tEnding date of study. When empty, current date will "
    line += "be chosen.\n\tFormat: 2012-12-31\r\n\r\n"
    line += "-r\tRepository URL. If argument is 'reuse', and there is a "
    line += "Repository file in directory, reuses that repository\n\tExample: "
    line += "git://git.openstack.org/openstack/swift\r\n"
    line += "\r\n-v\tVerbose mode."
    line += "\r\n\r\nDEPENDENCIES\r\n\r\nGit and ctags are required "
    line += "to run this script.\r\n\r\nOUTPUT\r\n\r\n"
    line += "DataMethods.csv-File using relationship-in-method approach\r\n"
    line += "DataFiles.csv-File using relantionship-in-file approach\r\n"
    return line
def error_info(show_line):
    # Currently a pass-through: returns its argument unchanged.
    return show_line
def check_date(date, date_type):
    """
    Checks if a date (type: starting or ending) has format 'YYYY-MM-DD'
    with year >= 1971. Returns a pair: [<0/1>(wrong/correct), description].

    Fixes: the original compared the year *string* against the int 1971
    (always True under Python 2's cross-type ordering, TypeError under
    Python 3), and crashed with ValueError on non-numeric fields.
    """
    date = str(date)
    date_fields = date.split('-')
    result = [0, ""]
    if len(date_fields) == 3:
        try:
            year, month, day = [int(field) for field in date_fields]
        except ValueError:
            # Non-numeric component: keep the date invalid.
            year, month, day = 0, 0, 0
        if year >= 1971 and 0 < month < 13 and 0 < day < 32:
            result[0] = 1
    if result[0]:
        result[1] = "Valid " + date_type + " date: " + date
    else:
        result[1] = date_type + " date is wrong. "
        result[1] += "\nPlease use option -h for further information"
    return result
def under_linux():
    """Return 1 when running on GNU/Linux; otherwise print a notice and return 0."""
    if os.uname()[0] == 'Linux':
        return 1
    print("We are not under Linux, no options available")
    return 0
def extract_options(list_opt, dicc_opt):
    """
    Fill *dicc_opt* from the raw argument list.
    -f <date> starting date, -t <date> ending date, -r <url> repository,
    -v verbose, -h help, -s launched from a super-script.
    """
    joined = " ".join(list_opt)
    for chunk in joined.split(" -"):
        fields = chunk.split()
        if len(fields) == 2:
            key, arg = fields
            if key in ("f", "t", "r"):
                dicc_opt[key] = str(arg)
        elif chunk == 'v':
            dicc_opt['v'] = True
        elif chunk == 'h':
            dicc_opt['h'] = True
        elif fields[0] == "s":
            dicc_opt['s'] = True
            print("We are under Super-script")
def dir_exists(directory):
    """
    Check whether *directory* exists; if so, offer to delete it interactively.
    Returns 0 when the directory is absent (or was removed), 1 when the user
    kept it.
    """
    if not os.path.exists(directory):
        return 0
    print("Please, remove directory '" + directory + "' before starting")
    print("Do you want to remove directory '" + directory + "'? (Y / n)")
    answer = raw_input()
    if answer in ('Y', 'y'):
        print("Removing directory: " + directory)
        shutil.rmtree(directory, ignore_errors=True)
        return 0
    return 1
def go_home_dir():
    """
    Walk back up the directory tree until the depth matches the launch
    directory recorded in INIT_PATH.
    """
    depth_home = len(INIT_PATH.split('/'))
    depth_cur = len(os.path.abspath(os.curdir).split('/'))
    levels_up = depth_cur - depth_home
    if levels_up > 0:
        print("Going up " + str(levels_up) + " directory levels")
        for _ in range(levels_up):
            os.chdir('..')
def add_backslash(line):
    """Return *line* with every '/' escaped as '\\/'."""
    return line.replace('/', '\\/')
class GraphData:
def __init__(self):
self.DATA_PATH = "Data"
self.OUT_PATH = self.DATA_PATH + '/' + 'output'
self.CHECKOUT_PATH = self.DATA_PATH + '/' + 'Repository'
self.dfiles_name = "DataFiles.csv"
self.dmethods_name = "DataMethods.csv"
self.out_names = {}
self.out_paths = {}
self.out_files = {}
self.listCommitters = []
self.diccCommitters = {}
self.diccMethods = {}
self.diccTimes = {}
self.diccTimesM = {}
self.out_names['commits'] = "CommitsFromScriptFile.txt"
self.out_names['output'] = "outputFile.txt"
self.out_names['log'] = "graphData.log"
self.out_names['diff'] = "diffFile.txt"
self.out_names['allFiles'] = "allFiles.txt"
self.out_names['auxTag'] = "AuxTagFile.txt"
self.date_now = strftime("%Y-%m-%d", gmtime())
| self.conf_opt = {}
self.conf_opt['v'] = True # Verbose option (-v)
self.conf_opt['f'] = '1971-1-1' # Starting date of study option (-f)
self.conf_opt['t'] = self.date_now # Ending date opt. (-t)
self.conf_opt['r'] = "" # Repository URL option (-r)
self.conf_opt['h'] = False # Show help option (-h)
self.conf_opt['s'] = False # Super-script option
self.fichtag = open('fichtag.log', 'w')
self.fichtag.close() |
self.fichtag = open('fichtag.log', 'a')
def create_data_files(self):
os.mkdir(self.DATA_PATH)
os.mkdir(self.CHECKOUT_PATH)
os.mkdir(self.OUT_PATH)
for out_file in self.out_names.keys():
dir_to_open = self.DATA_PATH + '/' + self.out_names[out_file]
self.out_paths[out_file] = dir_to_open
self.out_files[out_file] = open(dir_to_open, 'a')
def log(self, log_line):
str_out = ""
log_file = self.out_files['log']
log_file.write(log_line + '\n')
if self.conf_opt['v']:
str_out = str(log_line) + "\r\n"
return str_out
def check_program_starting(self):
"""
Checks if program can run properly
"""
if not under_linux():
raise SystemExit
if len(sys.argv) == 1:
print help()
raise SystemExit
if not self.conf_opt['s']:
if self.conf_opt['r'] != "reuse":
if (dir_exists("Repository")) or (dir_exists("Data")):
raise SystemExit
else:
if dir_exists("Data"):
raise SystemExit
else:
directory1 = "Data"
directory2 = "Repository"
if os.path.exists(directory1):
print "Removing directory: " + directory1
|
zwelchWI/BoostEM | source/runTests.py | Python | mit | 1,237 | 0.028294 | import sys
import subprocess
# Run the BoostEM experiment once per seed. Only the first configuration is
# active; the commented-out variants are kept for reference.
# Reconstructed: two lines were split by stray '|' markers.
seeds = [1,2,3,4,5,6,7,8,9,10]
# Overrides the list above: only the first four seeds are currently run.
seeds = [1,2,3,4]
for seed in seeds:
    boostem = 'python EM.py --input=../data/wdbc.arff --learner=dt --tboost=20 --labelFrac=0.2 --dtM=100 --seed=' + str(seed)
    print(boostem)
    # NOTE(review): shell=True with a constructed string; acceptable here
    # since all parts are hard-coded, but keep it free of external input.
    subprocess.call(boostem, shell=True)
    fullBoost = 'python EM.py --input=../data/wdbc.arff --learner=dt --tboost=20 --labelFrac=1.0 --dtM=100 --seed=' + str(seed)
    #print fullBoost
    #subprocess.call(fullBoost, shell=True)
    justBoost = 'python EM.py --input=../data/wdbc.arff --learner=dt --tboost=20 --labelFrac=0.2 --dtM=100 --loseUnlabeled --seed=' + str(seed)
    #print justBoost
    #subprocess.call(justBoost, shell=True)
    justEM = 'python EM.py --input=../data/wdbc.arff --learner=dt --tboost=1 --labelFrac=0.2 --dtM=100 --seed=' + str(seed)
    #print justEM
    #subprocess.call(justEM, shell=True)
    FullOne = 'python EM.py --input=../data/wdbc.arff --learner=dt --tboost=1 --labelFrac=1.0 --dtM=100 --seed=' + str(seed)
    # print FullOne
    # subprocess.call(FullOne, shell=True)
    justOne = 'python EM.py --input=../data/wdbc.arff --learner=dt --tboost=1 --labelFrac=0.2 --dtM=100 --loseUnlabeled --seed=' + str(seed)
    #print justOne
    #subprocess.call(justOne, shell=True)
|
wasiqmukhtar/tcp-eval.wasiq | src/buildings/bindings/modulegen__gcc_LP64.py | Python | gpl-2.0 | 309,810 | 0.015 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    # Downgrade wrapper-generation failures to warnings so one bad API entry
    # does not abort generation of the whole bindings module.
    def handle_error(self, wrapper, exception, traceback_):
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen Module for the ns3 buildings bindings."""
    return Module('ns.buildings', cpp_namespace='::ns3')
def register_types(module):
root_module = module.get_root()
## propagation-environment.h (module 'propagation'): ns3::CitySize [enumeration]
module.add_enum('CitySize', ['SmallCity', 'MediumCity', 'LargeCity'], import_from_module='ns.propagation')
## propagation-environment.h (module 'propagation'): ns3::EnvironmentType [enumeration]
module.add_enum('EnvironmentType', ['UrbanEnvironment', 'SubUrbanEnvironment', 'OpenAreasEnvironment'], import_from_module='ns.propagation')
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## box.h (module 'mobility'): ns3::Box [class]
module.add_class('Box', import_from_module='ns.mobility')
## box.h (module 'mobility'): ns3::Box::Side [enumeration]
module.add_enum('Side', ['RIGHT', 'LEFT', 'TOP', 'BOTTOM', 'UP', 'DOWN'], outer_class=root_module['ns3::Box'], import_from_module='ns.mobility')
## building-container.h (module 'buildings'): ns3::BuildingContainer [class]
module.add_class('BuildingContainer')
## building-list.h (module 'buildings'): ns3::BuildingList [class]
module.add_class('BuildingList')
## buildings-helper.h (module 'buildings'): ns3::BuildingsHelper [class]
module.add_class('BuildingsHelper')
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper [class]
module.add_class('ConstantVelocityHelper', import_from_module='ns.mobility')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (m | odule 'core'): ns3::TypeId::AttributeFla | g [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D', import_from_module='ns.core')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D', import_from_module='ns.core')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## position-allocator.h (module 'mobility'): ns3::PositionAllocator [class]
module.add_class('PositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::Object'])
## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel [class]
module.add_class('PropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::Object'])
## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator [class]
module.add_class('RandomBoxPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator'])
## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator [class]
module.add_class('RandomBuildingPositionAllocator', parent=root_module['ns3::PositionAllocator'])
## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator [class]
module.add_class('RandomDiscPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator'])
## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel [class]
module.add_class('RandomPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator [class]
module.add_class('Rand |
biomodels/MODEL1310110036 | MODEL1310110036/model.py | Python | cc0-1.0 | 427 | 0.009368 | import os
path = o | s.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1310110036.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
    """Return True when *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
        return True
    except ImportError:
        return False
# Parse the model eagerly when libsbml is available; otherwise only the raw
# SBML string (sbmlString) is exposed.
if module_exists('libsbml'):
    import libsbml
    sbml = libsbml.readSBMLFromString(sbmlString)
lncwwn/js-partner | js_partner/views.py | Python | mit | 1,205 | 0.00249 | from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
def index(request):
    """Render the landing page."""
    return render_to_response('index.html', None)
def run(request):
    """Not implemented yet."""
    pass
def save(request):
    """Not implemented yet."""
    pass
# user signup
def signup(request):
    """Register a new user; on success redirect to the landing page.

    Fixes: the comparison line was split by a stray '|' marker, and
    create_user() was called as (name, password, email) although Django's
    signature is (username, email=None, password=None) — the password was
    being stored in the email field. The redirect now happens only when the
    passwords actually matched and the account was created.
    """
    if request.method == 'POST':
        name = request.POST['name']
        password = request.POST['password']
        password_confirm = request.POST['password_confirm']
        email = request.POST['email']
        if password == password_confirm:
            user = User.objects.create_user(name, email, password)
            user.save()
            return HttpResponseRedirect('/')
    # GET, or mismatching passwords: (re-)render the signup form.
    return render_to_response('user/signup.html', RequestContext(request))
# user login
def login(request):
    """Render the login form.

    Fix: unchecked HTML checkboxes are absent from POST data, so plain
    indexing of 'remember' raised KeyError whenever "remember me" was left
    unticked; .get() with a False default is used instead.
    """
    if request.method == 'POST':
        name = request.POST['name']
        password = request.POST['password']
        remember = request.POST.get('remember', False)
        # NOTE(review): the collected credentials are never verified or used;
        # authentication still has to be implemented.
    return render_to_response('user/login.html')
# user change password
def change_password(request):
    """Render the change-password form (POST handling not implemented:
    a POST request currently falls through and returns None)."""
    if request.method != 'POST':
        return render_to_response('user/change_password.html')
evandrix/Splat | code/demo/pyutilib.math-3.3/setup.py | Python | mit | 1,569 | 0.013384 | # _________________________________________________________________________
#
# PyUtilib: A Python utility library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# _________________________________________________________________________
#
"""
Setup for pyutilib.math package
"""
import os
from setuptools import setup
def read(*rnames):
return | open(os.path.join(os.path.dirname(__file__), *rnames)).read()
setup(name="pyutilib.math",
version='3.3',
maintainer='William E. Hart',
maintainer_email='wehart@sandia.gov',
url = 'https://software.sandia.gov/svn/public/pyutilib/pyutilib.math',
license = 'BSD',
platforms = ["any"],
de | scription = 'PyUtilib math utilities.',
long_description = read('README.txt'),
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Unix Shell',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules'],
packages=['pyutilib', 'pyutilib.math'],
keywords=['utility'],
namespace_packages=['pyutilib']
)
|
GuillaumeDerval/INGInious | tests/tasks/edx/HelloWorld/insert_input.py | Python | agpl-3.0 | 891 | 0.015713 | # -*- coding: utf8 -*-
# Author: Adrien Bibal
# Date: 2014
# Insert the student answer in the correction framework file.
import sys
import codecs
input_file = sys.stdin # input = file containing the student answer.
oz_file = codecs.open("/task/task.oz", "r", "utf8") # Open the "correction framework file".
new_file = codecs.o | pen("new_file.oz", "w","utf8") # Open the fina | l file.
for line in oz_file:
# "@@q1@@" is the arbitrary marker used to say "insert the student answer here".
if "@@q1@@" in line :
for input_line in input_file :
if '\0' in input_line :
input_line = input_line.strip('\0')
new_file.write(input_line) # Copy each line from the student answer to the final file.
else :
new_file.write(line) # Copy each line from the "correction framework file" to the final file.
oz_file.close()
new_file.close()
|
daxm/fmcapi | unit_tests/port_object_group.py | Python | bsd-3-clause | 1,589 | 0 | import logging
import fmcapi
import time
def test__port_object_group(fmc):
logging.info("Testing PortObjectGroup class.")
starttime = str(int(time.time()))
namer = f"_fmcapi_test_{starttime}"
obj10 = fmcapi.ProtocolPortObjects(
fmc=fmc, name="_porttcp1", port="8443", protocol="TCP"
)
obj10.post()
obj11 = fmcapi.ProtocolPortObjects(
fmc=fmc, name="_portudp1", port="161", protocol="UDP"
)
obj11.post()
obj12 = fmcapi.ProtocolPortObjects(
fmc=fmc, name="_portrangetcp1", port="0-1023", protocol="TCP"
)
obj12.post()
time.sleep(1)
obj1 = fmcapi.PortObjectGroups(fmc=fmc, name=namer)
obj1.named_ports(action="add", name=obj10.name)
obj1.named_ports(action="add", name=obj10.name)
obj1.named_port | s(action="remove", name=obj10.name)
obj1.named_ports(action="clear")
obj1.named_ports(action="add", name=obj11.name)
obj1.named_ports(action="add", name=obj12.name)
obj1.named_ports(action="remove", name=obj11.name)
obj1.post()
time.sleep(1)
del obj1
obj1 = fmcapi.PortObjectGroups(fmc=fm | c, name=namer)
obj1.get()
obj1.named_ports(action="add", name="HTTP")
obj1.named_ports(action="clear")
obj1.named_ports(action="add", name="HTTP")
obj1.named_ports(action="remove", name="HTTP")
obj1.named_ports(action="add", name="HTTP")
obj1.named_ports(action="add", name="HTTPS")
obj1.put()
time.sleep(1)
obj1.delete()
obj10.delete()
obj11.delete()
obj12.delete()
logging.info("Testing PortObjectGroup class done.\n")
|
priestc/MultiExplorer | multiexplorer/pricetick/migrations/0001_initial.py | Python | mit | 872 | 0.001147 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-26 03:49
from __future__ import unicode_literals
from django.db import migrations, models
|
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='PriceTick',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('currency', models.CharField(max_length=8)),
('exchange', models.CharField(max_length=128)),
('base_fiat | ', models.CharField(max_length=8)),
('date', models.DateTimeField(db_index=True)),
('price', models.FloatField()),
],
options={
'get_latest_by': 'date',
},
),
]
|
davidsminor/gaffer | python/GafferImageTest/ImagePlugTest.py | Python | bsd-3-clause | 5,013 | 0.055057 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS | OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, | OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class ImagePlugTest( GafferTest.TestCase ) :
def testTileOrigin( self ) :
ts = GafferImage.ImagePlug.tileSize()
testCases = [
( IECore.V2i( ts-1, ts-1 ), IECore.V2i( 0, 0 ) ),
( IECore.V2i( ts, ts-1 ), IECore.V2i( ts, 0 ) ),
( IECore.V2i( ts, ts ), IECore.V2i( ts, ts ) ),
( IECore.V2i( ts*3-1, ts+5 ), IECore.V2i( ts*2, ts ) ),
( IECore.V2i( ts*3, ts-5 ), IECore.V2i( ts*3, 0 ) ),
( IECore.V2i( -ts+ts/2, 0 ), IECore.V2i( -ts, 0 ) ),
( IECore.V2i( ts*5+ts/3, -ts*4 ), IECore.V2i( ts*5, -ts*4 ) ),
( IECore.V2i( -ts+1, -ts-1 ), IECore.V2i( -ts, -ts*2 ) )
]
for input, expectedResult in testCases :
self.assertEqual(
GafferImage.ImagePlug.tileOrigin( input ),
expectedResult
)
def testTileStaticMethod( self ) :
tileSize = GafferImage.ImagePlug.tileSize()
self.assertEqual(
GafferImage.ImagePlug.tileBound( IECore.V2i( 0 ) ),
IECore.Box2i(
IECore.V2i( 0, 0 ),
IECore.V2i( tileSize - 1, tileSize - 1 )
)
)
self.assertEqual(
GafferImage.ImagePlug.tileBound( IECore.V2i( 0, 1 ) ),
IECore.Box2i(
IECore.V2i( 0, tileSize ),
IECore.V2i( tileSize - 1, tileSize * 2 - 1 )
)
)
def testDefaultChannelNamesMethod( self ) :
channelNames = GafferImage.ImagePlug()['channelNames'].defaultValue()
self.assertTrue( 'R' in channelNames )
self.assertTrue( 'G' in channelNames )
self.assertTrue( 'B' in channelNames )
def testCreateCounterpart( self ) :
p = GafferImage.ImagePlug()
p2 = p.createCounterpart( "a", Gaffer.Plug.Direction.Out )
self.assertEqual( p2.getName(), "a" )
self.assertEqual( p2.direction(), Gaffer.Plug.Direction.Out )
self.assertEqual( p2.getFlags(), p.getFlags() )
def testDynamicSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["p"] = GafferImage.ImagePlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertTrue( isinstance( s["n"]["p"], GafferImage.ImagePlug ) )
self.assertEqual( s["n"]["p"].getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
def testBoxPromotion( self ) :
b = Gaffer.Box()
b["n"] = GafferImage.Grade()
self.assertTrue( b.canPromotePlug( b["n"]["in"], asUserPlug=False ) )
self.assertTrue( b.canPromotePlug( b["n"]["out"], asUserPlug=False ) )
i = b.promotePlug( b["n"]["in"], asUserPlug=False )
o = b.promotePlug( b["n"]["out"], asUserPlug=False )
self.assertEqual( b["n"]["in"].getInput(), i )
self.assertEqual( o.getInput(), b["n"]["out"] )
self.assertTrue( b.plugIsPromoted( b["n"]["in"] ) )
self.assertTrue( b.plugIsPromoted( b["n"]["out"] ) )
def testTypeNamePrefixes( self ) :
self.assertTypeNamesArePrefixed( GafferImage )
self.assertTypeNamesArePrefixed( GafferImageTest )
def testDefaultNames( self ) :
self.assertDefaultNamesAreCorrect( GafferImage )
self.assertDefaultNamesAreCorrect( GafferImageTest )
if __name__ == "__main__":
unittest.main()
|
OctavianLee/Barrage | tests/cores/test_danmaku_process.py | Python | mit | 4,281 | 0.001407 | # -*- coding: utf-8 -*-
from unittest import TestCase
from nose.tools import eq_
from mock import Mock
from datetime import datetime
from tests.constants import STRING, NUMBER, DATE
from tests.asserters import eq_obj
from danmaku.cores.danmaku_process import generate_danmaku
from danmaku.cores.danmaku_process import process_recieve_data
from danmaku.models import DANMU_MSG, SEND_GIFT, WELCOME, SEND_TOP
from danmaku.models.danmaku import DanmakuModel
from danmaku.configs.personal_settings import TIME_FORMAT
from danmaku.helpers import convert_hexascii_to_int
def test_generate_danmaku():
msg = {
u'info': [
[ 0, 1, 25, 16777215, 1441727762, 1585812335, 0, u'c8de2b91', 0],
u'xxxxx',
[ NUMBER, u'xxxx', 0, u'0']
],
u'cmd': u'DANMU_MSG',
u'roomid': NUMBER
}
danmaku_type = DANMU_MSG
publisher = msg['info'][2][1].encode('utf-8')
content = msg['info'][1].encode('utf-8')
is_vip = msg['info'][2][2] == 1
is_admin = int(msg['info'][2][3].encode('utf-8')) == 1
expect_danmaku = DanmakuModel(
publisher=publisher,
content=content,
recieved_time=datetime.now().strftime(TIME_FORMAT),
danmaku_type=danmaku_type,
is_admin=is_admin,
is_vip=is_vip
)
test_danmaku = generate_danmaku(msg)
eq_obj(expect_danmaku, test_danmaku)
msg = {
u'roomid': NUMBER,
u'cmd': u'SEND_GIFT',
u'data': {
u'top_list': [
{u'uname': u'xxx', u'coin': NUMBER, u'uid': NUMBER},
],
u'uid': NUMBER,
u'timestamp': 1441727778,
u'price': NUMBER,
u'giftId': 1,
u'uname': u'xxxxx',
u'num': NUMBER,
u'rcost': NUMBER,
u'super': 0,
u'action': u'\u5582\u98df',
u'giftName': u'\u8fa3\u6761'
}
}
danmaku_type = SEND_GIFT
publisher = msg['data']['uname'].encode('utf-8')
content = ''.join(
[str(msg['data']['num']), ' X ',
msg['data']['giftName'].encode('utf-8'),
' 目前共花销:', str(msg['data']['rcost'])])
is_vip = False
is_admin = False
expect_danmaku = DanmakuModel(
publisher=publisher,
content=content,
recieved_time=datetime.now().strftime(TIME_FORMAT),
danmaku_ty | pe=danmaku_type,
is_admin=is_admin,
is_vip=is_vip
)
test_danmaku = generate_danmaku(msg)
eq_obj(expect_danmaku, test_danmaku)
msg = {
u'roomid': NUMBER,
u'cmd': u'WELCOME',
u'data': {
u'uname': u'xxxxxr',
u'isadmin': 0,
| u'uid': NUMBER
}
}
danmaku_type = WELCOME
publisher = msg['data']['uname'].encode('utf-8')
is_vip = True
content = None
is_admin = msg['data']['isadmin'] == 1
expect_danmaku = DanmakuModel(
publisher=publisher,
content=content,
recieved_time=datetime.now().strftime(TIME_FORMAT),
danmaku_type=danmaku_type,
is_admin=is_admin,
is_vip=is_vip
)
test_danmaku = generate_danmaku(msg)
eq_obj(expect_danmaku, test_danmaku)
msg = {
u'roomid': u'11111',
u'cmd': u'SEND_TOP',
u'data': {
u'top_list': [
{u'uname': u'xxxx', u'coin': NUMBER, u'uid': NUMBER},
]
}
}
danmaku_type = SEND_TOP
tops = msg["data"]['top_list']
contents = ["{}: {} {}".format(top['uid'], top['uname'], top['coin'])
for top in tops]
content = '\n'.join(contents)
publisher = "排行榜"
is_vip = False
is_admin = False
expect_danmaku = DanmakuModel(
publisher=publisher,
content=content,
recieved_time=datetime.now().strftime(TIME_FORMAT),
danmaku_type=danmaku_type,
is_admin=is_admin,
is_vip=is_vip
)
test_danmaku = generate_danmaku(msg)
eq_obj(expect_danmaku, test_danmaku)
def test_process_recieve_data():
# I have no idea to tests it.
mock_fun = Mock(process_recieve_data)
mock_fun.return_value = True
eq_(mock_fun(), True)
|
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_hyperlink15.py | Python | bsd-2-clause | 1,235 | 0.00081 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'hyperlink15.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with h | yperlinks.This example doesn't have any link formatting and tests the relationshiplinkage code."""
workbook = W | orkbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url('B2', 'external:subdir/blank.xlsx')
workbook.close()
self.assertExcelEqual()
|
alveyworld-dev/calculator | team6.py | Python | apache-2.0 | 738 | 0.020325 | def add(a, b):
"""
This function adds two numbers
"""
return a + b
def sub(a,b):
"""
This function subtracts two numbers
"""
return a - b
# print "The first number you want to subtract?"
# a = int(raw_input("First no: "))
# print "What's the second | number you want to subtract"
# b = int(raw_input("Second no: "))
# """
# This function subtracts two numbers
# """
# result = sub(a,b)
# """
# this prints the results
# """
# print "The result is: %r." % result
def opp(a):
return a * -1
# print "Number you want to change"
# a = int(raw_input("Number to change: "))
# result = opp(a)
# """
# This function changes the sign of the number
# """
# print "The result is: %r." % result
# """
# this prints the results
# """ | |
kamcpp/tensorflow | tensorflow/python/lib/io/file_io.py | Python | apache-2.0 | 13,774 | 0.006607 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""File IO methods that wrap the C++ FileSystem API.
The C++ FileSystem API is SWIG wrapped in file_io.i. These functions call those
to accomplish basic File IO operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import uuid
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
class FileIO(object):
"""FileIO class that exposes methods to read / write to / from files.
The constructor takes the following arguments:
name: name of the file
mode: one of 'r', 'w', 'a', 'r+', 'w+', 'a+'.
Can be used as an iterator to iterate over lines in the file.
The default buffer size used for the BufferedInputStream used for reading
the file line by line is 1024 * 512 bytes.
"""
def __init__(self, name, mode):
self.__name = name
self.__mode = mode
self._read_buf = None
self._writable_file = None
if mode not in ("r", "w", "a", "r+", "w+", "a+"):
raise errors.InvalidArgumentError(
None, None, "mode is not 'r' or 'w' or 'a' or 'r+' or 'w+' or 'a+'")
self._read_check_passed = mode in ("r", "r+", "a+", "w+")
self._write_check_passed = mode in ("a", "w", "r+", "a+", "w+")
@property
def name(self):
"""Returns the file name."""
return self.__name
@property
def mode(self):
"""Returns the mode in which the file was opened."""
return self.__mode
def _preread_check(self):
if not self._read_buf:
if not self._read_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for reading")
with errors.raise_exception_on_not_ok_status() as status:
self._read_buf = pywrap_tensorflow.CreateBufferedInputStream(
compat.as_bytes(self.__name), 1024 * 512, status)
def _prewrite_check(self):
if not self._writable_file:
if not self._write_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for writing")
with errors.raise_exception_on_not_ok_status() as status:
self._writable_file = pywrap_tensorflow.CreateWritableFile(
compat.as_bytes(self.__name), compat.as_bytes(self.__mode), status)
def size(self):
"""Returns the size of the file."""
return stat(self.__name).length
def write(self, file_content):
"""Writes file_content to the file. Appends to the end of the file."""
self._prewrite_check()
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.AppendToFile(
compat.as_bytes(file_content), self._writable_file, status)
def read(self, n=-1):
"""Returns the contents of a file as a string.
Starts reading from current position in file.
Args:
n: Read 'n' bytes if n != -1. If n = -1, reads to end of file.
Returns:
'n' bytes of the file (or whole file) requested as a string.
"""
self._preread_check()
with errors.raise_exception_on_not_ok_status() as status:
if n == -1:
length = self.size() - self.tell()
else:
length = n
return pywrap_tensorflow.ReadFromStream(self._read_buf, length, status)
def seek(self, position):
"""Seeks to the position in the file."""
| self._preread_check()
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._read_buf.Seek(position)
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
def readline(self):
r"""Reads the next line from the | file. Leaves the '\n' at the end."""
self._preread_check()
return compat.as_str_any(self._read_buf.ReadLineAsString())
def readlines(self):
"""Returns all lines from the file in a list."""
self._preread_check()
lines = []
while True:
s = self.readline()
if not s:
break
lines.append(s)
return lines
def tell(self):
"""Returns the current position in the file."""
if not self._read_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for reading")
return self._read_buf.Tell()
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
def __iter__(self):
return self
def next(self):
retval = self.readline()
if not retval:
raise StopIteration()
return retval
def __next__(self):
return self.next()
def flush(self):
"""Flushes the Writable file.
This only ensures that the data has made its way out of the process without
any guarantees on whether it's written to disk. This means that the
data would survive an application crash but not necessarily an OS crash.
"""
if self._writable_file:
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._writable_file.Flush()
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
def close(self):
"""Closes FileIO. Should be called for the WritableFile to be flushed."""
self._read_buf = None
if self._writable_file:
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._writable_file.Close()
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
self._writable_file = None
def file_exists(filename):
"""Determines whether a path exists or not.
Args:
filename: string, a path
Returns:
True if the path exists, whether its a file or a directory.
"""
return pywrap_tensorflow.FileExists(compat.as_bytes(filename))
def delete_file(filename):
"""Deletes the file located at 'filename'.
Args:
filename: string, a filename
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API. E.g.,
NotFoundError if the file does not exist.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.DeleteFile(compat.as_bytes(filename), status)
def read_file_to_string(filename):
"""Reads the entire contents of a file to a string.
Args:
filename: string, path to a file
Returns:
contents of the file as a string
Raises:
errors.OpError: Raises variety of errors that are subtypes e.g.
NotFoundError etc.
"""
f = FileIO(filename, mode="r")
return f.read()
def write_string_to_file(filename, file_content):
"""Writes a string to a given file.
Args:
filename: string, path to a file
file_content: string, contents that need to be written to the file
Raises:
errors.OpError: If there are errors during the operation.
"""
with FileIO(filename, mode="w") as f:
f.write(file_content)
def get_matching_files(filename):
"""Returns a list of files that match the given pattern.
Args:
filename: string, the pattern
Returns:
Returns a list of strings containing filenames that match the given pattern.
Raises:
errors.OpError: If there are filesystem / directory listing errors.
"""
with errors.raise_exception_on_not_ok_status() as status:
# Convert each element to string, since the return values of the
# vector of string should be interpreted as strings, not bytes.
return [compat.as_str_any(matching_filename)
for |
caterinaurban/Lyra | src/lyra/unittests/numerical/interval/backward/summarization/subscriptions1b.py | Python | mpl-2.0 | 350 | 0.005714 |
x: int = int(input())
# STATE | : L -> [-inf, inf]; len(L) -> [0, inf]; x -> [-inf, inf]; z -> [-inf, inf]
L: List[List[int]] = [[0], [1], [x]]
# STATE: L -> [-inf, inf]; len(L) -> [3, inf]; x -> [-inf, inf]; z -> [-inf, inf]
z: int = L[2][0]
# STATE: L -> [-inf, inf]; len(L) -> [0, inf]; x -> [ | -inf, inf]; z -> [2, 2]
if z != 2:
raise ValueError
|
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/_codecs_tw.py | Python | gpl-2.0 | 377 | 0.013263 | # encoding: utf-8
# module _codecs_tw
# from /usr/lib/python2.7/ | lib-dynload/_codecs_tw.x86_64-linux-gnu.so
# by generator 1.135
# no doc
# no imports
# functions
def getcodec(*args, **kwargs): # real signature unknown
""" """
pass
# no classes
# variables with complex values
__map_big5 = None # (!) real value is ''
__map_cp950ext = None # (!) real value i | s ''
|
mbikyaw/wordpress-mbinfo | utils.py | Python | mit | 1,016 | 0 | __author__ = 'mbikyaw'
class AttributeDict(dict):
"""
Dictionary subclass enabling attribute lookup/assignment of keys/values.
For example::
>>> m = AttributeDict({'foo': 'bar'})
>>> m.foo
'bar'
>>> m.foo = 'not bar'
>>> m['foo']
'not bar'
``AttributeDict`` objects also provide ``.first()`` which acts like
``.get()`` but accepts multiple keys as arguments, and returns the value of
the first hit, e.g.::
>>> m = AttributeDict({'foo': 'bar', 'biz': 'baz'})
>>> m.first(' | wrong', 'incorrect', 'foo', 'biz')
'bar'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError:
# to conform with __getattr__ spec
raise AttributeError(key)
def __setattr__(self, key, value):
self[key] = value
def first(self, *names):
for name in names:
value = self.get(name)
| if value:
return value
|
soslan/passgen | setup.py | Python | mit | 1,122 | 0 | #!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core i | mport setup
with open('README.rst') as file:
long_description = file.read()
setup(name='passgen',
version='1.1.1',
description='Random password generator',
long_description=long_description,
url='https://github.com/soslan/passgen',
author='Soslan Khubulov',
author_email='soslanx | @gmail.com',
license='MIT',
package_dir={'': 'src'},
entry_points={
'console_scripts': [
'passgen = passgen:main',
],
},
py_modules=['passgen'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Topic :: Utilities',
],
keywords='password random',
)
|
Azure/azure-sdk-for-python | sdk/operationsmanagement/azure-mgmt-operationsmanagement/azure/mgmt/operationsmanagement/aio/_configuration.py | Python | mit | 4,167 | 0.00336 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class Operati | onsManagementClientConfiguration(Configuration):
"""Configuration for OperationsManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.co | re.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param provider_name: Provider name for the parent resource.
:type provider_name: str
:param resource_type: Resource type for the parent resource.
:type resource_type: str
:param resource_name: Parent resource name.
:type resource_name: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
provider_name: str,
resource_type: str,
resource_name: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if provider_name is None:
raise ValueError("Parameter 'provider_name' must not be None.")
if resource_type is None:
raise ValueError("Parameter 'resource_type' must not be None.")
if resource_name is None:
raise ValueError("Parameter 'resource_name' must not be None.")
super(OperationsManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.provider_name = provider_name
self.resource_type = resource_type
self.resource_name = resource_name
self.api_version = "2015-11-01-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-operationsmanagement/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
|
dalembertian/fabmanager | fabmanager/decorators.py | Python | bsd-3-clause | 3,353 | 0.006263 | # encoding: utf-8
# Useful decorators
from functools import wraps
from fabric.api import *
# These variables must be defined in the actual fabfile.py for the proxy decorators:
# env.proxy_server = 'proxy.com.br' Address of the proxy server intermediating the executation
# env.proxy_home = '/home/me/fabric' Location, at the proxy server, where fabfily.py will reside
# env.proxy_host = 'somehost' Host or role specified for this session - will be i | ncluded
# env.proxy_role = 'somerole' in the fab command executed at the proxy server
def _is_running_on_proxy():
"""
Returns True if this fabfile is being run on the proxy server.
"""
return env.real_fabfile.find(env.proxy_home) < 0
def _run_on_proxy(role=None, host=None):
"""
Decorator that creates the actual decorator to route tasks through proxy.
This is necessary in order to be able to pass parameters to the actual decorator.
Usage:
| @hosts(env.proxy_server)
@_run_on_proxy([host='somehost'|role='somerole'])
def mytask():
Each task must be surrounded by a @hosts decorator specifying the proxy server,
so the task will be initially run at the proxy.
Then the @_run_on_proxy decorator can be used with or without specifying the actual
servers where the task should be run. If no servers are specifyied, then the params
env.proxy_role or env.proxy_host should be previously populated by some other task.
"""
def actual_decorator(task):
"""
Actual decorator that routes the task to be run on proxy. It is iself decorated
with @wraps in order to keep the original task documentation (docstring).
"""
@wraps(task)
def wrapper(*args, **kwargs):
"""
Wrapper that checks if command is being run on proxy server.
If it is, invokes fab again specifying some other server.
If it is already being run on some other server, just execute the task.
"""
if _is_running_on_proxy():
# There are several ways to specify in which other server this
# task is to be run. Hosts/roles specified by the decorator itself
# have higher priority.
# If a role or host parameter was specified for the decorator, use it
if role:
kwargs['role'] = role
elif host:
kwargs['host'] = host
# If some previous task populated env.proxy_role or proxy_host, use it
elif env.proxy_role:
kwargs['role'] = env.proxy_role
elif env.proxy_host:
kwargs['host'] = env.proxy_host
with cd(env.proxy_home):
arguments = []
if args:
arguments.append(','.join(args))
if kwargs:
arguments.append(','.join(['%s=%s' % (k,v) for k,v in kwargs.items()]))
if args or kwargs:
run('fab %s:%s' % (task.__name__, ','.join(arguments)))
else:
run('fab %s' % task.__name__)
else:
task(*args, **kwargs)
return wrapper
return actual_decorator
|
hep-gc/glint-service | glint-service/glint_git_setup.py | Python | apache-2.0 | 12,100 | 0.025289 | #!/usr/bin/python
glint_lib_directory='/var/lib/glint'
horizon_git_repo='https://github.com/rd37/horizon.git'
glint_git_repo='https://github.com/hep-gc/glint.git'
glint_inst_type='default'
horizon_inst_type='default'
glint_server='django'
glint_horizon_server='django'
cfg_dir = '/etc/glint'
pkg_dir = 'glint-service'
import sys,subprocess
import glint_platform as plat
from glint_arg_parser import GlintArgumentParser
def proceed(msg):
print msg
input = raw_input()
if input == '' or input == 'y' or input == 'Y':
return True
return False
def execute_command(cmd_args,input):
    """Run *cmd_args* (argv list) via subprocess.

    When *input* is not None it is written to the child's stdin.
    Returns (stdout, stderr); non-empty stderr is echoed as a warning
    but is NOT treated as a failure here -- callers decide.
    """
    if input is None:
        process = subprocess.Popen(cmd_args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        out,err = process.communicate()
    else:
        #print "Need to use use input"
        process = subprocess.Popen(cmd_args,stdout=subprocess.PIPE,stdin=subprocess.PIPE,stderr=subprocess.PIPE)
        out,err = process.communicate(input=input)
    if err:
        print "warning: %s"%err
        sys.stdout.flush()
    return out,err
def check_dependencies():
    """Verify that git is installed and the 'glint' system user exists.

    Offers to create the glint user interactively when it is missing.
    Returns True when all dependencies are satisfied, False otherwise.
    """
    print "dependency check: check if git and user glint exist"
    [out,err] = execute_command(['which','git'],None)
    # NOTE(review): relies on 'which' printing "no git ..." on stdout when
    # the tool is missing; many 'which' implementations print nothing and
    # signal via exit code instead -- confirm on target platforms.
    if "no git" in out:
        print "Error, unable to find git tool, please install and attempt glint install again"
        return False
    [out,err] = execute_command(['grep','glint','/etc/passwd'],None)
    if out == '':
        print "Warning, unable to find system user glint"
        if proceed('Do you wish to setup glint as a User? [Y,n]'):
            print "Ok lets setup glint user "
            [out,err] = execute_command(['python','glint_system_create_user.py','create-glint-user'],None)
            if err:
                print "Unable to create glint user"
                return False
            #print "out: %s"%out
            return True
        else:
            return False
    return True
def download_horizon():
    """Clone the glint horizon fork into glint_lib_directory/horizon.

    Returns True on success, False when the clone reported errors.
    """
    print "download horizon using git clone"
    [out,err] = execute_command(['git','clone','%s'%horizon_git_repo,'%s/horizon'%glint_lib_directory],None)
    # NOTE(review): git writes progress to stderr even on success, so a
    # successful clone may be reported as a failure here -- verify.
    if err:
        print "Unable to git clone glint-horizon "
        return False
    print "git clone glint-horizon result %s"%out
    return True
def download_glint():
    """Clone the glint repository into glint_lib_directory/glint.

    Returns True on success, False when the clone reported errors.
    """
    print "download glint using git clone"
    [out,err] = execute_command(['git','clone','%s'%glint_git_repo,'%s/glint'%glint_lib_directory],None)
    # NOTE(review): as in download_horizon, stderr output does not
    # necessarily mean the clone failed -- verify.
    if err:
        print "Unable to git clone glint"
        return False
    print "git clone glint result %s"%out
    return True
def install_horizon():
print "Install glint-horizon"
print "Install library pre-reqs"
if plat.isRedhat():
[out,err] = execute_command(['yum','install','libxml2-devel'],'y')
print out
[out,err] = execute_command(['yum','install','libxslt-devel'],'y')
print out
[out,err] = execute_command(['yum','install','gcc'],'y')
print out
[out,err] = execute_command(['yum','install','git-core'],'y')
print out
[out,err] = execute_command(['yum','install','python-virtualenv'],'y')
print out
[out,err] = execute_command(['yum','install','python-devel'],'y')
print out
[out,err] = execute_command(['yum','install','openssl-devel'],'y')
print out
[out,err] = execute_command(['yum','install','libffi-devel'],'y')
print out
else:
[out,err] = execute_command(['apt-get','install','libxml2-dev'],'y')
print out
[out,err] = execute_command(['apt-get','install','libxslt-dev'],'y')
print out
[out,err] = execute_command(['apt-get','install','gcc'],'y')
print out
[out,err] = execute_command(['apt-get','install','git-core'],'y')
print out
[out,err] = execute_command(['apt-get','install','python-virtualenv'],'y')
print out
[out,err] = execute_command(['apt-get','install','python-dev'],'y')
print out
[out,err] = execute_command(['apt-get','install','libssl-dev'],'y')
print out
[out,err] = execute_command(['apt-get','install','libffi-dev'],'y')
print out
if horizon_inst_type == 'default':
print "Install Horizon using default (virtualenv in /var/lib/glint/horizon/.venv)"
[out,err] = execute_command(['python','/var/lib/glint/horizon/tools/install_venv.py'],None)
[out,err] = execute_command(['chown','-R','glint','/var/lib/glint/horizon'],None)
[out,err] = execute_command(['chgrp','-R','glint','/var/lib/glint/horizon'],None)
elif horizon_inst_type == 'replace':
print "Currently Unsupported: Remove openstack-horizon and replace with glint-horizon"
elif horizon_inst_type == 'contextualize':
print "Currently Unsupported: Insert or Replace parts of the openstack-horizon installation"
else:
print "Unrecognized installation type for glint - %s - error exiting"%horizon_inst_type
return
print "IP:Open Port used for glint-horizon ... port 8080, restart networking"
print "mkdir /var/run/glint and change permissions"
[out,err] = execute_command(['mkdir','/var/run/glint'],None)
[out,err] = execute_command(['chown','glint','/var/run/glint'],None)
[out,err] = execute_command(['chgrp','glint','/var/run/glint'],None)
if glint_horizon_server == 'django':
print "Setup /usr/bin/glint-horizon as main system start application (reads cfg file for gl-hor location)"
#copy glint-horizon from /var/lib/glint/horizon to /usr/bin/glint-horizon
[out,err] = execu | te_command(['cp','%s/gl | int-horizon'%pkg_dir,'/usr/bin/.'],None)
[out,err] = execute_command(['chmod','755','/usr/bin/glint-horizon'],None)
print "Setup /etc/init.d/glint-horizon as a service"
[out,err] = execute_command(['cp','%s/openstack-glint-horizon'%pkg_dir,'/etc/init.d/.'],None)
[out,err] = execute_command(['chmod','755','/etc/init.d/openstack-glint-horizon'],None)
elif glint_horizon_server == 'apache':
print "Currently Unsupprted: Register glint-horizon with local apache this is used by /user/bin/glint-horizon to start stop the apache app"
print "Currently Unsupported: Setup /usr/bin/glint-horizon as main system start application (reads cfg file for gl-hor location)"
print "Currently Unsupported: Setup /etc/init.d/glint-horizon as a service"
def install_glint():
print "Install glint"
if glint_inst_type == 'default':
print "Leave glint in /var/lib/glint/glint, but change own and group to glint"
[out,err] = execute_command(['chown','-R','glint','/var/lib/glint/glint'],None)
[out,err] = execute_command(['chgrp','-R','glint','/var/lib/glint/glint'],None)
elif glint_inst_type == 'local':
print "Currently Unsupported: Install glint into sites-packages - use setup.py"
else:
print "Unrecognized installation type for glint - %s - error exiting"%glint_inst_type
return
print "IP:Open Glint Port 9494 and restart networking"
print "mkdir /var/run/glint and change permissions"
[out,err] = execute_command(['mkdir','/var/log/glint-service'],None)
[out,err] = execute_command(['chown','glint','/var/log/glint-service'],None)
[out,err] = execute_command(['chgrp','glint','/var/log/glint-service'],None)
print "copy glint service yaml conf file"
[out,err] = execute_command(['cp','%s/glint_services.yaml'%cfg_dir,'/var/lib/glint/glint/.'],None)
[out,err] = execute_command(['chown','glint:glint','/var/lib/glint/glint/glint_services.yaml'],None)
if glint_server == 'django':
print "Setup /usr/bin/glint as main start of glint server from installed (either /var/lib or site-packeges) using django test server"
[out,err] = execute_command(['cp','%s/glint'%pkg_dir,'/usr/bin/.'],None)
[out,err] = execute_command(['chmod','755','/usr/bin/glint'],None)
print "Setup /etc/init.d/glint as a service "
[out,err] = execute_command(['cp','%s/openstack-glint'%pkg_dir,'/etc/init.d/.'],None)
[out,err] = execute_command(['chmod','755','/etc/init.d/ |
johnwheeler/flask-ask | tests/test_audio.py | Python | apache-2.0 | 2,585 | 0.003095 | import unittest
from mock import patch, MagicMock
from flask import Flask
from flask_ask import Ask, audio
from flask_ask.models import _Field
class AudioUnitTests(unittest.TestCase):
    """Unit tests for building audio items/directives."""

    def setUp(self):
        # Patch out the active Ask app and the request context for the
        # duration of each test.
        self.ask_patcher = patch('flask_ask.core.find_ask', return_value=Ask())
        self.context_patcher = patch('flask_ask.models.context', return_value=MagicMock())
        self.ask_patcher.start()
        self.context_patcher.start()

    def tearDown(self):
        for patcher in (self.ask_patcher, self.context_patcher):
            patcher.stop()

    def test_token_generation(self):
        """ Confirm we get a new token when setting a stream url """
        item = audio()._audio_item(stream_url='https://fakestream', offset=123)
        stream = item['stream']
        self.assertEqual(len(stream['token']), 36)
        self.assertEqual(stream['offsetInMilliseconds'], 123)

    def test_custom_token(self):
        """ Check to see that the provided opaque token remains constant"""
        opaque = "hello_world"
        item = audio()._audio_item(stream_url='https://fakestream', offset=10,
                                   opaque_token=opaque)
        stream = item['stream']
        self.assertEqual(stream['token'], opaque)
        self.assertEqual(stream['offsetInMilliseconds'], 10)
class AskStreamHandlingTests(unittest.TestCase):
    """Tests for Ask.current_stream / directive stream bookkeeping."""

    def setUp(self):
        # Patch the Ask context/request properties at the class level so any
        # Ask instance created in a test sees a fake user context.
        fake_context = {'System': {'user': {'userId': 'dave'}}}
        self.context_patcher = patch.object(Ask, 'context', return_value=fake_context)
        self.context_patcher.start()
        self.request_patcher = patch.object(Ask, 'request', return_value=MagicMock())
        self.request_patcher.start()

    def tearDown(self):
        self.context_patcher.stop()
        self.request_patcher.stop()

    def test_setting_and_getting_current_stream(self):
        ask = Ask()
        with patch('flask_ask.core.find_ask', return_value=ask):
            # With no stream pushed, current_stream is an empty _Field.
            self.assertEqual(_Field(), ask.current_stream)

            stream = _Field()
            stream.__dict__.update({'token': 'asdf', 'offsetInMilliseconds': 123, 'url': 'junk'})
            # Once top_stream reports a stream, current_stream mirrors it.
            with patch('flask_ask.core.top_stream', return_value=stream):
                self.assertEqual(stream, ask.current_stream)

    def test_from_directive_call(self):
        ask = Ask()
        fake_stream = _Field()
        fake_stream.__dict__.update({'token':'fake'})
        # _from_directive should return whatever the stream buffer holds.
        with patch('flask_ask.core.top_stream', return_value=fake_stream):
            from_buffer = ask._from_directive()
            self.assertEqual(fake_stream, from_buffer)
|
georgidimov/cannon-wars | tests/test_position.py | Python | gpl-2.0 | 1,788 | 0 | import unittest
from src.position import Position
class TestPosition(unittest.TestCase):
    """Unit tests for the Position value object."""

    def test_getters(self):
        position = Position(1, 2)
        self.assertEqual(position.get_horizontal_position(), 1)
        self.assertEqual(position.get_vertical_position(), 2)

    def test_setters(self):
        position = Position()
        position.set_horizontal_position(21)
        position.set_vertical_position(42)
        self.assertEqual(position.get_horizontal_position(), 21)
        self.assertEqual(position.get_vertical_position(), 42)

    def test_default_values(self):
        # Renamed from the misspelled 'test_defualt_values'.
        position = Position()
        self.assertEqual(position.get_horizontal_position(), 0)
        self.assertEqual(position.get_vertical_position(), 0)

    def test_to_tuple(self):
        position = Position(23, 47)
        self.assertEqual(position.to_tuple(), (23, 47))

    def test_serialization(self):
        position = Position(5, 25)
        self.assertEqual(position.serialize(), '(5, 25)')

    def test_deserialization(self):
        position = Position(2, 22)
        position.deserialize('(5, 25)')
        self.assertEqual(position.get_horizontal_position(), 5)
        self.assertEqual(position.get_vertical_position(), 25)

    def test_updating_values(self):
        # deserialize() must overwrite values set both via the constructor
        # and via the setters.
        position = Position(2, 22)
        position.set_horizontal_position(0)
        position.set_vertical_position(1)
        position.deserialize('(7, 8)')
        self.assertEqual(position.get_horizontal_position(), 7)
        self.assertEqual(position.get_vertical_position(), 8)
if __name__ == '__main__':
unittest.main()
|
timpalpant/calibre | src/calibre/ebooks/pdf/render/from_html.py | Python | gpl-3.0 | 16,407 | 0.004815 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import json, os
from future_builtins import map
from math import floor
from collections import defaultdict
from PyQt5.Qt import (
QObject, QPainter, Qt, QSize, QTimer, pyqtProperty, QEventLoop, QPixmap, QRect, pyqtSlot)
from PyQt5.QtWebKit im | port QWebSettings
from PyQt5.QtWebKitWidgets import QWebView, QWebPage
from calibre import fit_image
from calibre.constants import iswindows
from calibre.ebooks.oeb.display.webview import load_html
from calibre.ebooks.pdf.re | nder.common import (inch, cm, mm, pica, cicero,
didot, PAPER_SIZES)
from calibre.ebooks.pdf.render.engine import PdfDevice
from calibre.ptempfile import PersistentTemporaryFile
def get_page_size(opts, for_comic=False):  # {{{
    """Return the output PDF page size in points as (width, height).

    Precedence: the output profile's screen size (unless overridden, the
    'default' profile, or an absurdly wide profile), then opts.custom_size
    ('WIDTHxHEIGHT' interpreted in opts.unit), then the named
    opts.paper_size from PAPER_SIZES.
    """
    use_profile = not (opts.override_profile_size or
                       opts.output_profile.short_name == 'default' or
                       opts.output_profile.width > 9999)
    if use_profile:
        w = (opts.output_profile.comic_screen_size[0] if for_comic else
             opts.output_profile.width)
        h = (opts.output_profile.comic_screen_size[1] if for_comic else
             opts.output_profile.height)
        dpi = opts.output_profile.dpi
        factor = 72.0 / dpi  # device pixels -> points
        page_size = (factor * w, factor * h)
    else:
        page_size = None
        if opts.custom_size is not None:
            width, sep, height = opts.custom_size.partition('x')
            if height:
                try:
                    # Accept both '.' and ',' as the decimal separator.
                    width = float(width.replace(',', '.'))
                    height = float(height.replace(',', '.'))
                except ValueError:
                    # Malformed custom size: fall through to the named paper
                    # size. (Was a bare except, which could also hide
                    # unrelated programming errors.)
                    pass
                else:
                    if opts.unit == 'devicepixel':
                        factor = 72.0 / opts.output_profile.dpi
                    else:
                        factor = {'point':1.0, 'inch':inch, 'cicero':cicero,
                                  'didot':didot, 'pica':pica, 'millimeter':mm,
                                  'centimeter':cm}[opts.unit]
                    page_size = (factor*width, factor*height)
    if page_size is None:
        page_size = PAPER_SIZES[opts.paper_size]
    return page_size
# }}}
class Page(QWebPage): # {{{
    """QWebPage that applies the user's PDF font preferences and tames
    long-running javascript in the rendered document."""

    def __init__(self, opts, log):
        self.log = log
        QWebPage.__init__(self)
        settings = self.settings()
        # Font sizes come straight from the conversion options.
        settings.setFontSize(QWebSettings.DefaultFontSize,
                opts.pdf_default_font_size)
        settings.setFontSize(QWebSettings.DefaultFixedFontSize,
                opts.pdf_mono_font_size)
        settings.setFontSize(QWebSettings.MinimumLogicalFontSize, 8)
        settings.setFontSize(QWebSettings.MinimumFontSize, 8)

        # The WebKit "standard" font follows pdf_standard_font, falling back
        # to the serif family for unknown values.
        std = {'serif':opts.pdf_serif_family, 'sans':opts.pdf_sans_family,
                'mono':opts.pdf_mono_family}.get(opts.pdf_standard_font,
                        opts.pdf_serif_family)
        if std:
            settings.setFontFamily(QWebSettings.StandardFont, std)
        if opts.pdf_serif_family:
            settings.setFontFamily(QWebSettings.SerifFont, opts.pdf_serif_family)
        if opts.pdf_sans_family:
            settings.setFontFamily(QWebSettings.SansSerifFont,
                    opts.pdf_sans_family)
        if opts.pdf_mono_family:
            settings.setFontFamily(QWebSettings.FixedFont, opts.pdf_mono_family)
        # Counts how many times long-running JS has been given a grace period.
        self.longjs_counter = 0

    def javaScriptConsoleMessage(self, msg, lineno, msgid):
        # Forward in-page console output to the conversion log.
        self.log.debug(u'JS:', unicode(msg))

    def javaScriptAlert(self, frame, msg):
        self.log(unicode(msg))

    @pyqtSlot(result=bool)
    def shouldInterruptJavaScript(self):
        # Allow a script to keep running ten times before aborting it.
        if self.longjs_counter < 10:
            self.log('Long running javascript, letting it proceed')
            self.longjs_counter += 1
            return False
        self.log.warn('Long running javascript, aborting it')
        return True
# }}}
def draw_image_page(page_rect, painter, p, preserve_aspect_ratio=True):
    # Draw pixmap *p* onto *painter* inside page_rect. When
    # preserve_aspect_ratio is True the image is scaled to fit and centred
    # (page_rect is mutated to the final draw rectangle); otherwise it is
    # stretched to fill the rect.
    if preserve_aspect_ratio:
        aspect_ratio = float(p.width())/p.height()
        nw, nh = page_rect.width(), page_rect.height()
        if aspect_ratio > 1:
            # Wider than tall: constrain by width.
            nh = int(page_rect.width()/aspect_ratio)
        else: # Width is smaller than height
            # NOTE(review): nh is truncated to int above but nw stays a
            # float here; presumably fit_image/QRect coerce -- confirm.
            nw = page_rect.height()*aspect_ratio
        __, nnw, nnh = fit_image(nw, nh, page_rect.width(),
                page_rect.height())
        # Centre the fitted image within the original page rect.
        dx = int((page_rect.width() - nnw)/2.)
        dy = int((page_rect.height() - nnh)/2.)
        page_rect.translate(dx, dy)
        page_rect.setHeight(nnh)
        page_rect.setWidth(nnw)
    painter.drawPixmap(page_rect, p, p.rect())
class PDFWriter(QObject):
    def _pass_json_value_getter(self):
        # Serialize self.bridge_value so the in-page javascript can read it
        # as a string property.
        val = json.dumps(self.bridge_value)
        return val

    def _pass_json_value_setter(self, value):
        # Decode a JSON string written by the in-page javascript.
        self.bridge_value = json.loads(unicode(value))

    # Property exposed over the Qt javascript bridge for two-way value
    # passing between Python and the rendered page.
    _pass_json_value = pyqtProperty(str, fget=_pass_json_value_getter,
            fset=_pass_json_value_setter)

    @pyqtSlot(result=unicode)
    def title(self):
        # Exposed to in-page javascript: the document title.
        return self.doc_title

    @pyqtSlot(result=unicode)
    def author(self):
        # Exposed to in-page javascript: the document author.
        return self.doc_author

    @pyqtSlot(result=unicode)
    def section(self):
        # Exposed to in-page javascript: the current section name, used by
        # header/footer templates.
        return self.current_section
    def __init__(self, opts, log, cover_data=None, toc=None):
        # Rendering requires a Qt GUI event loop.
        from calibre.gui2 import must_use_qt
        must_use_qt()
        QObject.__init__(self)

        self.logger = self.log = log
        self.opts = opts
        self.cover_data = cover_data
        self.paged_js = None
        self.toc = toc

        self.loop = QEventLoop()
        self.view = QWebView()
        self.page = Page(opts, self.log)
        self.view.setPage(self.page)
        self.view.setRenderHints(QPainter.Antialiasing|
                    QPainter.TextAntialiasing|QPainter.SmoothPixmapTransform)
        # Render only after the HTML has fully loaded; queued so the load
        # handler returns before rendering starts.
        self.view.loadFinished.connect(self.render_html,
                type=Qt.QueuedConnection)
        # Scrollbars would otherwise be painted into the PDF.
        for x in (Qt.Horizontal, Qt.Vertical):
            self.view.page().mainFrame().setScrollBarPolicy(x,
                    Qt.ScrollBarAlwaysOff)
        # No-op progress reporter; callers may replace it.
        self.report_progress = lambda x, y: x
        self.current_section = ''
def dump(self, items, out_stream, pdf_metadata):
opts = self.opts
page_size = get_page_size(self.opts)
xdpi, ydpi = self.view.logicalDpiX(), self.view.logicalDpiY()
# We cannot set the side margins in the webview as there is no right
# margin for the last page (the margins are implemented with
# -webkit-column-gap)
ml, mr = opts.margin_left, opts.margin_right
self.doc = PdfDevice(out_stream, page_size=page_size, left_margin=ml,
top_margin=0, right_margin=mr, bottom_margin=0,
xdpi=xdpi, ydpi=ydpi, errors=self.log.error,
debug=self.log.debug, compress=not
opts.uncompressed_pdf, opts=opts,
mark_links=opts.pdf_mark_links)
self.footer = opts.pdf_footer_template
if self.footer:
self.footer = self.footer.strip()
if not self.footer and opts.pdf_page_numbers:
self.footer = '<p style="text-align:center; text-indent: 0">_PAGENUM_</p>'
self.header = opts.pdf_header_template
if self.header:
self.header = self.header.strip()
min_margin = 1.5 * opts._final_base_font_size
if self.footer and opts.margin_bottom < min_margin:
self.log.warn('Bottom margin is too small for footer, increasing it to %.1fpts' % min_margin)
opts.margin_bottom = min_margin
if self.header and opts.margin_top < min_margin:
self.log.warn('Top margin is too small for header, increasing it to %.1fpts' % min_margin)
opts.margin_top = min_margin
self.page.setViewportSize(QSize(self.doc.width(), self.doc.height |
rohitranjan1991/home-assistant | homeassistant/components/freebox/const.py | Python | mit | 2,269 | 0.000881 | """Freebox component constants."""
from __future__ import annotations
import socket
from homeassistant.components.sensor import SensorEntityDescription
from homeassistant.const import DATA_RATE_KILOBYTES_PER_SECOND, PERCENTAGE, Platform
DOMAIN = "freebox"
SERVICE_REBOOT = "reboot"
APP_DESC = {
"app_id": "has | s",
"app_name": "Home Assistant",
"app_version": "0.106",
"device_name": s | ocket.gethostname(),
}
API_VERSION = "v6"
PLATFORMS = [Platform.BUTTON, Platform.DEVICE_TRACKER, Platform.SENSOR, Platform.SWITCH]
DEFAULT_DEVICE_NAME = "Unknown device"
# to store the cookie
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONNECTION_SENSORS: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="rate_down",
name="Freebox download speed",
native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:download-network",
),
SensorEntityDescription(
key="rate_up",
name="Freebox upload speed",
native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND,
icon="mdi:upload-network",
),
)
CONNECTION_SENSORS_KEYS: list[str] = [desc.key for desc in CONNECTION_SENSORS]
CALL_SENSORS: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="missed",
name="Freebox missed calls",
icon="mdi:phone-missed",
),
)
DISK_PARTITION_SENSORS: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="partition_free_space",
name="free space",
native_unit_of_measurement=PERCENTAGE,
icon="mdi:harddisk",
),
)
# Icons
DEVICE_ICONS = {
"freebox_delta": "mdi:television-guide",
"freebox_hd": "mdi:television-guide",
"freebox_mini": "mdi:television-guide",
"freebox_player": "mdi:television-guide",
"ip_camera": "mdi:cctv",
"ip_phone": "mdi:phone-voip",
"laptop": "mdi:laptop",
"multimedia_device": "mdi:play-network",
"nas": "mdi:nas",
"networking_device": "mdi:network",
"printer": "mdi:printer",
"router": "mdi:router-wireless",
"smartphone": "mdi:cellphone",
"tablet": "mdi:tablet",
"television": "mdi:television",
"vg_console": "mdi:gamepad-variant",
"workstation": "mdi:desktop-tower-monitor",
}
|
UCSUR-Pitt/wprdc-etl | test/unit/test_schema.py | Python | mit | 1,363 | 0.001467 | from operator import itemgetter
from unittest import TestCase
import pipeline as pl
from marshmallow import fields
class FakeSchema(pl.BaseSchema):
    # Minimal schema covering each field type that
    # serialize_to_ckan_fields() must map to a CKAN column type.
    str = fields.String()
    int = fields.Integer()
    num = fields.Number()
    datetime = fields.DateTime()
    date = fields.Date(dump_to='a_different_name')  # renamed on dump
    not_there = fields.String(load_only=True)  # load-only: must be omitted
class TestSchema(TestCase):
    """Tests for BaseSchema.serialize_to_ckan_fields()."""

    def _serialized_fields(self, **kwargs):
        # Serialize FakeSchema and sort by id so list comparisons are stable.
        return sorted(FakeSchema().serialize_to_ckan_fields(**kwargs),
                      key=itemgetter('id'))

    def test_ckan_serialization(self):
        self.assertListEqual(
            self._serialized_fields(),
            [
                {'id': 'a_different_name', 'type': 'date'},
                {'id': 'datetime', 'type': 'timestamp'},
                {'id': 'int', 'type': 'numeric'},
                {'id': 'num', 'type': 'numeric'},
                {'id': 'str', 'type': 'text'}
            ]
        )

    def test_ckan_serialization_caps(self):
        # capitalize=True upper-cases the field ids but not the types.
        self.assertListEqual(
            self._serialized_fields(capitalize=True),
            [
                {'id': 'A_DIFFERENT_NAME', 'type': 'date'},
                {'id': 'DATETIME', 'type': 'timestamp'},
                {'id': 'INT', 'type': 'numeric'},
                {'id': 'NUM', 'type': 'numeric'},
                {'id': 'STR', 'type': 'text'}
            ]
        )
|
philipp-sumo/kitsune | kitsune/wiki/forms.py | Python | bsd-3-clause | 12,054 | 0.000083 | import re
from django import forms
from django.conf import settings
from django.template.defaultfilters import slugify
from tower import ugettext_lazy as _lazy
from kitsune.products.models import Product, Topic
from kitsune.sumo.form_fields import MultiUsernameField, StrippedCharField
from kitsune.wiki.config import SIGNIFICANCES, CATEGORIES
from kitsune.wiki.models import (
Document, Revision, MAX_REVISION_COMMENT_LENGTH)
from kitsune.wiki.tasks import add_short_links
from kitsune.wiki.widgets import (
RadioFieldRendererWithHelpText, ProductTopicsAndSubtopicsWidget)
# Lazily-translated validation messages for the forms below.
# %(show_value)s / %(limit_value)s are interpolated by Django's validators.
TITLE_REQUIRED = _lazy(u'Please provide a title.')
TITLE_SHORT = _lazy(u'The title is too short (%(show_value)s characters). '
                    u'It must be at least %(limit_value)s characters.')
TITLE_LONG = _lazy(u'Please keep the length of the title to %(limit_value)s '
                   u'characters or less. It is currently %(show_value)s '
                   u'characters.')
SLUG_REQUIRED = _lazy(u'Please provide a slug.')
SLUG_INVALID = _lazy(u'The slug provided is not valid.')
SLUG_SHORT = _lazy(u'The slug is too short (%(show_value)s characters). '
                   u'It must be at least %(limit_value)s characters.')
SLUG_LONG = _lazy(u'Please keep the length of the slug to %(limit_value)s '
                  u'characters or less. It is currently %(show_value)s '
                  u'characters.')
SUMMARY_REQUIRED = _lazy(u'Please provide a summary.')
SUMMARY_SHORT = _lazy(u'The summary is too short (%(show_value)s characters). '
                      u'It must be at least %(limit_value)s characters.')
SUMMARY_LONG = _lazy(u'Please keep the length of the summary to '
                     u'%(limit_value)s characters or less. It is currently '
                     u'%(show_value)s characters.')
CONTENT_REQUIRED = _lazy(u'Please provide content.')
CONTENT_SHORT = _lazy(u'The content is too short (%(show_value)s characters). '
                      u'It must be at least %(limit_value)s characters.')
CONTENT_LONG = _lazy(u'Please keep the length of the content to '
                     u'%(limit_value)s characters or less. It is currently '
                     u'%(show_value)s characters.')
COMMENT_LONG = _lazy(u'Please keep the length of the comment to '
                     u'%(limit_value)s characters or less. It is currently '
                     u'%(show_value)s characters.')
PRODUCT_REQUIRED = _lazy(u'Please select at least one product.')
TOPIC_REQUIRED = _lazy(u'Please select at least one topic.')
class DocumentForm(forms.ModelForm):
    """Form to create/edit a document."""

    def __init__(self, *args, **kwargs):
        # Quasi-kwargs:
        can_archive = kwargs.pop('can_archive', False)
        can_edit_needs_change = kwargs.pop('can_edit_needs_change', False)
        initial_title = kwargs.pop('initial_title', '')

        super(DocumentForm, self).__init__(*args, **kwargs)

        # Pre-fill title and a slugified slug from the suggested title.
        title_field = self.fields['title']
        title_field.initial = initial_title

        slug_field = self.fields['slug']
        slug_field.initial = slugify(initial_title)

        # Choices are loaded at form-instantiation time so newly added
        # topics/products appear without a restart.
        topics_field = self.fields['topics']
        topics_field.choices = Topic.objects.values_list('id', 'title')

        products_field = self.fields['products']
        products_field.choices = Product.objects.values_list('id', 'title')

        # If user hasn't permission to frob is_archived, remove the field. This
        # causes save() to skip it as well.
        if not can_archive:
            del self.fields['is_archived']

        # If user hasn't permission to mess with needs_change*, remove the
        # fields. This causes save() to skip it as well.
        if not can_edit_needs_change:
            del self.fields['needs_change']
            del self.fields['needs_change_comment']

    title = StrippedCharField(
        min_length=5, max_length=255,
        widget=forms.TextInput(),
        label=_lazy(u'Title:'),
        help_text=_lazy(u'Title of article'),
        error_messages={'required': TITLE_REQUIRED,
                        'min_length': TITLE_SHORT,
                        'max_length': TITLE_LONG})

    # We don't use forms.SlugField because it is too strict in
    # what it allows (English/Roman alpha-numeric characters and dashes).
    # Instead, we do custom validation in `clean_slug` below.
    slug = StrippedCharField(
        min_length=3, max_length=255,
        widget=forms.TextInput(),
        label=_lazy(u'Slug:'),
        help_text=_lazy(u'Article URL'),
        error_messages={'required': SLUG_REQUIRED,
                        'min_length': SLUG_SHORT,
                        'max_length': SLUG_LONG})

    products = forms.MultipleChoiceField(
        label=_lazy(u'Relevant to:'),
        required=False,
        widget=forms.CheckboxSelectMultiple())

    is_localizable = forms.BooleanField(
        initial=True,
        label=_lazy(u'Allow translations:'),
        required=False)

    is_archived = forms.BooleanField(
        label=_lazy(u'Obsolete:'),
        required=False)

    allow_discussion = forms.BooleanField(
        label=_lazy(u'Allow discussion on this article?'),
        initial=True,
        required=False)

    category = forms.ChoiceField(
        choices=CATEGORIES,
        # Required for non-translations, which is
        # enforced in Document.clean().
        required=False,
        label=_lazy(u'Category:'),
        help_text=_lazy(u'Type of article'))

    topics = forms.MultipleChoiceField(
        label=_lazy(u'Topics:'),
        required=False,
        widget=ProductTopicsAndSubtopicsWidget())

    locale = forms.CharField(widget=forms.HiddenInput())

    needs_change = forms.BooleanField(
        label=_lazy(u'Needs change:'),
        initial=False,
        required=False)

    needs_change_comment = forms.CharField(
        label=_lazy(u'Comment:'),
        widget=forms.Textarea(),
        required=False)

    def clean_slug(self):
        """Reject slugs containing characters that would break article URLs."""
        slug = self.cleaned_data['slug']
        # Blacklist /, ?, % and +,
        # NOTE(review): the inner '^'s in the character class are literal, so
        # this also rejects '^' itself, beyond what the comment says --
        # confirm intended.
        if not re.compile(r'^[^/^\+^\?%]+$').match(slug):
            raise forms.ValidationError(SLUG_INVALID)
        return slug

    def clean(self):
        """Cross-field validation: default-locale docs need products/topics."""
        c = super(DocumentForm, self).clean()
        locale = c.get('locale')

        # Products are required for en-US
        products = c.get('products')
        if (locale == settings.WIKI_DEFAULT_LANGUAGE and
            (not products or len(products) < 1)):
            raise forms.ValidationError(PRODUCT_REQUIRED)

        # Topics are required for en-US
        topics = c.get('topics')
        if (locale == settings.WIKI_DEFAULT_LANGUAGE and
            (not topics or len(topics) < 1)):
            raise forms.ValidationError(TOPIC_REQUIRED)

        return c

    class Meta:
        model = Document
        fields = ('title', 'slug', 'category', 'is_localizable', 'products',
                  'topics', 'locale', 'is_archived', 'allow_discussion',
                  'needs_change', 'needs_change_comment')

    def save(self, parent_doc, **kwargs):
        """Persist the Document form, and return the saved Document."""
        doc = super(DocumentForm, self).save(commit=False, **kwargs)
        doc.parent = parent_doc

        # If document doesn't need change, clear out the comment.
        if not doc.needs_change:
            doc.needs_change_comment = ''

        # Create the share link if it doesn't exist and is in
        # a category it should show for.
        doc.save()
        if (doc.category in settings.IA_DEFAULT_CATEGORIES
            and not doc.share_link):
            # This operates under the constraints of passing in a list.
            add_short_links.delay([doc.pk])

        self.save_m2m()

        if parent_doc:
            # Products are not set on translations.
            doc.products.remove(*[p for p in doc.products.all()])

        return doc
class RevisionForm(forms.ModelForm):
"""Form to create new revisions."""
keywords = StrippedCharField(required=False,
label=_lazy(u'Keywords:'),
help_text=_lazy(u'Affects search results'))
su |
jose36/plugin.video.live.ProyectoLuzDigital- | servers/servertools.py | Python | gpl-2.0 | 17,227 | 0.029961 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Utilidades para detectar vídeos de los diferentes conectores
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
#LvX Edited Patched
import re,sys
from core import scrapertools
from core import config
from core import logger
# Listas de servidores empleadas a la hora de reproducir para explicarle al usuario por qué no puede ver un vídeo
# Lista de los servidores que se pueden ver sin cuenta premium de ningún tipo
FREE_SERVERS = []
FREE_SERVERS.extend(['directo','allmyvideos','adnstream','bliptv','divxstage','facebook','fourshared', 'hulkshare', 'twitvid'])
FREE_SERVERS.extend(['googlevideo','gigabyteupload','mediafire','moevideos','movshare','novamov']) #,'putlocker'
FREE_SERVERS.extend(['royalvids','sockshare','stagevu','tutv','userporn','veoh','videobam'])
FREE_SERVERS.extend(['vidbux','videoweed','vimeo','vk','watchfreeinhd','youtube'])#,'videobeer','nowdownload'
FREE_SERVERS.extend(['jumbofiles','nowvideo','streamcloud', 'zinwa', 'dailymotion','justintv', 'vidbull'])
FREE_SERVERS.extend(['vureel','nosvideo','videopremium','movreel','flashx','upafile'])
FREE_SERVERS.extend(['fileflyer','playedto','tunepk','powvideo','videomega','mega','vidspot','netutv','rutube'])
FREE_SERVERS.extend(['videozed','documentary','hugefiles', 'firedrive','videott','tumitv','gamovideo'])
FREE_SERVERS.extend(['torrent','video4you','mailru','streaminto','backin','akstream', 'speedvideo', 'junkyvideo', 'rapidvideo'])
# Lista de TODOS los servidores que funcionan con cuenta premium individual
PREMIUM_SERVERS = ['uploadedto','nowvideo']
# Lista de TODOS los servidores soportados por Filenium
FILENIUM_SERVERS = []
FILENIUM_SERVERS.extend(['linkto','uploadedto','gigasize','youtube','filepost','hotfile','rapidshare','turbobit','mediafire','bitshare','depositfiles'])
FILENIUM_SERVERS.extend(['oron','allmyvideos','novamov','videoweed','movshare','letitbit','shareonline','shareflare','rapidgator'])
FILENIUM_SERVERS.extend(['filefactory','netload','nowdownload','filevelocity','freakshare','userporn','divxstage','putlocker','extabit','vidxden'])
FILENIUM_SERVERS.extend(['vimeo','dailymotion','jumbofiles','zippyshare','glumbouploads','bayfiles','twoshared', 'fourshared','crocko','fiberupload'])
FILENIUM_SERVERS.extend(['ifile','megashares','slingfile','uploading','vipfile','filenium','movreel','one80upload','flashx','nowvideo','vk','moevideos'])
FILENIUM_SERVERS.extend(['cloudzer','filecloudio','luckyshare','lumfile','playedto','ryushare','streamcloud','videozed','xenubox','filesmonster'])
#wupload,fileserve
# Lista de TODOS los servidores soportados por Real-Debrid
REALDEBRID_SERVERS = ['one80upload','tenupload','onefichier','onehostclick','twoshared','fourfastfile','fourshared','abc','asfile','badongo','bayfiles','bitshare','cbscom','cloudzer','cramit','crocko','cwtv','dailymotion','dateito',
'dengee','diglo','extabit','fiberupload','filedino','filefactory','fileflyer','filekeen','filemade','filemates','fileover','filepost',
'filesend','filesmonster','filevelocity','freakshare','free','furk','fyels','gigasize','gigaup','glumbouploads','goldfile','hitfile','hipfile','hostingbulk',
'hotfile','hulkshare','hulu','ifile','jakfile','jumbofiles','justintv','letitbit','loadto','mediafire','mega','megashare','megashares','mixturevideo','muchshare','netload',
'novafile','nowdownload','purevid','putbit','putlocker','redtube','rapidgator','rapidshare','rutube','ryushare','scribd','sendspace','sharebees','shareflare','shragle','slingfile','sockshare',
'soundcloud','speedyshare','turbobit','unibytes','uploadc','uploadedto','uploading','uploadspace','uptobox',
'userporn','veevr','vidbux','vidhog','vidxden','vimeo','vipfile','wattv','xfileshare','youporn','youtube','yunfile','zippyshare','justintv','nowvideo','ultramegabit','filesmonster','oboom']
#wupload,fileserve
ALLDEBRID_SERVERS = ['one80upload','onefichier','twoshared','fourfastfile','fourshared','albafile','bayfiles','bitshare','cloudzer','cramit','crocko','cyberlocker','dailymotion','dengee',
'depfile','dlfree','extabit','extmatrix','filefactory','fileflyer','filegag','filehost','fileover','filepost','filerio','filesabc',
'filesend','filesmonster','filestay','freakshare','gigasize','hotfile','hulkshare','jumbofiles','letitbit','loadto','mediafire','megashares','mixturevideo','netload',
'nitrobits','oteupload','purevid','putlocker','rapidgator','rapidshare','redtube','scribd','secureupload','sharebees','shareflare','slingfile','sockshare',
'soundcloud','speedload','speedyshare','turbobit', 'uloadto', 'uploadc','uploadedto','uploading','uptobox',
'userporn','vimeo','vipfile','youporn','youtube','yunfile','zippyshare','lumfile','ultramegabit','filesmonster']
# Lista completa de todos los servidores soportados por pelisalacarta, usada para buscar patrones
ALL_SERVERS = list( set(FREE_SERVERS) | set(FILENIUM_SERVERS) | set(REALDEBRID_SERVERS) | set(ALLDEBRID_SERVERS) )
ALL_SERVERS.sort()
# Función genérica para encontrar vídeos en una página
def find_video_items(item=None, data=None, channel=""):
    """Scan a web page for links to known video servers.

    When *data* is None the page at ``item.url`` is downloaded first.
    Returns a list of playable Item objects, one per detected video.
    """
    logger.info("[launcher.py] findvideos")

    # Download the page only when the caller did not supply its HTML.
    if data is None:
        from core import scrapertools
        data = scrapertools.cache_page(item.url)

    from core.item import Item
    from servers import servertools

    # Fall back to an empty Item so attribute access below is safe.
    if item is None:
        item = Item()

    itemlist = []
    # Each detected video is a (title, url, serverid, ...) tuple.
    for video in servertools.findvideos(data):
        full_title = item.title.strip() + " - " + video[0].strip()
        itemlist.append(Item(channel=item.channel, title=full_title,
                             action="play", server=video[2], page=item.page,
                             url=video[1], thumbnail=item.thumbnail,
                             show=item.show, plot=item.plot, folder=False))
    return itemlist
def findvideosbyserver(data, serverid):
    """Find video URLs in *data* using one specific server connector.

    Dynamically imports ``servers.<serverid>`` and returns whatever its
    ``find_videos()`` yields; a missing or broken connector yields [].
    """
    logger.info("[servertools.py] findvideos")
    devuelve = []
    try:
        # Use __import__ instead of building an exec string: this matches
        # the sibling findvideos() below, works on Python 3 (the old
        # `exec "..."` statement is Python-2-only syntax), and avoids
        # constructing executable code from the serverid string.
        servers_module = __import__("servers." + serverid)
        server_module = getattr(servers_module, serverid)
        devuelve.extend(server_module.find_videos(data))
    except ImportError:
        logger.info("No existe conector para " + serverid)
    except:
        # Best effort: log the connector's traceback and keep going.
        logger.info("Error en el conector " + serverid)
        import traceback
        for line in traceback.format_exc().splitlines():
            logger.error(line)
    return devuelve
def findvideos(data):
    """Run every known server connector over *data* and collect matches.

    Returns a list of video tuples as produced by each connector's
    ``find_videos()``; connectors that are missing or that raise are
    skipped after logging.
    """
    logger.info("[servertools.py] findvideos")
    devuelve = []
    for serverid in ALL_SERVERS:
        try:
            # "Plex compatible" dynamic import instead of exec.
            servers_module = __import__("servers." + serverid)
            server_module = getattr(servers_module, serverid)
            devuelve.extend(server_module.find_videos(data))
        except ImportError:
            logger.info("No existe conector para " + serverid)
        except:
            # Best effort: a broken connector must not abort the scan.
            logger.info("Error en el conector " + serverid)
            import traceback
            for line in traceback.format_exc().splitlines():
                logger.error(line)
    return devuelve
def get_video_urls(server,url):
'''
servers_module = __import |
HMSBeagle1831/rapidscience | rlp/newsfeed/urls.py | Python | mit | 133 | 0 | from django.conf.urls import | url
from . import views
urlpatterns = [
url(r'^$', views.newsitem_list, name= | 'newsitem_list'),
]
|
blueboxgroup/keystone | keystone/controllers.py | Python | apache-2.0 | 6,964 | 0 | # Copyright 2012 OpenStack Foundation
#
# Lice | nsed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an | "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.serialization import jsonutils
import webob
from keystone.common import extension
from keystone.common import json_home
from keystone.common import wsgi
from keystone import exception
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
MEDIA_TYPE_JSON = 'application/vnd.openstack.identity-%s+json'
_VERSIONS = []
# NOTE(blk-u): latest_app will be set by keystone.service.loadapp(). It gets
# set to the application that was just loaded. In the case of keystone-all,
# loadapp() gets called twice, once for the public app and once for the admin
# app. In the case of httpd/keystone, loadapp() gets called once for the public
# app if this is the public instance or loadapp() gets called for the admin app
# if it's the admin instance.
# This is used to fetch the /v3 JSON Home response. The /v3 JSON Home response
# is the same whether it's the admin or public service so either admin or
# public works.
latest_app = None
def request_v3_json_home(new_prefix):
    """Fetch the /v3 JSON Home document, rewriting URLs under *new_prefix*.

    Returns an empty JSON Home document when v3 support is not enabled.
    """
    if 'v3' not in _VERSIONS:
        return {'resources': {}}
    # Issue an internal JSON Home request against the loaded application.
    request = webob.Request.blank(
        '/v3', headers={'Accept': 'application/json-home'})
    response_body = request.get_response(latest_app).body
    document = jsonutils.loads(response_body)
    json_home.translate_urls(document, new_prefix)
    return document
class Extensions(wsgi.Application):
    """Base extensions controller, specialized by the public and admin APIs."""

    @property
    def extensions(self):
        # Subclasses override this to expose their extension registry.
        return None

    def get_extensions_info(self, context):
        """List every registered extension."""
        values = self.extensions.values()
        return {'extensions': {'values': values}}

    def get_extension_info(self, context, extension_alias):
        """Look up one extension by alias; 404 when it is unknown."""
        registry = self.extensions
        if extension_alias not in registry:
            raise exception.NotFound(target=extension_alias)
        return {'extension': registry[extension_alias]}
class AdminExtensions(Extensions):
    # Serves the extensions registered for the admin API.
    @property
    def extensions(self):
        return extension.ADMIN_EXTENSIONS
class PublicExtensions(Extensions):
    # Serves the extensions registered for the public API.
    @property
    def extensions(self):
        return extension.PUBLIC_EXTENSIONS
def register_version(version):
    # Record an available API version ('v2.0' or 'v3') so the discovery
    # endpoints below advertise it.
    _VERSIONS.append(version)
class MimeTypes(object):
    # MIME types understood by the version discovery endpoints.
    JSON = 'application/json'
    JSON_HOME = 'application/json-home'
def v3_mime_type_best_match(context):
    """Pick JSON or JSON Home based on the request's Accept header."""
    accept = context['accept_header']
    # No Accept header at all: default to plain JSON.
    if not accept:
        return MimeTypes.JSON
    # accept is a WebOb MIMEAccept object, so it supports best_match().
    return accept.best_match([MimeTypes.JSON, MimeTypes.JSON_HOME])
class Version(wsgi.Application):
    """Controller serving the version discovery documents (/, /v2.0, /v3)."""

    def __init__(self, version_type, routers=None):
        # version_type selects which endpoint URL (public/admin) appears in
        # self links; routers contribute the v3 JSON Home resources.
        self.endpoint_url_type = version_type
        self._routers = routers
        super(Version, self).__init__()

    def _get_identity_url(self, context, version):
        """Returns a URL to keystone's own endpoint."""
        url = self.base_url(context, self.endpoint_url_type)
        return '%s/%s/' % (url, version)

    def _get_versions_list(self, context):
        """The list of versions is dependent on the context."""
        versions = {}
        if 'v2.0' in _VERSIONS:
            versions['v2.0'] = {
                'id': 'v2.0',
                'status': 'stable',
                'updated': '2014-04-17T00:00:00Z',
                'links': [
                    {
                        'rel': 'self',
                        'href': self._get_identity_url(context, 'v2.0'),
                    }, {
                        'rel': 'describedby',
                        'type': 'text/html',
                        'href': 'http://docs.openstack.org/'
                    }
                ],
                'media-types': [
                    {
                        'base': 'application/json',
                        'type': MEDIA_TYPE_JSON % 'v2.0'
                    }
                ]
            }
        if 'v3' in _VERSIONS:
            versions['v3'] = {
                'id': 'v3.0',
                'status': 'stable',
                'updated': '2013-03-06T00:00:00Z',
                'links': [
                    {
                        'rel': 'self',
                        'href': self._get_identity_url(context, 'v3'),
                    }
                ],
                'media-types': [
                    {
                        'base': 'application/json',
                        'type': MEDIA_TYPE_JSON % 'v3'
                    }
                ]
            }
        return versions

    def get_versions(self, context):
        """GET /: list versions, or serve JSON Home when requested."""
        # JSON Home requests get the translated /v3 resource map instead of
        # the 300 Multiple Choices version list.
        req_mime_type = v3_mime_type_best_match(context)
        if req_mime_type == MimeTypes.JSON_HOME:
            v3_json_home = request_v3_json_home('/v3')
            return wsgi.render_response(
                body=v3_json_home,
                headers=(('Content-Type', MimeTypes.JSON_HOME),))
        versions = self._get_versions_list(context)
        return wsgi.render_response(status=(300, 'Multiple Choices'), body={
            'versions': {
                'values': versions.values()
            }
        })

    def get_version_v2(self, context):
        """Describe the v2.0 API; 404 when v2.0 is not deployed."""
        versions = self._get_versions_list(context)
        if 'v2.0' in _VERSIONS:
            return wsgi.render_response(body={
                'version': versions['v2.0']
            })
        else:
            raise exception.VersionNotFound(version='v2.0')

    def _get_json_home_v3(self):
        # Merge the JSON Home resources contributed by every v3 router.
        def all_resources():
            for router in self._routers:
                for resource in router.v3_resources:
                    yield resource
        return {
            'resources': dict(all_resources())
        }

    def get_version_v3(self, context):
        """Describe the v3 API; 404 when v3 is not deployed."""
        versions = self._get_versions_list(context)
        if 'v3' in _VERSIONS:
            req_mime_type = v3_mime_type_best_match(context)
            if req_mime_type == MimeTypes.JSON_HOME:
                return wsgi.render_response(
                    body=self._get_json_home_v3(),
                    headers=(('Content-Type', MimeTypes.JSON_HOME),))
            return wsgi.render_response(body={
                'version': versions['v3']
            })
        else:
            raise exception.VersionNotFound(version='v3')
|
krayush07/deep-attention-text-classifier-tf | global_module/run_module/run_test.py | Python | bsd-3-clause | 1,283 | 0 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from global_module.settings_module import set_dict
import global_module.implementation_module.test as test_model
from global_module.utility_code import convert_pred_to_class as convert
#########################################################
# Utility function to load training vocab files
#########################################################
def load_dictionary():
    """
    Utility function to load training vocab files
    :return: a set_dict.Dictionary built for the test ('TE') mode
    """
    return set_dict.Dictionary('TE')
def initialize_test_session():
    """
    Load the vocab dictionary and build the test session/model.
    :return: (session, test model, dictionary object)
    """
    dict_obj = test_util()
    session, mtest = test_model.init_test()
    return session, mtest, dict_obj
def call_test(session, mtest, dict_obj):
    """
    Thin wrapper that runs the test model over the dictionary data.
    (Reconstructed: the original lines were split by extraction artifacts.)
    """
    test_model.run_test(session, mtest, dict_obj)
def test_util():
    """
    Utility function to execute the testing pipeline
    :return: the loaded dictionary object
    """
    dict_obj = load_dictionary()
    return dict_obj
return dict_obj
def main():
    """
    Starting module for testing
    :return:
    """
    print('STARTING TESTING')
    session, mtest, dict_obj = initialize_test_session()
    call_test(session, mtest, dict_obj)
    # Convert the raw predictions written during testing into class labels.
    convert.convert(dict_obj.rel_dir.test_cost_path)
if __name__ == '__main__':
main()
|
dbarbier/ot-svn | python/doc/sphinxext/numpydoc/linkcode.py | Python | gpl-3.0 | 2,510 | 0.001992 | # -*- coding: utf-8 -*-
"""
linkcode
~~~~~~~~
Add external links to module code in Python object descriptions.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, absolute_import, print_function
import warnings
import collections
warnings.warn("This extension has been accepted to Sphinx upstream. "
"Use the version from there (Sphinx >= 1.2) "
"https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode",
FutureWarning, stacklevel=1)
from docutils import nodes
from sphinx import addnodes
from sphinx.locale import _
from sphinx.errors import SphinxError
class LinkcodeError(SphinxError):
    # Raised for configuration errors in this extension
    # (e.g. missing linkcode_resolve in conf.py).
    category = "linkcode error"
def doctree_read(app, doctree):
    """Append [source] links to object descriptions.

    For every object signature in the doctree, asks the user-supplied
    ``linkcode_resolve(domain, info)`` callback for a URL and, when one is
    returned, adds an HTML-only ``[source]`` reference node.
    """
    env = app.builder.env

    resolve_target = getattr(env.config, 'linkcode_resolve', None)
    # callable() instead of isinstance(..., collections.Callable): the
    # collections ABC aliases were removed in Python 3.10, and getattr's
    # None default now yields LinkcodeError rather than AttributeError.
    if not callable(resolve_target):
        raise LinkcodeError(
            "Function `linkcode_resolve` is not given in conf.py")

    # Signature attributes forwarded to the resolver, per domain.
    domain_keys = dict(
        py=['module', 'fullname'],
        c=['names'],
        cpp=['names'],
        js=['object', 'fullname'],
    )

    for objnode in doctree.traverse(addnodes.desc):
        domain = objnode.get('domain')
        uris = set()
        for signode in objnode:
            if not isinstance(signode, addnodes.desc_signature):
                continue

            # Convert signode to a specified format.
            info = {}
            for key in domain_keys.get(domain, []):
                value = signode.get(key)
                if not value:
                    value = ''
                info[key] = value
            if not info:
                continue

            # Call user code to resolve the link.
            uri = resolve_target(domain, info)
            if not uri:
                # no source
                continue
            if uri in uris:
                # only one link per name, please
                continue
            uris.add(uri)

            onlynode = addnodes.only(expr='html')
            onlynode += nodes.reference('', '', internal=False, refuri=uri)
            onlynode[0] += nodes.inline('', _('[source]'),
                                        classes=['viewcode-link'])
            signode += onlynode
signode += onlynode
def setup(app):
    # Sphinx extension entry point: hook the doctree-read event and
    # register the linkcode_resolve configuration value.
    app.connect('doctree-read', doctree_read)
    app.add_config_value('linkcode_resolve', None, '')
CospanDesign/nysa-dionysus-platform | dionysus/misc.py | Python | gpl-2.0 | 8,094 | 0.001112 | # Copyright (c) 2008-2012, Neotion
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Neotion nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NEOTION BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Miscelleanous helpers
"""
import binascii
import re
# String values evaluated as true boolean values
TRUE_BOOLEANS = ['on', 'true', 'enable', 'enabled', 'yes', 'high', '1']
# String values evaluated as false boolean values
FALSE_BOOLEANS = ['off', 'false', 'disable', 'disabled', 'no', 'low', '0']
# ASCII or '.' filter
ASCIIFILTER = ''.join([((len(repr(chr(_x))) == 3) or (_x == 0x5c)) and chr(_x)
or '.' for _x in range(256)])
def hexdump(data, full=False, abbreviate=False):
    """Convert a binary buffer into a hexadecimal representation.

    Return a multi-line strings with hexadecimal values and ASCII
    representation of the buffer data.

    :full: use `hexdump -Cv` format
    :abbreviate: replace identical lines with '*'
    """
    # NOTE: xrange/ord-on-str usage below means this is Python 2 code.
    from array import array
    if isinstance(data, array):
        data = data.tostring()
    src = ''.join(data)
    length = 16
    result = []
    last = ''
    abv = False
    for i in xrange(0, len(src), length):
        s = src[i:i+length]
        if abbreviate:
            # Collapse runs of identical 16-byte lines into a single '*'.
            if s == last:
                if not abv:
                    result.append('*\n')
                    abv = True
                continue
            else:
                abv = False
        hexa = ' '.join(["%02x" % ord(x) for x in s])
        # Non-printable bytes become '.' via the module-level ASCIIFILTER.
        printable = s.translate(ASCIIFILTER)
        if full:
            # `hexdump -Cv` style: offset, two hex halves, |ascii|.
            hx1, hx2 = hexa[:3*8], hexa[3*8:]
            l = length/2
            result.append("%08x %-*s %-*s |%s|\n" %
                          (i, l*3, hx1, l*3, hx2, printable))
        else:
            result.append("%06x %-*s %s\n" %
                          (i, length*3, hexa, printable))
        last = s
    return ''.join(result)
def hexline(data, sep=' '):
    """Convert a binary buffer into a hexadecimal representation

    Return a string with hexadecimal values and ASCII representation
    of the buffer data
    """
    # BUG FIX: the original tested isinstance(data, Array) against the
    # undefined name 'Array' (only hexdump() imported 'array', and only
    # locally), so this function raised NameError whenever it ran.
    from array import array
    if isinstance(data, array):
        data = data.tostring()
    src = ''.join(data)
    hexa = sep.join(["%02x" % ord(x) for x in src])
    printable = src.translate(ASCIIFILTER)
    return "(%d) %s : %s" % (len(data), hexa, printable)
def to_int(value):
    """Parse a value and convert it into an integer value if possible.

    Input value may be:
    - a string with an integer coded as a decimal value
    - a string with an integer coded as a hexadecimal value
    - a integral value
    - a integral value with a unit specifier (kilo or mega)
    """
    # NOTE: the `long` branch below means this is Python 2 code.
    if not value:
        return 0
    if isinstance(value, int):
        return value
    if isinstance(value, long):
        return int(value)
    mo = re.match('^\s*(\d+)\s*(?:([KMkm]i?)?B?)?\s*$', value)
    if mo:
        # Unit multipliers: K/M are decimal, Ki/Mi are binary.
        mult = {'K': (1000),
                'KI': (1 << 10),
                'M': (1000 * 1000),
                'MI': (1 << 20)}
        value = int(mo.group(1))
        if mo.group(2):
            value *= mult[mo.group(2).upper()]
        return value
    # Fallback: plain int parsing, hex when the string starts with '0x'.
    return int(value.strip(), value.startswith('0x') and 16 or 10)
def to_bool(value, permissive=True, allow_int=False):
    """Parse a string and convert it into a boolean value if possible.

    :param value: the value to parse and convert
    :param permissive: default to the False value if parsing fails
    :param allow_int: allow an integral type as the input value

    Input value may be:
    - a string with an integer value, if `allow_int` is enabled
    - a boolean value
    - a string with a common boolean definition
    """
    if value is None:
        return False
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        if allow_int:
            return bool(value)
        if permissive:
            return False
        # BUG FIX: the message is now %-formatted; the original passed the
        # value as a second ValueError argument, leaving '%d' unexpanded.
        raise ValueError("Invalid boolean value: '%d'" % value)
    if value.lower() in TRUE_BOOLEANS:
        return True
    if permissive or (value.lower() in FALSE_BOOLEANS):
        return False
    # BUG FIX: removed the stray leading quote from the message.
    raise ValueError('Invalid boolean value: "%s"' % value)
def _crccomp16():
    """Internal function used by crc16()"""
    # Lazily build a CCITT CRC-16 function and yield it forever, so the
    # crcmod dependency is only required when a CRC is actually computed.
    try:
        from crcmod import mkCrcFun
    except ImportError:
        raise AssertionError("Python crcmod module not installed")
    crc_polynomial = 0x11021
    crc_initial = 0xFFFF
    crc = mkCrcFun(crc_polynomial, crc_initial, False)
    while True:
        yield crc
def _crccomp32():
    """Internal function used by crc32()"""
    # Same lazy-generator pattern as _crccomp16(), for the MPEG2 CRC-32.
    # NOTE: the 0x...L long literal below means this is Python 2 code.
    try:
        from crcmod import mkCrcFun
    except ImportError:
        raise AssertionError("Python crcmod module not installed")
    crc_polynomial = 0x104C11DB7
    crc_initial = 0xFFFFFFFFL
    crc = mkCrcFun(crc_polynomial, crc_initial, False)
    while True:
        yield crc
def crc16(data):
    """Compute the CCITT CRC-16 checksum"""
    crc = next(_crccomp16())
    return crc(data)
def xor(_a_, _b_):
    """XOR logical operation.

    :param _a_: first argument
    :param _b_: second argument
    :return: True when exactly one argument is truthy
    """
    # bool inequality is exactly logical XOR of the operands' truthiness.
    return bool(_a_) != bool(_b_)
def crc32(data):
    """Compute the MPEG2 CRC-32 checksum"""
    crc = next(_crccomp32())
    return crc(data)
def is_iterable(obj):
    """Tells whether an instance is iterable or not"""
    # iter() raises TypeError for non-iterables; EAFP-style probe.
    try:
        iter(obj)
    except TypeError:
        return False
    return True
def pretty_size(size, sep=' ', lim_k=1 << 10, lim_m=10 << 20, plural=True,
                floor=True):
    """Convert a size into a more readable unit-indexed size (KiB, MiB)

    :param size: integral value to convert
    :param sep: the separator character between the integral value and
        the unit specifier
    :param lim_k: any value above this limit is a candidate for KiB
        conversion
    :param lim_m: any value above this limit is a candidate for MiB
        conversion
    :param plural: whether to append a final 's' to byte(s)
    :param floor: when True, truncate to the closest smaller unit value;
        when False, only use a unit if the value is exactly representable
        in it, otherwise fall back to the next smaller unit
    """
    size = int(size)
    # Try the largest unit first; fall through when it does not apply.
    for limit, shift, unit in ((lim_m, 20, 'MiB'), (lim_k, 10, 'KiB')):
        if size > limit:
            scaled = size >> shift
            if floor or (scaled << shift) == size:
                return '%d%s%s' % (scaled, sep, unit)
    suffix = 's' if plural else ''
    return '%d%sbyte%s' % (size, sep, suffix)
|
pkimber/enquiry | enquiry/migrations/0001_initial.py | Python | apache-2.0 | 1,028 | 0.000973 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the enquiry app: a single Enquiry model.

    (Reconstructed two keyword arguments that were split mid-identifier
    by extraction artifacts: primary_key=True and max_length=100.)
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Enquiry',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('email', models.EmailField(blank=True, max_length=75)),
                ('phone', models.CharField(blank=True, max_length=100)),
            ],
            options={
                'ordering': ['-created'],
                'verbose_name_plural': 'Enquiries',
                'verbose_name': 'Enquiry',
            },
            bases=(models.Model,),
        ),
    ]
|
rafaelwerneck/kuaa | classifiers/libSVM_3_17/grid.py | Python | gpl-3.0 | 15,616 | 0.045402 | #!/usr/bin/env python
__all__ = ['find_parameters']
import os, sys, traceback, getpass, time, re
from threading import Thread
from subprocess import *
import multiprocessing
#Werneck
import platform
#-------
if sys.version_info[0] < 3:
from Queue import Queue
else:
from queue import Queue
telnet_workers = []
ssh_workers = []
#Werneck
#nr_local_worker = 1
nr_local_worker = int(multiprocessing.cpu_count())
#-------
class GridOption:
    """Holds every grid-search setting parsed from the command line."""
    def __init__(self, dataset_pathname, options):
        # Locate the svm-train binary and gnuplot per platform defaults;
        # parse_options() below may override both.
        dirname = os.path.dirname(__file__)
        if sys.platform != 'win32':
            self.svmtrain_pathname = os.path.join(dirname, 'svm-train')
            #Werneck
            # Use the 32-bit svm-train binary on 32-bit platforms.
            if platform.architecture()[0] == '32bit':
                self.svmtrain_pathname += '_32l'
            #-------
            self.gnuplot_pathname = '/usr/bin/gnuplot'
        else:
            # example for windows
            self.svmtrain_pathname = os.path.join(dirname, r'..\windows\svm-train.exe')
            # svmtrain_pathname = r'c:\Program Files\libsvm\windows\svm-train.exe'
            self.gnuplot_pathname = r'c:\tmp\gnuplot\binary\pgnuplot.exe'
        # Defaults: 5-fold CV and the standard libsvm log2(C)/log2(gamma)
        # search ranges (begin, end, step).
        self.fold = 5
        self.c_begin, self.c_end, self.c_step = -5, 15, 2
        self.g_begin, self.g_end, self.g_step = 3, -15, -2
        self.grid_with_c, self.grid_with_g = True, True
        self.dataset_pathname = dataset_pathname
        self.dataset_title = os.path.split(dataset_pathname)[1]
        self.out_pathname = '{0}.out'.format(self.dataset_title)
        self.png_pathname = '{0}.png'.format(self.dataset_title)
        self.pass_through_string = ' '
        self.resume_pathname = None
        self.parse_options(options)
    def parse_options(self, options):
        """Parse grid.py options; unrecognized ones pass through to svm-train."""
        if type(options) == str:
            options = options.split()
        i = 0
        pass_through_options = []
        while i < len(options):
            if options[i] == '-log2c':
                i = i + 1
                # 'null' disables the C sweep entirely.
                if options[i] == 'null':
                    self.grid_with_c = False
                else:
                    self.c_begin, self.c_end, self.c_step = map(float,options[i].split(','))
            elif options[i] == '-log2g':
                i = i + 1
                if options[i] == 'null':
                    self.grid_with_g = False
                else:
                    self.g_begin, self.g_end, self.g_step = map(float,options[i].split(','))
            elif options[i] == '-v':
                i = i + 1
                self.fold = options[i]
            elif options[i] in ('-c','-g'):
                raise ValueError('Use -log2c and -log2g.')
            elif options[i] == '-svmtrain':
                i = i + 1
                self.svmtrain_pathname = options[i]
            elif options[i] == '-gnuplot':
                i = i + 1
                if options[i] == 'null':
                    self.gnuplot_pathname = None
                else:
                    self.gnuplot_pathname = options[i]
            elif options[i] == '-out':
                i = i + 1
                if options[i] == 'null':
                    self.out_pathname = None
                else:
                    self.out_pathname = options[i]
            elif options[i] == '-png':
                i = i + 1
                self.png_pathname = options[i]
            elif options[i] == '-resume':
                # -resume takes an optional pathname argument.
                if i == (len(options)-1) or options[i+1].startswith('-'):
                    self.resume_pathname = self.dataset_title + '.out'
                else:
                    i = i + 1
                    self.resume_pathname = options[i]
            else:
                pass_through_options.append(options[i])
            i = i + 1
        self.pass_through_string = ' '.join(pass_through_options)
        # Validate the resulting configuration before the search starts.
        if not os.path.exists(self.svmtrain_pathname):
            raise IOError('svm-train executable not found')
        if not os.path.exists(self.dataset_pathname):
            raise IOError('dataset not found')
        if self.resume_pathname and not os.path.exists(self.resume_pathname):
            raise IOError('file for resumption not found')
        if not self.grid_with_c and not self.grid_with_g:
            raise ValueError('-log2c and -log2g should not be null simultaneously')
        if self.gnuplot_pathname and not os.path.exists(self.gnuplot_pathname):
            sys.stderr.write('gnuplot executable not found\n')
            self.gnuplot_pathname = None
def redraw(db,best_param,gnuplot,options,tofile=False):
    """Redraw the (log2C, log2gamma) accuracy contour through gnuplot.

    db is a list of (log2c, log2g, rate) samples; best_param is the
    (log2c, log2g, rate) triple shown in the plot labels. When tofile is
    true, render to options.png_pathname instead of an on-screen window.
    (Reconstructed two lines split mid-token by extraction artifacts:
    b"unset surface\\n" and the "set title" .format chain.)
    """
    if len(db) == 0: return
    begin_level = round(max(x[2] for x in db)) - 3
    step_size = 0.5
    best_log2c,best_log2g,best_rate = best_param
    # if newly obtained c, g, or cv values are the same,
    # then stop redrawing the contour.
    if all(x[0] == db[0][0] for x in db): return
    if all(x[1] == db[0][1] for x in db): return
    if all(x[2] == db[0][2] for x in db): return
    if tofile:
        gnuplot.write(b"set term png transparent small linewidth 2 medium enhanced\n")
        gnuplot.write("set output \"{0}\"\n".format(options.png_pathname.replace('\\','\\\\')).encode())
        #gnuplot.write(b"set term postscript color solid\n")
        #gnuplot.write("set output \"{0}.ps\"\n".format(options.dataset_title).encode().encode())
    elif sys.platform == 'win32':
        gnuplot.write(b"set term windows\n")
    else:
        gnuplot.write( b"set term x11\n")
    gnuplot.write(b"set xlabel \"log2(C)\"\n")
    gnuplot.write(b"set ylabel \"log2(gamma)\"\n")
    gnuplot.write("set xrange [{0}:{1}]\n".format(options.c_begin,options.c_end).encode())
    gnuplot.write("set yrange [{0}:{1}]\n".format(options.g_begin,options.g_end).encode())
    gnuplot.write(b"set contour\n")
    gnuplot.write("set cntrparam levels incremental {0},{1},100\n".format(begin_level,step_size).encode())
    gnuplot.write(b"unset surface\n")
    gnuplot.write(b"unset ztics\n")
    gnuplot.write(b"set view 0,0\n")
    gnuplot.write("set title \"{0}\"\n".format(options.dataset_title).encode())
    gnuplot.write(b"unset label\n")
    gnuplot.write("set label \"Best log2(C) = {0} log2(gamma) = {1} accuracy = {2}%\" \
at screen 0.5,0.85 center\n". \
format(best_log2c, best_log2g, best_rate).encode())
    gnuplot.write("set label \"C = {0} gamma = {1}\""
                  " at screen 0.5,0.8 center\n".format(2**best_log2c, 2**best_log2g).encode())
    gnuplot.write(b"set key at screen 0.9,0.9\n")
    gnuplot.write(b"splot \"-\" with lines\n")
    # Emit the samples sorted by C (gnuplot wants a blank line per C group).
    db.sort(key = lambda x:(x[0], -x[1]))
    prevc = db[0][0]
    for line in db:
        if prevc != line[0]:
            gnuplot.write(b"\n")
            prevc = line[0]
        gnuplot.write("{0[0]} {0[1]} {0[2]}\n".format(line).encode())
    gnuplot.write(b"e\n")
    gnuplot.write(b"\n") # force gnuplot back to prompt when term set failure
    gnuplot.flush()
def calculate_jobs(options):
    """Build the (log2c, log2g) evaluation schedule for the grid search.

    Returns (jobs, resumed_jobs): jobs is a list of lines, each a list of
    (c, g) pairs ordered coarse-to-fine; resumed_jobs maps (c, g) pairs
    already found in the resume file to their cross-validation rate.
    """
    def range_f(begin,end,step):
        # like range, but works on non-integer too
        seq = []
        while True:
            if step > 0 and begin > end: break
            if step < 0 and begin < end: break
            seq.append(begin)
            begin = begin + step
        return seq
    def permute_sequence(seq):
        # Reorder so the midpoint comes first, then alternate halves:
        # coarse coverage of the range before refinement.
        n = len(seq)
        if n <= 1: return seq
        mid = int(n/2)
        left = permute_sequence(seq[:mid])
        right = permute_sequence(seq[mid+1:])
        ret = [seq[mid]]
        while left or right:
            if left: ret.append(left.pop(0))
            if right: ret.append(right.pop(0))
        return ret
    c_seq = permute_sequence(range_f(options.c_begin,options.c_end,options.c_step))
    g_seq = permute_sequence(range_f(options.g_begin,options.g_end,options.g_step))
    if not options.grid_with_c:
        c_seq = [None]
    if not options.grid_with_g:
        g_seq = [None]
    nr_c = float(len(c_seq))
    nr_g = float(len(g_seq))
    i, j = 0, 0
    jobs = []
    # Interleave C and gamma refinement so both axes gain resolution evenly.
    while i < nr_c or j < nr_g:
        if i/nr_c < j/nr_g:
            # increase C resolution
            line = []
            for k in range(0,j):
                line.append((c_seq[i],g_seq[k]))
            i = i + 1
            jobs.append(line)
        else:
            # increase g resolution
            line = []
            for k in range(0,i):
                line.append((c_seq[k],g_seq[j]))
            j = j + 1
            jobs.append(line)
    resumed_jobs = {}
    if options.resume_pathname is None:
        return jobs, resumed_jobs
    # Parse previously computed results ("log2c=... log2g=... rate=...").
    for line in open(options.resume_pathname, 'r'):
        line = line.strip()
        rst = re.findall(r'rate=([0-9.]+)',line)
        if not rst:
            continue
        rate = float(rst[0])
        c, g = None, None
        rst = re.findall(r'log2c=([0-9.-]+)',line)
        if rst:
            c = float(rst[0])
        rst = re.findall(r'log2g=([0-9.-]+)',line)
        if rst:
            g = float(rst[0])
        resumed_jobs[(c,g)] = rate
    return jobs, resumed_jobs
class WorkerStopToken: # used to notify the worker to stop or if a worker is dead
    # Sentinel placed on the job queue; carries no data of its own.
    pass
class Worker(Thread):
def __init__(self,name,job_queue,result_queue,options):
Thread.__init__(self)
self.name = name
self.job_queue = job_queue
self.result_queue = result_queue
self.options = options
def run(self):
while True:
(cexp,gexp) = self.job_queue.get()
if cexp is WorkerStopToken:
self.job_queue.put((cexp,gexp))
# print('worker {0} stop.'.format(self.name))
break
try:
c, g = None, None
if cexp != None:
c = 2.0**cexp
if gexp != None:
g = 2.0**gexp
|
lafranceinsoumise/api-django | agir/mailing/migrations/0007_segment_events_organizer.py | Python | agpl-3.0 | 552 | 0.001818 | # Generated by Django 2.2.5 on 2019-10-02 16:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the events_organizer boolean filter flag to Segment.

    (Reconstructed the dependency app label "mailing", which was split
    mid-string by extraction artifacts.)
    """

    dependencies = [("mailing", "0006_auto_20191002_1752")]

    operations = [
        migrations.AddField(
            model_name="segment",
            name="events_organizer",
            field=models.BooleanField(
                blank=True,
                default=False,
                verbose_name="Limiter aux organisateurices (sans effet si pas d'autres filtres événements)",
            ),
        )
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.